code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M
#! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src/"))
import unittest
import numpy as np
import os
from imtools import qmisc
from imtools import misc
#
class QmiscTest(unittest.TestCase):
interactiveTest = False
# interactiveTest = True
# @unittest.skip("waiting for implementation")
def test_suggest_filename(self):
"""
Test suggested filenames for several inputs. Recursion in filenames is not
tested, i.e. the case where file0, file1 and file2 already exist and the input file is "file".
"""
filename = "mujsoubor"
# import ipdb; ipdb.set_trace() # BREAKPOINT
new_filename = misc.suggest_filename(filename, exists=True)
# self.assertTrue(new_filename == "mujsoubor2")
self.assertEqual(new_filename, "mujsoubor_2")
filename = "mujsoubor_112"
new_filename = misc.suggest_filename(filename, exists=True)
self.assertTrue(new_filename == "mujsoubor_113")
filename = "mujsoubor_2.txt"
new_filename = misc.suggest_filename(filename, exists=True)
self.assertTrue(new_filename == "mujsoubor_3.txt")
filename = "mujsoubor27.txt"
new_filename = misc.suggest_filename(filename, exists=True)
self.assertTrue(new_filename == "mujsoubor27_2.txt")
filename = "mujsoubor-a24.txt"
new_filename = misc.suggest_filename(filename, exists=False)
self.assertEqual(new_filename, "mujsoubor-a24.txt", "Rewrite")
@unittest.skip("getVersionString is not used anymore")
def test_getVersionString(self):
"""
getVersionString is not used anymore
"""
vfn = "../__VERSION__"
existed = os.path.exists(vfn)
if not existed:
with open(vfn, 'a') as the_file:
the_file.write('1.1.1\n')
verstr = qmisc.getVersionString()
self.assertTrue(type(verstr) == str)
# remove the version file only if this test created it
if not existed:
os.remove(vfn)
def test_obj_to_and_from_file_yaml(self):
testdata = np.random.random([4, 4, 3])
test_object = {'a': 1, 'data': testdata}
filename = 'test_obj_to_and_from_file.yaml'
misc.obj_to_file(test_object, filename, 'yaml')
saved_object = misc.obj_from_file(filename, 'yaml')
self.assertTrue(saved_object['a'] == 1)
self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])
os.remove(filename)
def test_obj_to_and_from_file_pickle(self):
testdata = np.random.random([4, 4, 3])
test_object = {'a': 1, 'data': testdata}
filename = 'test_obj_to_and_from_file.pkl'
misc.obj_to_file(test_object, filename, 'pickle')
saved_object = misc.obj_from_file(filename, 'pickle')
self.assertTrue(saved_object['a'] == 1)
self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])
os.remove(filename)
# def test_obj_to_and_from_file_exeption(self):
# test_object = [1]
# filename = 'test_obj_to_and_from_file_exeption'
# self.assertRaises(misc.obj_to_file(test_object, filename ,'yaml'))
def test_obj_to_and_from_file_with_directories(self):
import shutil
testdata = np.random.random([4, 4, 3])
test_object = {'a': 1, 'data': testdata}
dirname = '__test_write_and_read'
filename = '__test_write_and_read/test_obj_to_and_from_file.pkl'
misc.obj_to_file(test_object, filename, 'pickle')
saved_object = misc.obj_from_file(filename, 'pickle')
self.assertTrue(saved_object['a'] == 1)
self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])
shutil.rmtree(dirname)
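# Illustrative sketch only (an assumption, not the imtools implementation): an
# approximation of the suffix-increment behaviour that misc.suggest_filename()
# is expected to show in the tests above.
def _suggest_filename_sketch(filename, exists=True):
    import re
    if not exists:
        return filename
    base, ext = os.path.splitext(filename)
    match = re.search(r"_(\d+)$", base)
    if match:
        base = base[:match.start()] + "_" + str(int(match.group(1)) + 1)
    else:
        base = base + "_2"
    return base + ext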
if __name__ == "__main__":
unittest.main()
| mjirik/imtools | tests/qmisc_test.py | Python | mit | 3,908 |
from django.contrib import admin
from django import forms
from django.contrib.auth.models import User
from sample.models import (Doctor, Worker, Patient, SpecialtyType, TimeSlot, Case, Comment, CommentGroup,
Scan)
class searchDoctor(admin.ModelAdmin):
list_display = ['user_first_name', 'user_last_name', 'get_some_value']
search_fields = ['user__first_name', 'user__last_name',
'specialties__name']
class searchWorker(admin.ModelAdmin):
list_display = ['user_first_name', 'user_last_name']
search_fields = ['user__first_name', 'user__last_name']
class searchPatient(admin.ModelAdmin):
list_display = ['first_name', 'last_name']
search_fields = ['first_name', 'last_name']
class searchSpeciality(admin.ModelAdmin):
search_fields = ['name']
list_display = ['name']
class searchTimeslot(admin.ModelAdmin):
search_fields = ['start_time', 'end_time']
class searchCase(admin.ModelAdmin):
search_fields = ['id']
list_display = ['id']
class searchComment(admin.ModelAdmin):
search_fields = ['text']
list_display = ['text']
class searchScan(admin.ModelAdmin):
search_fields = ['patient', 'comments']
admin.site.register(Doctor, searchDoctor)
admin.site.register(Worker, searchWorker)
admin.site.register(Patient, searchPatient)
admin.site.register(SpecialtyType, searchSpeciality)
admin.site.register(TimeSlot, searchTimeslot)
admin.site.register(Case, searchCase)
admin.site.register(Comment, searchComment)
admin.site.register(CommentGroup)
admin.site.register(Scan, searchScan)
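# Note: search_fields entries such as 'user__first_name' use Django's double
# underscore syntax to follow the foreign key to the related User, so the admin
# search box also matches on the linked user's name.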
| CSC301H-Fall2013/Ultra-Remote-Medicine | sample/admin.py | Python | mit | 1,569 |
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="width", parent_name="violin", **kwargs):
super(WidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/violin/_width.py | Python | mit | 472 |
from classifier import Classifier
from itertools import combinations
from datetime import datetime
import sys
import os
open_path = "PatternDumps/open"
closed_path = "PatternDumps/closed"
monitored_sites = ["cbsnews.com", "google.com", "nrk.no", "vimeo.com", "wikipedia.org", "youtube.com"]
per_burst_weight = 1
total_cells_weight = 1.1
diff_threshold = 1.5 # Higher threshold implies lower true and false positive rate
max_threshold = 7
def mkdir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def indexOfSortedValues(l, descending=False):
sort = sorted(l, reverse=descending)
indices = [l.index(x) for x in sort]
return indices
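# For example, indexOfSortedValues([3, 1, 2]) returns [1, 2, 0]: the original
# index of each value when the values are read in ascending order.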
def calculateDistanceVotes(vector, w):
G = indexOfSortedValues(vector)
l = float(len(vector))
votes = []
for i in range(len(vector)):
j = i
while True:
try:
r = G.index(j)
break
except:
j -= 1
v = 2*w - 2*r/l*w
if v == 2.0:
v += 2.0
votes.append(v)
return votes
def createTrainingSets(n):
l = []
for i in range(n):
l.append(range(0, i)+range(i+1, n)+range(i, i+1))
return l
def matchAgainstClassifiers(clf, fp):
pred = []
pbd = []
td = []
for c in clf:
pred.append(c.predict(fp))
pbd.append(c.perBurstDistance(fp))
td.append(c.totalDistance(fp))
pbv = calculateDistanceVotes(pbd, per_burst_weight)
tdv = calculateDistanceVotes(td, total_cells_weight)
total = [pred[i] + pbv[i] + tdv[i] for i in range(len(clf))]
return total
def getFingerprint(f_path):
with open(f_path, "r") as f:
fp = [int(dimension) for dimension in f.readlines()]
f.close()
return fp
def createClassifiers(file_list):
clf = [Classifier(site) for site in monitored_sites]
for i in file_list:
for k, site in enumerate(monitored_sites):
f_path = "%s/%s/%i.fp" % (closed_path, site, i)
fp = getFingerprint(f_path)
clf[k].train(fp)
return clf
def createExperimentSets(n_train, n_exp):
tot = n_train + n_exp
tot_r = range(tot)
combo_train = list(combinations(tot_r, n_train))
exp_sets = []
if n_train == 1:
for t in combo_train:
exp_sets.append([[y for y in t], [x for x in tot_r if x not in t]])
tot_r = [x for x in tot_r if x not in t]
else:
for t in combo_train:
exp_sets.append([[y for y in t], [x for x in tot_r if x not in t]])
return exp_sets
def closedWorldExperiment(n_train, n_exp):
experiment_sets = createExperimentSets(n_train, n_exp)
total_results = dict.fromkeys(range(0, 6), 0)
site_results = dict.fromkeys(monitored_sites, 0)
total = 0
for e_set in experiment_sets:
training_set = e_set[0]
# Create classifiers with training data
clf = createClassifiers(training_set)
for exp in e_set[1]:
for i, site in enumerate(monitored_sites):
f_path = "%s/%s/%d.fp" % (closed_path, site, exp)
fp = getFingerprint(f_path)
votes = matchAgainstClassifiers(clf, fp)
res = indexOfSortedValues(votes, descending=True)
j = 0
while True:
try:
rank = res.index(i-j)
break
except:
j += 1
total += 1
total_results[rank] += 1
if rank == 0:
site_results[site] += 1
storeClosedWorldResult(n_train, n_exp, total, total_results, site_results)
def openWorldFileList(train_range):
fp_list = []
for (dirpath, dirnames, filenames) in os.walk(closed_path):
for f in filenames:
if f[-3:] == ".fp" and not int(f[-4]) in train_range:
fp_list.append(dirpath+"/"+f)
for (dirpath, dirnames, filenames) in os.walk(open_path):
for f in filenames:
if f[-3:] == ".fp":
fp_list.append(dirpath+"/"+f)
return fp_list
# Returns True if votes imply an open world hit
def openWorldThreshold(votes):
if max(votes) > max_threshold and (max(votes)-sorted(votes)[-2]) > diff_threshold:
return True
else:
return False
# Returns true if the supplied fingerprint feature vector is predicted to belong to one of the marked sites
def openWorldPrediction(marked, feature_vector, clf):
votes = matchAgainstClassifiers(clf, feature_vector)
res = indexOfSortedValues(votes, descending=True)
guessed_site = monitored_sites[res[0]]
# The site is guessed to be one of the marked ones
if guessed_site in marked and openWorldThreshold(votes):
return True
else:
return False
def openWorldExperiment(n_train, n_classifier, marked):
true_positives = 0
false_positives = 0
false_negatives = 0
true_negatives = 0
training_sets = [x[0] for x in createExperimentSets(n_train, n_classifier)]
for training_range in training_sets:
# Create classifiers with training data, use remaining feature vectors as experiments
clf = createClassifiers(training_range)
fv_paths = openWorldFileList(training_range)
for f_path in fv_paths:
feature_vector = getFingerprint(f_path)
actual_site = f_path.split("/")[-2]
hit = openWorldPrediction(marked, feature_vector, clf)
if hit:
if actual_site in marked:
true_positives += 1
else:
false_positives += 1
else:
if actual_site in marked:
false_negatives += 1
else:
true_negatives += 1
storeOpenWorldResult(n_train, marked, true_positives, false_positives, false_negatives, true_negatives)
def storeClosedWorldResult(n_train, n_exp, total, total_results, site_results):
with open("PatternResults/closed/%s" % (str(datetime.now())), "w") as r_file:
print "Completed experiment. Achieved accuracy of %.2f%%. Detailed results stored in %s." % (100*(float(total_results[0]))/total, r_file.name)
r_file.write("Number of training instances: %d\n" % n_train)
r_file.write("Number of predictions: %d\n\n" % total)
r_file.write("Accuracy:\t%.2f\n" % (float(total_results[0])/total))
for guesses in total_results:
r_file.write("%d:\t\t%d\t%.2f\n" % (guesses, total_results[guesses], float(total_results[guesses])/total))
r_file.write("\nIndividual site accuracy:\n")
for site in site_results:
r_file.write("%s: %.2f\n" % (site, float(site_results[site])/(total/len(monitored_sites))))
r_file.close()
def storeOpenWorldResult(n_train, marked, true_positives, false_positives, false_negatives, true_negatives):
first_dir = "PatternResults/open/%s_training_instances" % n_train
mkdir(first_dir)
second_dir = "%s/%d_marked_sites" % (first_dir, len(marked))
mkdir(second_dir)
acc = float(true_positives+true_negatives)/(true_positives+false_negatives+false_positives+true_negatives)
with open("%s/%s" % (second_dir, marked), "w") as r_file:
print "Completed experiment. Achieved an accuracy of %.2f%%. Detailed results stored in %s." % (100*acc, r_file.name)
r_file.write("Number of training instances: %d\n" % n_train)
r_file.write("Marked sites: ")
for site in marked:
r_file.write(site+" ")
r_file.write("\n\nTP\tFP\tTN\tFN\n%d\t%d\t%d\t%d" % (true_positives, false_positives, true_negatives, false_negatives))
if __name__=="__main__":
try:
model = sys.argv[1]
if model not in ["closed", "open"]:
raise
except:
print "Error: first argument must be either 'open' or 'closed'"
print "Usage: python %s <closed/open> <number of training instances> <number of experiment instances> <marked sites (if open world)>" % sys.argv[0]
sys.exit()
if model == "closed":
try:
n_train = int(sys.argv[2])
n_exp = int(sys.argv[3])
except:
print "Error: second and third argument must be the number of training instances and experiments respectively"
print "Usage: python %s <closed/open> <number of training instances> <number of experiment instances> <marked sites (if open world)>" % sys.argv[0]
sys.exit()
closedWorldExperiment(n_train, n_exp)
elif model == "open":
try:
n_train = int(sys.argv[2])
n_exp = int(sys.argv[3])
except:
print "Error: second and third argument must be the number of training instances and experiments respectively"
print "Usage: python %s <closed/open> <number of training instances> <number of experiment instances> <marked sites (if open world)>" % sys.argv[0]
sys.exit()
marked = []
i = 4
while True:
try:
marked_site = sys.argv[i]
marked.append(marked_site)
i += 1
except:
break
if len(marked) == 0:
print "Error: no marked sites supplied."
print "Usage: python %s <closed/open> <number of training instances> <number of experiment instances> <marked sites (if open world)>" % sys.argv[0]
sys.exit()
for site in marked:
if site not in monitored_sites:
print "Error: site %s is not part of classifier and can thus not be used as a monitored site" % site
sys.exit()
openWorldExperiment(n_train, n_exp, marked)
| chhans/tor-automation | patternexperiment.py | Python | mit | 8,474 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Alloy',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30, verbose_name=b"Alloy's name")),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Interpolation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('a0', models.CharField(max_length=200, verbose_name=b'Lattice parameter')),
('ac', models.CharField(max_length=200, verbose_name=b"Conduction's hydrostatic deformation potential")),
('av', models.CharField(max_length=200, verbose_name=b"Valence's hydrostatic deformation potential")),
('b', models.CharField(max_length=200, verbose_name=b'Deformation potential for tetragonal distortion')),
('c11', models.CharField(max_length=200, verbose_name=b'Elastic constant')),
('c12', models.CharField(max_length=200, verbose_name=b'Elastic constant')),
('me', models.CharField(max_length=200, verbose_name=b'Electron effective mass')),
('mhh', models.CharField(max_length=200, null=True, verbose_name=b'Heavy-hole effective mass')),
('mlh', models.CharField(max_length=200, null=True, verbose_name=b'Light-hole effective mass')),
('eg2', models.CharField(max_length=200, verbose_name=b'Gap energy at 2 K')),
('eg77', models.CharField(max_length=200, verbose_name=b'Gap energy at 77 K')),
('eg300', models.CharField(max_length=200, verbose_name=b'Gap energy at 300 K')),
('alloy', models.ForeignKey(verbose_name=b'The related alloy', to='muraki.Alloy')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Parameter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('a0', models.FloatField(verbose_name=b'Lattice parameter')),
('ac', models.FloatField(verbose_name=b"Conduction's hydrostatic deformation potential")),
('av', models.FloatField(verbose_name=b"Valence's hydrostatic deformation potential")),
('b', models.FloatField(verbose_name=b'Deformation potential for tetragonal distortion')),
('c11', models.FloatField(verbose_name=b'Elastic constant')),
('c12', models.FloatField(verbose_name=b'Elastic constant')),
('me', models.FloatField(verbose_name=b'Electron effective mass')),
('mhh', models.FloatField(null=True, verbose_name=b'Heavy-hole effective mass')),
('mlh', models.FloatField(null=True, verbose_name=b'Light-hole effective mass')),
('eg2', models.FloatField(verbose_name=b'Gap energy at 2 K')),
('eg77', models.FloatField(verbose_name=b'Gap energy at 77 K')),
('eg300', models.FloatField(verbose_name=b'Gap energy at 300 K')),
('alloy', models.ForeignKey(verbose_name=b'The related alloy', to='muraki.Alloy')),
],
options={
},
bases=(models.Model,),
),
]
| thiagolcmelo/segregation | segregation/muraki/migrations/0001_initial.py | Python | mit | 3,708 |
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
import urllib2, urllib
VERIFY_SERVER="http://api-verify.recaptcha.net/verify"
class RecaptchaWidget(forms.Widget):
def render(self, name, value, attrs=None):
return mark_safe("""<script type="text/javascript"
src="http://api.recaptcha.net/challenge?k=%(public_key)s"></script>
<noscript>
<iframe src="http://api.recaptcha.net/noscript?k=%(public_key)s"
height="300" width="500" frameborder="0"></iframe><br>
<textarea name="recaptcha_challenge_field" rows="3" cols="40">
</textarea>
<input type="hidden" name="recaptcha_response_field" value="manual_challenge">
</noscript>""" %({'public_key': settings.RECAPTCHA_PUBLIC_KEY}))
def value_from_datadict(self, data, files, name):
return {
'recaptcha_challenge_field': data.get('recaptcha_challenge_field', None),
'recaptcha_response_field' : data.get('recaptcha_response_field', None),
'remoteip' : data.get('remoteip', None)
}
class RecaptchaField(forms.Field):
default_error_messages = {"unknown": _("Unknown error."),
"invalid-site-public-key": _("Unable to verify public key."),
"invalid-site-private-key": _("Unable to verify private key."),
"invalid-request-cookie": _("The challenge parameter was filled incorrectly."),
"incorrect-captcha-sol": _("Invalid Captcha solution."),
"verify-params-incorrect": _("Make sure you are passing all the required parameters."),
"invalid-referrer": _("Invalid Referrer. Enter the correct keys for this domain"),
"recaptcha-not-reachable": _("The reCaptcha site seems to be down. Sorry!!!")}
widget = RecaptchaWidget
def verify(self, data):
captcha_req = urllib2.Request(VERIFY_SERVER,
data=urllib.urlencode({'privatekey': settings.RECAPTCHA_PRIVATE_KEY,
'remoteip' : data['remoteip'],
'challenge' : data['recaptcha_challenge_field'],
'response' : data['recaptcha_response_field'],}))
try:
response = urllib2.urlopen(captcha_req)
except urllib2.URLError,e :
raise forms.ValidationError(e)
resp_content = response.readlines()
return_code = resp_content[0].strip()
error = resp_content[1].strip()
if not return_code == "true":
raise forms.ValidationError(self.error_messages.get(error) or error)
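# Minimal usage sketch (a hypothetical form, not part of this module): the
# widget renders the reCAPTCHA markup, and verify() is expected to be called
# explicitly with the widget's POSTed data, e.g. from the form's clean step.
class ExampleCaptchaForm(forms.Form):
    captcha = RecaptchaField()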
| theju/django-comments-apps | recaptcha_comments/fields.py | Python | mit | 3,024 |
#!/usr/bin/python
from typing import List
"""
31. Next Permutation
https://leetcode.com/problems/next-permutation/
"""
class Solution:
def nextPermutation(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
best = None
for i in range(len(nums) - 1):
smallest = 9999999
for j in range(i + 1, len(nums)):
if (nums[i] < nums[j] and nums[j] < smallest):
smallest = nums[j]
best = i, j
if not best:
nums[:] = sorted(nums)
else:
i, j = best
nums[i], nums[j] = nums[j], nums[i]
nums[:] = nums[:i+1] + sorted(nums[i+1:])
def main():
sol = Solution()
# [4, 2, 2, 0, 0, 2, 3]
# [4, 2, 0, 2, 3, 0, 2]
# [4, 2, 0, 3, 0, 2, 2]
a = [4, 2, 0, 2, 3, 2, 0]
sol.nextPermutation(a)
print(a)
return 0
if __name__ == '__main__':
raise SystemExit(main())
| pisskidney/leetcode | medium/31.py | Python | mit | 1,015 |
from __future__ import print_function
import linecache
import sys
import numpy
from six import iteritems
from theano import config
from theano.compat import OrderedDict, PY3
def simple_extract_stack(f=None, limit=None, skips=[]):
"""This is traceback.extract_stack from python 2.7 with this change:
- Comment the update of the cache.
- Skip internal stack trace level.
The cache update calls os.stat to verify whether the cache is up
to date. This takes too much time on clusters.
limit - The number of stack levels we want to return. If None, return
as many as we can.
skips - Partial paths of stack levels we do not want to keep or count.
Once we find one level that isn't skipped, we stop skipping.
"""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
trace = []
n = 0
while f is not None and (limit is None or n < limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
# linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
line = line.strip()
else:
line = None
f = f.f_back
# Just skip inner level
if len(trace) == 0:
rm = False
for p in skips:
# Julian: I added the 'tests' exception together with
# Arnaud. Otherwise, we'd lose the stack trace during
# in our test cases (e.g. in test_opt.py). We're not
# sure this is the right way to do it though.
if p in filename and 'tests' not in filename:
rm = True
break
if rm:
continue
trace.append((filename, lineno, name, line))
n = n + 1
trace.reverse()
return trace
def add_tag_trace(thing, user_line=None):
"""
Add tag.trace to an node or variable.
The argument is returned after being modified (in place).
Parameters
----------
thing
The object where we add .tag.trace.
user_line
The max number of user line to keep.
Notes
-----
We also use config.traceback.limit for the maximum number of stack levels
we inspect.
"""
if user_line is None:
user_line = config.traceback.limit
if user_line == -1:
user_line = None
skips = ["theano/tensor/", "theano\\tensor\\",
"theano/compile/", "theano\\compile\\",
"theano/gof/", "theano\\gof\\",
"theano/scalar/basic.py", "theano\\scalar\\basic.py",
"theano/sandbox/", "theano\\sandbox\\",
"theano/scan_module/", "theano\\scan_module\\",
"theano/sparse/", "theano\\sparse\\",
"theano/typed_list/", "theano\\typed_list\\"]
tr = simple_extract_stack(limit=user_line, skips=skips)
# Different Python versions use different semantics for limit.
# Python 2.7 includes the call to extract_stack; the -1 gets rid of it.
if tr:
thing.tag.trace = [tr]
else:
thing.tag.trace = tr
return thing
def hashtype(self):
t = type(self)
return hash(t.__name__) ^ hash(t.__module__)
# Object to mark that a parameter is undefined (useful in cases where
# None is a valid value with defined semantics)
undef = object()
class MethodNotDefined(Exception):
"""
To be raised by functions defined as part of an interface.
When the user sees such an error, it is because an important interface
function has been left out of an implementation class.
"""
class object2(object):
__slots__ = []
if 0:
def __hash__(self):
# this fixes silent-error-prone new-style class behavior
if hasattr(self, '__eq__') or hasattr(self, '__cmp__'):
raise TypeError("unhashable object: %s" % self)
return id(self)
def __ne__(self, other):
return not self == other
class scratchpad:
def clear(self):
self.__dict__.clear()
def __update__(self, other):
self.__dict__.update(other.__dict__)
return self
def __str__(self):
return "scratchpad" + str(self.__dict__)
def __repr__(self):
return "scratchpad" + str(self.__dict__)
def info(self):
print("<theano.gof.utils.scratchpad instance at %i>" % id(self))
for k, v in iteritems(self.__dict__):
print(" %s: %s" % (k, v))
class D:
def __init__(self, **d):
self.__dict__.update(d)
def memoize(f):
"""
Cache the return value for each tuple of arguments (which must be hashable).
"""
cache = {}
def rval(*args, **kwargs):
kwtup = tuple(kwargs.items())
key = (args, kwtup)
if key not in cache:
val = f(*args, **kwargs)
cache[key] = val
else:
val = cache[key]
return val
return rval
def deprecated(filename, msg=''):
"""
Decorator which will print a warning message on the first call.
Use it like this::
@deprecated('myfile', 'do something different...')
def fn_name(...)
...
And it will print::
WARNING myfile.fn_name deprecated. do something different...
"""
def _deprecated(f):
printme = [True]
def g(*args, **kwargs):
if printme[0]:
print('WARNING: %s.%s deprecated. %s' %
(filename, f.__name__, msg))
printme[0] = False
return f(*args, **kwargs)
return g
return _deprecated
def uniq(seq):
"""
Do not use a set; this must always return the same value at the same index.
If other values are exchanged but the pattern of duplication stays the same,
the order must stay the same as well.
"""
# TODO: consider building a set out of seq so that the if condition
# is constant time -JB
return [x for i, x in enumerate(seq) if seq.index(x) == i]
def difference(seq1, seq2):
"""
Returns all elements in seq1 which are not in seq2, i.e. the set difference ``seq1 - seq2``.
"""
try:
# try to use O(const * len(seq1)) algo
if len(seq2) < 4: # I'm guessing this threshold -JB
raise Exception('not worth it')
set2 = set(seq2)
return [x for x in seq1 if x not in set2]
except Exception:
# maybe a seq2 element is not hashable
# maybe seq2 is too short
# -> use O(len(seq1) * len(seq2)) algo
return [x for x in seq1 if x not in seq2]
def to_return_values(values):
if len(values) == 1:
return values[0]
else:
return values
def from_return_values(values):
if isinstance(values, (list, tuple)):
return values
else:
return [values]
def toposort(prereqs_d):
"""
Sorts prereqs_d.keys() topologically.
prereqs_d[x] contains all the elements that must come before x
in the ordering.
"""
# all1 = set(prereqs_d.keys())
# all2 = set()
# for x, y in iteritems(prereqs_d):
# all2.update(y)
# print all1.difference(all2)
seq = []
done = set()
postreqs_d = {}
for x, prereqs in iteritems(prereqs_d):
for prereq in prereqs:
postreqs_d.setdefault(prereq, set()).add(x)
next = set([k for k in prereqs_d if not prereqs_d[k]])
while next:
bases = next
next = set()
for x in bases:
done.add(x)
seq.append(x)
for x in bases:
for postreq in postreqs_d.get(x, []):
if not prereqs_d[postreq].difference(done):
next.add(postreq)
if len(prereqs_d) != len(seq):
raise Exception("Cannot sort topologically: there might be cycles, "
"prereqs_d does not have a key for each element or "
"some orderings contain invalid elements.")
return seq
class Keyword:
def __init__(self, name, nonzero=True):
self.name = name
self.nonzero = nonzero
def __nonzero__(self):
# Python 2.x
return self.__bool__()
def __bool__(self):
# Python 3.x
return self.nonzero
def __str__(self):
return "<%s>" % self.name
def __repr__(self):
return "<%s>" % self.name
ABORT = Keyword("ABORT", False)
RETRY = Keyword("RETRY", False)
FAILURE = Keyword("FAILURE", False)
simple_types = (int, float, str, bool, None.__class__, Keyword)
ANY_TYPE = Keyword("ANY_TYPE")
FALL_THROUGH = Keyword("FALL_THROUGH")
def comm_guard(type1, type2):
def wrap(f):
old_f = f.__globals__[f.__name__]
def new_f(arg1, arg2, *rest):
if ((type1 is ANY_TYPE or isinstance(arg1, type1)) and
(type2 is ANY_TYPE or isinstance(arg2, type2))):
pass
elif ((type1 is ANY_TYPE or isinstance(arg2, type1)) and
(type2 is ANY_TYPE or isinstance(arg1, type2))):
arg1, arg2 = arg2, arg1
else:
return old_f(arg1, arg2, *rest)
variable = f(arg1, arg2, *rest)
if variable is FALL_THROUGH:
return old_f(arg1, arg2, *rest)
else:
return variable
new_f.__name__ = f.__name__
def typename(type):
if isinstance(type, Keyword):
return str(type)
elif isinstance(type, (tuple, list)):
return "(" + ", ".join([x.__name__ for x in type]) + ")"
else:
return type.__name__
new_f.__doc__ = (str(old_f.__doc__) + "\n" +
", ".join([typename(type)
for type in (type1, type2)]) +
"\n" + str(f.__doc__ or ""))
return new_f
return wrap
def type_guard(type1):
def wrap(f):
old_f = f.__globals__[f.__name__]
def new_f(arg1, *rest):
if (type1 is ANY_TYPE or isinstance(arg1, type1)):
variable = f(arg1, *rest)
if variable is FALL_THROUGH:
return old_f(arg1, *rest)
else:
return variable
else:
return old_f(arg1, *rest)
new_f.__name__ = f.__name__
def typename(type):
if isinstance(type, Keyword):
return str(type)
elif isinstance(type, (tuple, list)):
return "(" + ", ".join([x.__name__ for x in type]) + ")"
else:
return type.__name__
new_f.__doc__ = (str(old_f.__doc__) + "\n" +
", ".join([typename(type) for type in (type1,)]) +
"\n" + str(f.__doc__ or ""))
return new_f
return wrap
def flatten(a):
"""
Recursively flatten tuple, list and set in a list.
"""
if isinstance(a, (tuple, list, set)):
l = []
for item in a:
l.extend(flatten(item))
return l
else:
return [a]
def unique(x):
return len(set(x)) == len(x)
def hist(coll):
counts = {}
for elem in coll:
counts[elem] = counts.get(elem, 0) + 1
return counts
def give_variables_names(variables):
"""
Gives unique names to an iterable of variables. Modifies input.
This function is idempotent.
"""
names = [var.name for var in variables]
h = hist(names)
def bad_var(var):
return not var.name or h[var.name] > 1
for i, var in enumerate(filter(bad_var, variables)):
var.name = (var.name or "") + "_%d" % i
if not unique([str(v) for v in variables]):
raise ValueError("Not all variables have unique names. Maybe you've "
"named some of the variables identically")
return variables
def remove(predicate, coll):
"""
Return those items of collection for which predicate(item) is true.
Examples
--------
>>> def even(x):
... return x % 2 == 0
>>> remove(even, [1, 2, 3, 4])
[1, 3]
"""
return [x for x in coll if not predicate(x)]
if PY3:
import hashlib
def hash_from_code(msg):
# hashlib.md5() requires an object that supports buffer interface,
# but Python 3 (unicode) strings don't.
if isinstance(msg, str):
msg = msg.encode()
# Python 3 does not like module names that start with
# a digit.
return 'm' + hashlib.md5(msg).hexdigest()
else:
import hashlib
def hash_from_code(msg):
try:
return hashlib.md5(msg).hexdigest()
except TypeError:
assert isinstance(msg, numpy.ndarray)
return hashlib.md5(numpy.getbuffer(msg)).hexdigest()
def hash_from_file(file_path):
"""
Return the MD5 hash of a file.
"""
return hash_from_code(open(file_path, 'rb').read())
def hash_from_dict(d):
"""
Work around the fact that dicts are not hashable in Python.
This requires that all objects have a sort order that depends only
on the keys of the object. We support only integer/float/string keys.
Also, we transform values that are lists into tuples, as lists are not
hashable.
Notes
-----
Special case for OrderedDict: it uses the order of the dict,
so the keys don't need to be sortable.
"""
if isinstance(d, OrderedDict):
items = list(iteritems(d))
else:
items = list(d.items())
items.sort()
first_part = [k for k, v in items]
second_part = []
for k, v in items:
assert isinstance(k, (str, int, float))
if isinstance(v, (tuple, list)):
second_part += [tuple(v)]
else:
second_part += [v]
tuple_items = tuple(first_part + second_part + [d.__class__])
return hash(tuple_items)
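# A small usage sketch appended for illustration; it only runs when this module
# is executed directly, and the values below are made up.
if __name__ == "__main__":
    # For toposort, prereqs_d[x] lists everything that must come before x.
    example_prereqs = {"a": set(), "b": set(["a"]), "c": set(["a", "b"])}
    print(toposort(example_prereqs))   # ['a', 'b', 'c']
    print(uniq([1, 2, 1, 3, 2]))       # [1, 2, 3]
    print(flatten((1, [2, (3, 4)])))   # [1, 2, 3, 4]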
| rizar/attention-lvcsr | libs/Theano/theano/gof/utils.py | Python | mit | 14,120 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnLinkConnectionsOperations:
"""VpnLinkConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_vpn_connection(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ListVpnSiteLinkConnectionsResult"]:
"""Retrieves all vpn site link connections for a particular virtual wan vpn gateway vpn
connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnSiteLinkConnectionsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.ListVpnSiteLinkConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSiteLinkConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_connection.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSiteLinkConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_vpn_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/aio/operations/_vpn_link_connections_operations.py | Python | mit | 5,986 |
'''
TD Sequence (setup count) implemented as a class.
'''
import numpy as np
import pandas as pd
import json
import pandas.io.data as web
from datetime import date, datetime, timedelta
from collections import defaultdict
class TDSequence(object):
def __init__(self, data):
self.data = data
def sequence(self):
setup = self.data.iloc[-1]['Close'] - self.data.iloc[-1-4]['Close']
buy_setup = True
buy_counter = 1
sell_counter = -1
if setup < 0:
'''buy setup'''
buy_setup = True
elif setup > 0:
'''sell setup'''
buy_setup = False
for i in xrange(1,(len(self.data))):
if buy_setup:
buy = self.data.iloc[-1-i]['Close'] - self.data.iloc[-5-i]['Close']
if buy < 0:
buy_counter += 1
if buy_counter > 9:
'''failed to reverse, reset buy counter back to 1'''
buy_counter = 1
if buy_counter == 9 and ((self.data.iloc[-2-i]['Close'] - self.data.iloc[-6-i]['Close'])>0):
if ((self.data.iloc[-1]['Low'] <= self.data.iloc[-3]['Low']) and (self.data.iloc[-1]['Low'] <= self.data.iloc[-4]['Low'])) or \
((self.data.iloc[-2]['Low'] <= self.data.iloc[-3]['Low']) and (self.data.iloc[-2]['Low'] <= self.data.iloc[-4]['Low'])):
buy_counter = 10
return buy_counter
else:
return buy_counter
else:
if (buy_counter == 8) and ((self.data.iloc[-2]['Low'] <= self.data.iloc[-3]['Low']) and (self.data.iloc[-2]['Low'] <= self.data.iloc[-4]['Low'])):
buy_counter = 8.5
return 8.5
else:
return buy_counter
else:
sell = self.data.iloc[-1-i]['Close'] - self.data.iloc[-5-i]['Close']
if sell > 0:
sell_counter -= 1
if sell_counter < -9:
'''failed to reverse, reset sell counter back to -1'''
sell_counter = -1
if sell_counter == -9 and ((self.data.iloc[-2-i]['Close'] - self.data.iloc[-6-i]['Close'])<0):
if ((self.data.iloc[-1]['High'] > self.data.iloc[-3]['High']) and (self.data.iloc[-1]['High'] > self.data.iloc[-4]['High'])) or \
((self.data.iloc[-2]['High'] > self.data.iloc[-3]['High']) and (self.data.iloc[-2]['High'] > self.data.iloc[-4]['High'])):
sell_counter = -10
return sell_counter
else:
return sell_counter
else:
if sell_counter == -8 and ((self.data.iloc[-2]['High'] > self.data.iloc[-3]['High']) and (self.data.iloc[-2]['High'] > self.data.iloc[-4]['High'])):
sell_counter = -8.5
return -8.5
else:
return sell_counter
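# Note: sequence() returns the TD setup count: positive values are buy-setup
# counts (9 marks a completed buy setup, 10 a perfected one, 8.5 the special
# early-perfection case), and the negative values -8.5/-9/-10 are the
# sell-side equivalents.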
| kennethcc2005/yahoo_finance_stocks | td_sequence.py | Python | mit | 3,188 |
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class User(Base):
"""docstring for User"""
__tablename__ = 'users'
id = Column(Integer, primary_key = True)
name = Column(String(250), nullable = False)
email = Column(String(250), nullable = False)
picture = Column(String(250))
role = Column(String(5))
class Category(Base):
"""docstring for Category"""
__tablename__ = 'category'
id = Column(Integer, primary_key = True)
name = Column(String(80), nullable = False)
@property
def serialize(self):
return {
'id': self.id,
'name': self.name
}
class Item(Base):
"""docstring for Item"""
__tablename__ = 'item'
id = Column(Integer, primary_key = True)
title = Column(String(250), nullable = False)
description = Column(String())
picture = Column(String(250))
price = Column(String(10))
category_id = Column(Integer, ForeignKey('category.id'))
category = relationship(Category)
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(User)
@property
def serialize(self):
return {
'id': self.id,
'title': self.title,
'description': self.description,
'picture': self.picture,
'category_id': self.category_id
}
engine = create_engine('postgresql://catalog:catalog@localhost/catalog')
Base.metadata.create_all(engine)
| Morenito88/full_stack_p5 | full_stack_p3/db/setup.py | Python | mit | 1,516 |
from ..base import ShopifyResource
class GiftCardAdjustment(ShopifyResource):
_prefix_source = "/admin/gift_cards/$gift_card_id/"
_plural = "adjustments"
_singular = "adjustment"
| Shopify/shopify_python_api | shopify/resources/gift_card_adjustment.py | Python | mit | 193 |
#!/usr/bin/python
#Master-Thesis dot parsing framework (PING MODULE)
#Date: 14.01.2014
#Author: Bruno-Johannes Schuetze
#uses python 2.7.6
#uses the djikstra algorithm implemented by David Eppstein
#Module does calculations to behave similarly to ping, using the delay label defined in the dot file
from libraries.dijkstra import *
def getSingleValue(src, dst, edgeCostHash):
return edgeCostHash[(src*100000)+dst]
def getPathTotal(start, end, edgeCostHash, networkDict):
#get shortest path between start and end
shortPathList = shortestPath(networkDict, start, end)
print "WE PINGING SHAWTY", shortPathList
| bschutze/ALTO-framework-sim | Views/ping_.py | Python | mit | 611 |
import _plotly_utils.basevalidators
class SmoothingValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="smoothing", parent_name="contourcarpet.line", **kwargs
):
super(SmoothingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
max=kwargs.pop("max", 1.3),
min=kwargs.pop("min", 0),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/contourcarpet/line/_smoothing.py | Python | mit | 505 |
import sys
class outPip(object):
def __init__(self, fileDir):
self.fileDir = fileDir
self.console = sys.stdout
def write(self, s):
self.console.write(s)
with open(self.fileDir, 'a') as f: f.write(s)
def flush(self):
self.console.flush()
new_input = input
def inPip(fileDir):
def _input(hint):
s = new_input(hint)
with open(fileDir, 'a') as f: f.write(s)
return s
return _input
sys.stdout = outPip('out.log')
input = inPip('out.log')
print('This will appear on your console and your file.')
print('So is this line.')
input('yo')
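# Note: outPip tees sys.stdout to both the console and out.log by re-opening
# the log file in append mode on every write; inPip records what the user
# types (but not the prompt itself) to the same log file.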
| littlecodersh/EasierLife | Scripts/LogInput&Output/py3.py | Python | mit | 642 |
__all__ = ["wordlists", "roles", "bnc", "processes", "verbs",
"uktous", "tagtoclass", "queries", "mergetags"]
from corpkit.dictionaries.bnc import _get_bnc
from corpkit.dictionaries.process_types import processes
from corpkit.dictionaries.process_types import verbs
from corpkit.dictionaries.roles import roles
from corpkit.dictionaries.wordlists import wordlists
from corpkit.dictionaries.queries import queries
from corpkit.dictionaries.word_transforms import taglemma
from corpkit.dictionaries.word_transforms import mergetags
from corpkit.dictionaries.word_transforms import usa_convert
roles = roles
wordlists = wordlists
processes = processes
bnc = _get_bnc
queries = queries
tagtoclass = taglemma
uktous = usa_convert
mergetags = mergetags
verbs = verbs
| interrogator/corpkit | corpkit/dictionaries/__init__.py | Python | mit | 774 |
class SpellPickerController:
def render(self):
pass
_controller_class = SpellPickerController
| battlemidget/conjure-up | conjureup/controllers/spellpicker/tui.py | Python | mit | 108 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Template"""
from os import path
import jinja2
from jinja2 import FileSystemLoader, ChoiceLoader
from jinja2.exceptions import TemplateNotFound
import peanut
from peanut.utils import get_resource
class SmartLoader(FileSystemLoader):
"""A smart template loader"""
available_extension = ['.html', '.xml']
def get_source(self, environment, template):
if template is None:
raise TemplateNotFound(template)
if '.' in template:
return super(SmartLoader, self).get_source(environment, template)
for extension in SmartLoader.available_extension:
try:
filename = template + extension
return super(SmartLoader, self).get_source(environment, filename)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
class Template(object):
"""Template"""
def __init__(self, path, filters=None, **kwargs):
loader = ChoiceLoader([
SmartLoader(path),
SmartLoader(get_resource('themes/default')),
])
self.env = jinja2.Environment(
loader=loader,
lstrip_blocks=True,
trim_blocks=True,
)
# Update filters
if isinstance(filters, dict):
self.env.filters.update(filters)
# Update global namespace
self.env.globals.update(kwargs)
def update_context(self, **kwargs):
"""Update global context
"""
self.env.globals.update(kwargs)
def render(self, name, **context):
"""Render template with name and context
"""
template = self.env.get_template(name)
return template.render(**context)
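if __name__ == '__main__':
    # Minimal usage sketch with a throwaway theme directory (illustrative only;
    # the template name and variables below are made up).
    import tempfile
    theme_dir = tempfile.mkdtemp()
    with open(path.join(theme_dir, 'hello.html'), 'w') as f:
        f.write('Hello {{ name }}!')
    demo = Template(theme_dir)
    print(demo.render('hello', name='world'))  # SmartLoader resolves hello -> hello.html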
| zqqf16/Peanut | peanut/template.py | Python | mit | 1,770 |
from optparse import OptionParser
import simplejson as json
import spotify_client
import datatype
import datetime
import time
import calendar
import wiki
import omni_redis
def migrate_v1(path_in, path_out):
client = spotify_client.Client()
uris = []
with open(path_in, 'rb') as f:
for line in f:
doc = json.loads(line)
uris.append(doc['u'])
tracks = client.track_data(uris)
with open(path_out, 'wb') as f:
for t in tracks:
ts = calendar.timegm(datetime.datetime.now().utctimetuple())
t.meta = datatype.Meta(date_added=ts, last_modified=ts)
f.write('%s\n' % json.dumps(t._to_dict()))
def migrate_v2(path_in, view):
with open(path_in, 'rb') as f:
tracks = [datatype.track_from_dict(json.loads(line)) for line in f]
for t in tracks:
t.meta.date_added = t.meta.date_added or int(round(time.time()))
t.meta.last_modified = t.meta.last_modified or int(round(time.time()))
print 'putting %d tracks' % len(tracks)
omni_redis.put_view('default', view, tracks)
migrate = migrate_v2
def add_countries(path_in, path_out):
tracks = []
artist_countries = {}
with open(path_in, 'rb') as f:
for line in f:
doc = json.loads(line)
tracks.append(doc)
artist_countries[doc['a']['n']] = None
for i,artist in enumerate(artist_countries.iterkeys()):
artist_countries[artist]=wiki.country_for_artist(artist)
print '%d/%d %s: %s' % (i+1, len(artist_countries), artist, artist_countries[artist])
with open(path_out, 'wb') as f:
for t in tracks:
t['a']['c'] = artist_countries[t['a']['n']]
f.write('%s\n' % json.dumps(t))
def main():
parser = OptionParser()
parser.add_option('-i', dest='input')
parser.add_option('-o', dest='output')
parser.add_option('-w', dest='wiki', action="store_true")
options, args = parser.parse_args()
if options.wiki:
add_countries(options.input, options.output)
else:
migrate(options.input, options.output)
if __name__ == '__main__':
main()
| smershon/omnisonica | omnisonica/clients/migrate.py | Python | mit | 2,160 |
import json
from PIL import Image
import collections
with open('../config/nodes.json') as data_file:
nodes = json.load(data_file)
# empty fucker
ordered_nodes = [None] * len(nodes)
# populate fucker
for i, pos in nodes.items():
ordered_nodes[int(i)] = [pos['x'], pos['y']]
filename = "04_rgb_vertical_lines"
im = Image.open("../gif_generators/output/"+filename+".gif") #Can be many different formats.
target_size = 400, 400
resize = False
if target_size != im.size:
resize = True
data = []
# To iterate through the entire gif
try:
frame_num = 0
while True:
im.seek(frame_num)
frame_data = []
# do something to im
img = im.convert('RGB')
if resize == True:
print "Resizing"
img.thumbnail(target_size, Image.ANTIALIAS)
for x, y in ordered_nodes:
frame_data.append(img.getpixel((x, y)))
#print r, g, b
data.append(frame_data)
# write to json
print frame_num
frame_num+=1
except EOFError:
pass # end of sequence
#print data
#print r, g, b
with open(filename+'.json', 'w') as outfile:
json.dump({
"meta": {},
"data": data
}, outfile)
print im.size # Get the width and height of the image for iterating over
#print img.getpixel((x, y)) # Get the RGB value of a pixel of the image
| Ibuprofen/gizehmoviepy | gif_parsers/read_rgb.py | Python | mit | 1,281 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class GoogleAppSetup(Document):
pass
| saurabh6790/google_integration | google_integration/google_connect/doctype/google_app_setup/google_app_setup.py | Python | mit | 280 |
from node import Node
from fern.ast.tools import simplify, ItemStream
from fern.primitives import Undefined
class List(Node):
def __init__(self):
Node.__init__(self)
self.children = []
self.value = None
def put(self, thingy):
if isinstance(thingy, ItemStream):
for it in thingy:
self.put_item(it)
else:
self.put_item(thingy)
def put_item(self, item):
if item is not Undefined:
self.reparent(item)
self.children.append(item)
self.invalidate()
def __getitem__(self, index):
self.refresh()
return self.value[index]
def refresh_impl(self):
self.value = []
for child in self.children:
result = simplify(child)
if isinstance(result, ItemStream):
for it in result:
self.value.append(it)
else:
self.value.append(result)
def get_children(self):
return self.children
| andrewf/fern | fern/ast/list.py | Python | mit | 1,034 |
# -*- coding: utf-8 -*-
#
from __future__ import print_function
import warnings
import numpy
import pytest
import sympy
from dolfin import (
MPI,
Constant,
DirichletBC,
Expression,
FunctionSpace,
UnitSquareMesh,
errornorm,
pi,
triangle,
)
import helpers
import matplotlib.pyplot as plt
from maelstrom import heat
MAX_DEGREE = 5
def problem_sinsin():
"""cosine example.
"""
def mesh_generator(n):
return UnitSquareMesh(n, n, "left/right")
x = sympy.DeferredVector("x")
# Choose the solution such that the boundary conditions are fulfilled
# exactly. Also, multiply with x**2 to make sure that the right-hand side
# doesn't contain the term 1/x. Although it looks like a singularity at
# x=0, this term is essentially harmless since the volume element 2*pi*x is
# used throughout the code, canceling out with the 1/x. However, Dolfin has
# problems with this, cf.
# <https://bitbucket.org/fenics-project/dolfin/issues/831/some-problems-with-quadrature-expressions>.
solution = {
"value": x[0] ** 2 * sympy.sin(pi * x[0]) * sympy.sin(pi * x[1]),
"degree": MAX_DEGREE,
}
# Produce a matching right-hand side.
phi = solution["value"]
kappa = 2.0
rho = 3.0
cp = 5.0
conv = [1.0, 2.0]
rhs_sympy = sympy.simplify(
-1.0 / x[0] * sympy.diff(kappa * x[0] * sympy.diff(phi, x[0]), x[0])
- 1.0 / x[0] * sympy.diff(kappa * x[0] * sympy.diff(phi, x[1]), x[1])
+ rho * cp * conv[0] * sympy.diff(phi, x[0])
+ rho * cp * conv[1] * sympy.diff(phi, x[1])
)
rhs = {
"value": Expression(helpers.ccode(rhs_sympy), degree=MAX_DEGREE),
"degree": MAX_DEGREE,
}
return mesh_generator, solution, rhs, triangle, kappa, rho, cp, Constant(conv)
@pytest.mark.parametrize("problem", [problem_sinsin])
@pytest.mark.parametrize("stabilization", [None, "supg"])
def test_order(problem, stabilization):
"""Assert the correct discretization order.
"""
mesh_sizes = [16, 32, 64]
errors, hmax = _compute_errors(problem, mesh_sizes, stabilization)
# Compute the numerical order of convergence.
order = helpers.compute_numerical_order_of_convergence(hmax, errors)
# The test is considered passed if the numerical order of convergence
# matches the expected order at least for the first step on the coarsest
# spatial discretization, and does not get worse as the spatial
# discretization is refined.
tol = 0.1
expected_order = 2.0
assert (order > expected_order - tol).all()
return
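def _numerical_order_sketch(hmax, errors):
    # Illustrative sketch (an assumption, not the helpers implementation): the
    # observed convergence order between two refinements (h1, e1) -> (h2, e2)
    # is log(e2 / e1) / log(h2 / h1).
    hmax = numpy.asarray(hmax, dtype=float)
    errors = numpy.asarray(errors, dtype=float)
    return numpy.log(errors[1:] / errors[:-1]) / numpy.log(hmax[1:] / hmax[:-1])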
def _compute_errors(problem, mesh_sizes, stabilization):
mesh_generator, solution, f, cell_type, kappa, rho, cp, conv = problem()
if solution["degree"] > MAX_DEGREE:
warnings.warn(
"Expression degree ({}) > maximum degree ({}). Truncating.".format(
solution["degree"], MAX_DEGREE
)
)
degree = MAX_DEGREE
else:
degree = solution["degree"]
sol = Expression(
helpers.ccode(solution["value"]), t=0.0, degree=degree, cell=cell_type
)
errors = numpy.empty(len(mesh_sizes))
hmax = numpy.empty(len(mesh_sizes))
for k, mesh_size in enumerate(mesh_sizes):
mesh = mesh_generator(mesh_size)
hmax[k] = MPI.max(MPI.comm_world, mesh.hmax())
Q = FunctionSpace(mesh, "CG", 1)
prob = heat.Heat(
Q,
kappa=kappa,
rho=rho,
cp=cp,
convection=conv,
source=f["value"],
dirichlet_bcs=[DirichletBC(Q, 0.0, "on_boundary")],
stabilization=stabilization,
)
phi_approx = prob.solve_stationary()
errors[k] = errornorm(sol, phi_approx)
return errors, hmax
def _show_order_info(problem, mesh_sizes, stabilization):
"""Performs consistency check for the given problem/method combination and
show some information about it. Useful for debugging.
"""
errors, hmax = _compute_errors(problem, mesh_sizes, stabilization)
order = helpers.compute_numerical_order_of_convergence(hmax, errors)
# Print the data
print()
print("hmax ||u - u_h|| conv. order")
print("{:e} {:e}".format(hmax[0], errors[0]))
for j in range(len(errors) - 1):
print(32 * " " + "{:2.5f}".format(order[j]))
print("{:e} {:e}".format(hmax[j + 1], errors[j + 1]))
# Plot the actual data.
plt.loglog(hmax, errors, "-o")
# Compare with order curves.
plt.autoscale(False)
e0 = errors[0]
for order in range(4):
plt.loglog(
[hmax[0], hmax[-1]], [e0, e0 * (hmax[-1] / hmax[0]) ** order], color="0.7"
)
plt.xlabel("hmax")
plt.ylabel("||u-u_h||")
plt.show()
return
if __name__ == "__main__":
# mesh_sizes_ = [16, 32, 64, 128]
# _show_order_info(problem_sinsin, mesh_sizes_, None)
test_order(problem_sinsin, "supg")
| nschloe/maelstrom | test/test_poisson_order.py | Python | mit | 4,993 |
class Solution(object):
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
def recurhelper(nums,res,path,target,start):
if target==0:
res.append(path)
return
if target<0:
return
if target>0:
for i in xrange(start,len(nums)):
if nums[i]<=target:
recurhelper(nums,res,path+[nums[i]],target-nums[i],i)
res=[]
candidates.sort()
recurhelper(candidates,res,[],target,0)
return res
| Tanych/CodeTracking | 39-Combination-Sum/solution.py | Python | mit | 677 |
#!/usr/bin/env python
"""Converts the .ics/.ical file into a FullCalendar compatiable JSON file
FullCalendar uses a specific JSON format similar to iCalendar format. This
script creates a JSON file containing renamed event components. Only the
title, description, start/end time, and url data are used. Does not support
repeating events.
"""
import sys
import json
__import__('pytz')
__import__('icalendar')
from icalendar import Calendar
__author__ = "Andy Yin"
__copyright__ = "Copyright (C) 2015, Andy Yin"
__credits__ = ["Eddie Blundell"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Andy Yin"
__email__ = "[email protected]"
__status__ = "Production"
# quit if the arguments are incorrect, and prints a usage
if (len(sys.argv) != 2 and len(sys.argv) != 3):
print sys.argv[0] + ': illegal operation'
print 'usage: python ' + sys.argv[0] + ' file [output]'
exit(1)
# default output filename (just adds .json extension on the given file)
out_file = sys.argv[1] + '.json'
if (len(sys.argv) == 3):
# changes output filename to the 2nd arugment
out_file = sys.argv[2]
# opens the input .ics file and parses it as iCalendar Calendar object
ics_file = open(sys.argv[1],'rb')
ics_cal = Calendar.from_ical(ics_file.read())
# array of event information
result = []
for component in ics_cal.walk():
if component.name == "VEVENT":
# set containing all the events
event = {
'title':component.get('summary'),
'backgroundColor':component.get('location'),
'description':component.get('description'),
'start':component.decoded('dtstart').isoformat(),
'end':component.decoded('dtend').isoformat(),
'url':component.get('url')
}
# append to the result array
result.append(event)
ics_file.close()
# saves the result using jsonify
json_out = open(out_file, 'w')
json_out.write(json.dumps(result, sort_keys = False, indent = 4))
json_out.close()
| ForTheYin/ical2fullcalendar | ics-convert.py | Python | mit | 1,883 |
from __future__ import absolute_import, unicode_literals
from django.core.paginator import Paginator
from django.core import urlresolvers
from django.utils.html import mark_safe, escape
import django_tables2 as tables
from django_tables2.tables import Table
from django_tables2.utils import Accessor as A, AttributeDict
class ActionsColumn(tables.Column):
"""
This column allows you to pass in a list of links that will form an Action Column
"""
empty_values = ()
links = None
delimiter = None
def __init__(self, links=None, delimiter=' | ', **kwargs):
super(ActionsColumn, self).__init__(**kwargs)
self.orderable = False
self.delimiter = delimiter
if links is not None:
self.links = links
def render(self, value, record, bound_column):
if not self.links:
raise NotImplementedError('Links not assigned.')
if not isinstance(self.links, (list, tuple,dict)):
raise NotImplementedError('Links must be an iterable.')
links = []
for link in self.links:
title = link['title']
url = link['url']
attrs = link['attrs'] if 'attrs' in link else None
if 'args' in link:
args = [a.resolve(record) if isinstance(a, A) else a for a in link['args']]
else:
args = None
attrs = AttributeDict(attrs if attrs is not None else self.attrs.get('a', {}))
try:
attrs['href'] = urlresolvers.reverse(url, args=args)
except urlresolvers.NoReverseMatch:
attrs['href'] = url
links.append('<a {attrs}>{text}</a>'.format(
attrs=attrs.as_html(),
text=mark_safe(title)
))
return mark_safe(self.delimiter.join(links))
class PaginateTable(Table):
"""Generic table class that makes use of Django's built in paginate functionality"""
def __init__(self, *args, **kwargs):
super(PaginateTable, self).__init__(*args, **kwargs)
self.template = kwargs.get('template', 'fancy_paged_tables/table.html')
def paginate(self, klass=Paginator, per_page=None, page=1, *args, **kwargs):
"""
Paginates the table using a paginator and creates a ``page`` property
containing information for the current page.
:type klass: Paginator class
:param klass: a paginator class to paginate the results
:type per_page: `int`
:param per_page: how many records are displayed on each page
:type page: `int`
:param page: which page should be displayed.
Extra arguments are passed to the paginator.
Pagination exceptions (`~django.core.paginator.EmptyPage` and
`~django.core.paginator.PageNotAnInteger`) may be raised from this
method and should be handled by the caller.
"""
self.per_page_options = [20, 50, 100, 200] # This should probably be a passed in option
self.per_page = per_page = per_page or self._meta.per_page
self.paginator = klass(self.rows, per_page, *args, **kwargs)
self.page = self.paginator.page(page)
# Calc variables for use in displaying first, adjacent, and last page links
adjacent_pages = 1 # This should probably be a passed in option
# Starting page (first page between the ellipsis)
start_page = max(self.page.number - adjacent_pages, 1)
if start_page <= 3:
start_page = 1
# Ending page (last page between the ellipsis)
end_page = self.page.number + adjacent_pages + 1
if end_page >= self.paginator.num_pages - 1:
end_page = self.paginator.num_pages + 1
# Paging vars used in template
self.page_numbers = [n for n in range(start_page, end_page) if 0 < n <= self.paginator.num_pages]
self.show_first = 1 not in self.page_numbers
self.show_last = self.paginator.num_pages not in self.page_numbers
| naphthalene/hubcave | hubcave/core/mixins/tables.py | Python | mit | 4,047 |
# -*- coding: utf-8 -*-
import copy
from ruamel.yaml import YAML
from six import iteritems
_required = ['server']
class Config(object):
def __init__(self, configFile):
self.configFile = configFile
self._configData = {}
self.yaml = YAML()
self._inBaseConfig = []
def loadConfig(self):
configData = self._readConfig(self.configFile)
self._validate(configData)
self._configData = configData
def _readConfig(self, fileName):
try:
with open(fileName, mode='r') as config:
configData = self.yaml.load(config)
if not configData:
configData = {}
# if this is the base server config, store what keys we loaded
if fileName == self.configFile:
self._inBaseConfig = list(configData.keys())
except Exception as e:
raise ConfigError(fileName, e)
if 'import' not in configData:
return configData
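        # Illustrative include (hypothetical file names): a base config with
        #     import:
        #       - admins
        #       - channels
        # pulls in configs/admins.yaml and configs/channels.yaml below; scalar
        # keys already set in the base file win, lists and dicts are merged.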
for fname in configData['import']:
includeConfig = self._readConfig('configs/{}.yaml'.format(fname))
for key, val in iteritems(includeConfig):
# not present in base config, just assign it
if key not in configData:
configData[key] = val
continue
# skip non-collection types that are already set
if isinstance(configData[key], (str, int)):
continue
if isinstance(val, str):
raise ConfigError(fname, 'The included config file tried '
'to merge a non-string with a '
'string')
try:
iter(configData[key])
iter(val)
except TypeError:
# not a collection, so just don't merge them
pass
else:
try:
# merge with + operator
configData[key] += val
except TypeError:
# dicts can't merge with +
try:
for subKey, subVal in iteritems(val):
if subKey not in configData[key]:
configData[key][subKey] = subVal
except (AttributeError, TypeError):
# if either of these, they weren't both dicts.
raise ConfigError(fname, 'The variable {!r} could '
'not be successfully '
'merged'.format(key))
return configData
def writeConfig(self):
# filter the configData to only those keys
# that were present in the base server config,
# or have been modified at runtime
configData = copy.deepcopy(self._configData)
to_delete = set(configData.keys()).difference(self._inBaseConfig)
for key in to_delete:
del configData[key]
# write the filtered configData
try:
with open(self.configFile, mode='w') as config:
self.yaml.dump(configData, config)
except Exception as e:
raise ConfigError(self.configFile, e)
def getWithDefault(self, key, default=None):
if key in self._configData:
return self._configData[key]
return default
def _validate(self, configData):
for key in _required:
if key not in configData:
raise ConfigError(self.configFile, 'Required item {!r} was not found in the config.'.format(key))
def __len__(self):
return len(self._configData)
def __iter__(self):
return iter(self._configData)
def __getitem__(self, key):
return self._configData[key]
def __setitem__(self, key, value):
# mark this key to be saved in the server config
if key not in self._inBaseConfig:
self._inBaseConfig.append(key)
self._configData[key] = value
def __contains__(self, key):
return key in self._configData
class ConfigError(Exception):
def __init__(self, configFile, message):
self.configFile = configFile
self.message = message
def __str__(self):
return 'An error occurred while reading config file {}: {}'.format(self.configFile,
self.message)
| MatthewCox/PyMoronBot | pymoronbot/config.py | Python | mit | 4,668 |
"""Requirements specific to SQLAlchemy's own unit tests.
"""
from sqlalchemy import util
import sys
from sqlalchemy.testing.requirements import SuiteRequirements
from sqlalchemy.testing import exclusions
from sqlalchemy.testing.exclusions import \
skip, \
skip_if,\
only_if,\
only_on,\
fails_on_everything_except,\
fails_on,\
fails_if,\
succeeds_if,\
SpecPredicate,\
against,\
LambdaPredicate,\
requires_tag
def no_support(db, reason):
return SpecPredicate(db, description=reason)
def exclude(db, op, spec, description=None):
return SpecPredicate(db, op, spec, description=description)
class DefaultRequirements(SuiteRequirements):
@property
def deferrable_or_no_constraints(self):
"""Target database must support deferrable constraints."""
return skip_if([
no_support('firebird', 'not supported by database'),
no_support('mysql', 'not supported by database'),
no_support('mssql', 'not supported by database'),
])
@property
def check_constraints(self):
"""Target database must support check constraints."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return skip_if([
no_support('sqlite', 'not supported by database'),
])
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return skip_if(
no_support('sqlite', 'not supported by database')
)
@property
def on_update_cascade(self):
"""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return skip_if(
['sqlite', 'oracle'],
'target backend %(doesnt_support)s ON UPDATE CASCADE'
)
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') + \
skip_if('mssql')
@property
def deferrable_fks(self):
"""target database must support deferrable fks"""
return only_on(['oracle'])
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return skip_if([
"firebird", "oracle", "mysql"
], "not supported by database"
)
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return skip_if([
no_support('firebird', 'not supported by database'),
no_support('oracle', 'not supported by database'),
no_support('mssql', 'not supported by database'),
no_support('sybase', 'not supported by database'),
])
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return skip_if(["firebird", "mssql+mxodbc"],
"not supported by driver")
@property
def identity(self):
"""Target database must support GENERATED AS IDENTITY or a facsimile.
Includes GENERATED AS IDENTITY, AUTOINCREMENT, AUTO_INCREMENT, or other
column DDL feature that fills in a DB-generated identifier at INSERT-time
without requiring pre-execution of a SEQUENCE or other artifact.
"""
return skip_if(["firebird", "oracle", "postgresql", "sybase"],
"not supported by database"
)
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return skip_if(
["mssql", "firebird"], "not supported (?)"
)
@property
def temp_table_reflection(self):
return self.temporary_tables
@property
def reflectable_autoincrement(self):
"""Target database must support tables that can automatically generate
PKs assuming they were reflected.
this is essentially all the DBs in "identity" plus Postgresql, which
has SERIAL support. FB and Oracle (and sybase?) require the Sequence to
be explicitly added, including if the table was reflected.
"""
return skip_if(["firebird", "oracle", "sybase"],
"not supported by database"
)
@property
def insert_from_select(self):
return skip_if(
["firebird"], "crashes for unknown reason"
)
@property
def fetch_rows_post_commit(self):
return skip_if(
["firebird"], "not supported"
)
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return skip_if(["oracle", "mssql"],
"not supported by database/driver"
)
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
# adding mssql here since it doesn't support comparisons either,
# have observed generally bad behavior with binary / mssql.
return skip_if(["oracle", "mssql"],
"not supported by database/driver"
)
@property
def independent_cursors(self):
"""Target must support simultaneous, independent database cursors
on a single connection."""
return skip_if(["mssql+pyodbc", "mssql+mxodbc"], "no driver support")
@property
def independent_connections(self):
"""Target must support simultaneous, independent database connections."""
# This is also true of some configurations of UnixODBC and probably win32
# ODBC as well.
return skip_if([
no_support("sqlite",
"independent connections disabled "
"when :memory: connections are used"),
exclude("mssql", "<", (9, 0, 0),
"SQL Server 2005+ is required for "
"independent connections"
)
]
)
@property
def updateable_autoincrement_pks(self):
"""Target must support UPDATE on autoincrement/integer primary key."""
return skip_if(["mssql", "sybase"],
"IDENTITY columns can't be updated")
@property
def isolation_level(self):
return only_on(
('postgresql', 'sqlite', 'mysql'),
"DBAPI has no isolation level support"
) + fails_on('postgresql+pypostgresql',
'pypostgresql bombs on multiple isolation level calls')
@property
def row_triggers(self):
"""Target must support standard statement-running EACH ROW triggers."""
return skip_if([
# no access to same table
no_support('mysql', 'requires SUPER priv'),
exclude('mysql', '<', (5, 0, 10), 'not supported by database'),
# huh? TODO: implement triggers for PG tests, remove this
no_support('postgresql',
'PG triggers need to be implemented for tests'),
])
@property
def correlated_outer_joins(self):
"""Target must support an outer join to a subquery which
correlates to the parent."""
return skip_if("oracle", 'Raises "ORA-01799: a column may not be '
'outer-joined to a subquery"')
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return only_on(['postgresql', 'mssql', 'mysql'],
"Backend does not support UPDATE..FROM")
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE where the same table is present in a
subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return fails_if('mysql', 'MySQL error 1093 "Cant specify target table '
'for update in FROM clause"')
@property
def savepoints(self):
"""Target database must support savepoints."""
return skip_if([
"sqlite",
"sybase",
("mysql", "<", (5, 0, 3)),
], "savepoints not supported")
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return skip_if([
"sqlite",
"firebird"
], "no schema support")
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema foreign keys
"""
return only_on([
"postgresql"
])
@property
def unique_constraint_reflection(self):
return fails_on_everything_except(
"postgresql",
"mysql",
"sqlite"
)
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return only_on(['sqlite', 'oracle'])
@property
def temporary_views(self):
"""target database supports temporary views"""
return only_on(['sqlite', 'postgresql'])
@property
def update_nowait(self):
"""Target database must support SELECT...FOR UPDATE NOWAIT"""
return skip_if(["firebird", "mssql", "mysql", "sqlite", "sybase"],
"no FOR UPDATE NOWAIT support"
)
@property
def subqueries(self):
"""Target database must support subqueries."""
return skip_if(exclude('mysql', '<', (4, 1, 1)), 'no subquery support')
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return only_if(
['mysql', 'sqlite', 'postgresql+psycopg2', 'mssql']
)
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return fails_if([
"firebird", "mysql", "sybase",
], 'no support for INTERSECT')
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return fails_if([
"firebird", "mysql", "sybase",
], 'no support for EXCEPT')
@property
def parens_in_union_contained_select(self):
"""Target database must support parenthesized SELECT in UNION.
E.g. (SELECT ...) UNION (SELECT ..)
"""
return fails_if('sqlite')
@property
def offset(self):
"""Target database must support some method of adding OFFSET or
equivalent to a result set."""
return fails_if([
"sybase"
], 'no support for OFFSET or equivalent')
@property
def window_functions(self):
return only_if([
"postgresql>=8.4", "mssql", "oracle"
], "Backend does not support window functions")
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return skip_if([
no_support('firebird', 'no SA implementation'),
no_support('mssql', 'two-phase xact not supported by drivers'),
no_support('oracle', 'two-phase xact not implemented in SQLA/oracle'),
no_support('drizzle', 'two-phase xact not supported by database'),
no_support('sqlite', 'two-phase xact not supported by database'),
no_support('sybase', 'two-phase xact not supported by drivers/SQLA'),
no_support('postgresql+zxjdbc',
'FIXME: JDBC driver confuses the transaction state, may '
'need separate XA implementation'),
exclude('mysql', '<', (5, 0, 3),
'two-phase xact not supported by database'),
])
@property
def views(self):
"""Target database must support VIEWs."""
return skip_if("drizzle", "no VIEW support")
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a varchar."""
return fails_if(["oracle"],
'oracle converts empty strings to a blank space')
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def unicode_data(self):
"""target drive must support unicode data stored in columns."""
return skip_if([
no_support("sybase", "no unicode driver support")
])
@property
def unicode_connections(self):
"""Target driver must support some encoding of Unicode across the wire."""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return skip_if([
exclude('mysql', '<', (4, 1, 1), 'no unicode connection support'),
])
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return skip_if([
no_support('oracle', 'FIXME: no support in database?'),
no_support('sybase', 'FIXME: guessing, needs confirmation'),
no_support('mssql+pymssql', 'no FreeTDS support'),
LambdaPredicate(
lambda config: against(config, "mysql+mysqlconnector") and
config.db.dialect._mysqlconnector_version_info > (2, 0) and
util.py2k,
"bug in mysqlconnector 2.0"
),
LambdaPredicate(
lambda config: against(config, 'mssql+pyodbc') and
config.db.dialect.freetds and
config.db.dialect.freetds_driver_version < "0.91",
"older freetds doesn't support unicode DDL"
),
exclude('mysql', '<', (4, 1, 1), 'no unicode connection support'),
])
@property
def sane_rowcount(self):
return skip_if(
lambda config: not config.db.dialect.supports_sane_rowcount,
"driver doesn't support 'sane' rowcount"
)
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes.
"""
return fails_on_everything_except('mysql',
'sqlite+pysqlite', 'sqlite+pysqlcipher',
'sybase', 'mssql')
@property
def implements_get_lastrowid(self):
return skip_if([
no_support('sybase', 'not supported by database'),
])
@property
def dbapi_lastrowid(self):
""""target backend includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return skip_if('mssql+pymssql', 'crashes on pymssql') + \
fails_on_everything_except('mysql',
'sqlite+pysqlite', 'sqlite+pysqlcipher')
@property
def sane_multi_rowcount(self):
return fails_if(
lambda config: not config.db.dialect.supports_sane_multi_rowcount,
"driver %(driver)s %(doesnt_support)s 'sane' multi row count"
)
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return fails_on_everything_except('postgresql', 'oracle', 'firebird')
@property
def reflects_pk_names(self):
"""Target driver reflects the name of primary key constraints."""
return fails_on_everything_except('postgresql', 'oracle', 'mssql',
'sybase')
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return fails_on_everything_except("sqlite")
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return skip_if(['mssql', 'mysql', 'firebird', '+zxjdbc',
'oracle', 'sybase'])
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(['sqlite', 'postgresql', 'firebird'])
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return fails_on('mysql+mysqlconnector')
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(['sqlite', 'postgresql', 'firebird'])
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return skip_if(['oracle'])
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return skip_if(['mssql', 'mysql', 'firebird', '+zxjdbc',
'oracle', 'sybase'])
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
# NOTE: this exclusion isn't used in current tests.
return exclusions.open()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return skip_if(
[("sybase+pyodbc", None, None,
"Don't know how do get these values through FreeTDS + Sybase"),
("firebird", None, None, "Precision must be from 1 to 18"),]
)
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return fails_if(
[('sqlite', None, None, 'TODO'),
("firebird", None, None, "Precision must be from 1 to 18"),
("sybase+pysybase", None, None, "TODO"),
('mssql+pymssql', None, None, 'FIXME: improve pymssql dec handling')]
)
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return fails_if(
[
('oracle', None, None,
"this may be a bug due to the difficulty in handling "
"oracle precision numerics"),
("firebird", None, None,
"database and/or driver truncates decimal places.")
]
)
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type."""
return fails_if([
('mysql', None, None,
'mysql FLOAT type only returns 4 decimals'),
('firebird', None, None,
"firebird FLOAT type isn't high precision"),
])
@property
def floats_to_four_decimals(self):
return fails_if([
("mysql+oursql", None, None, "Floating point error"),
("firebird", None, None,
"Firebird still has FP inaccuracy even "
"with only four decimal places"),
('mssql+pyodbc', None, None,
'mssql+pyodbc has FP inaccuracy even with '
'only four decimal places '
),
('mssql+pymssql', None, None,
'mssql+pymssql has FP inaccuracy even with '
'only four decimal places '),
(
'postgresql+pg8000', None, None,
'postgresql+pg8000 has FP inaccuracy even with '
'only four decimal places '),
(
'postgresql+psycopg2cffi', None, None,
'postgresql+psycopg2cffi has FP inaccuracy even with '
'only four decimal places '),
])
@property
def fetch_null_from_numeric(self):
return skip_if(
("mssql+pyodbc", None, None, "crashes due to bug #351"),
)
@property
def duplicate_key_raises_integrity_error(self):
return fails_on("postgresql+pg8000")
@property
def python2(self):
return skip_if(
lambda: sys.version_info >= (3,),
"Python version 2.xx is required."
)
@property
def python3(self):
return skip_if(
lambda: sys.version_info < (3,),
"Python version 3.xx is required."
)
@property
def cpython(self):
return only_if(lambda: util.cpython,
"cPython interpreter needed"
)
@property
def non_broken_pickle(self):
from sqlalchemy.util import pickle
return only_if(
lambda: not util.pypy and pickle.__name__ == 'cPickle'
or sys.version_info >= (3, 2),
"Needs cPickle+cPython or newer Python 3 pickle"
)
@property
def predictable_gc(self):
"""target platform must remove all cycles unconditionally when
gc.collect() is called, as well as clean out unreferenced subclasses.
"""
return self.cpython
@property
def hstore(self):
def check_hstore(config):
if not against(config, "postgresql"):
return False
try:
config.db.execute("SELECT 'a=>1,a=>2'::hstore;")
return True
except:
return False
return only_if(check_hstore)
@property
def range_types(self):
def check_range_types(config):
if not against(
config,
["postgresql+psycopg2", "postgresql+psycopg2cffi"]):
return False
try:
config.db.scalar("select '[1,2)'::int4range;")
return True
except:
return False
return only_if(check_range_types)
@property
def oracle_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
'sqla_testing', 'oracle_db_link'),
"oracle_db_link option not specified in config"
)
@property
def postgresql_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
'sqla_testing', 'postgres_test_db_link'),
"postgres_test_db_link option not specified in config"
)
@property
def postgresql_jsonb(self):
return skip_if(
lambda config:
config.db.dialect.driver == "pg8000" and
config.db.dialect._dbapi_version <= (1, 10, 1)
)
@property
def psycopg2_native_json(self):
return self.psycopg2_compatibility
@property
def psycopg2_native_hstore(self):
return self.psycopg2_compatibility
@property
def psycopg2_compatibility(self):
return only_on(
["postgresql+psycopg2", "postgresql+psycopg2cffi"]
)
@property
def psycopg2_or_pg8000_compatibility(self):
return only_on(
["postgresql+psycopg2", "postgresql+psycopg2cffi",
"postgresql+pg8000"]
)
@property
def percent_schema_names(self):
return skip_if(
[
(
"+psycopg2", None, None,
"psycopg2 2.4 no longer accepts percent "
"sign in bind placeholders"),
(
"+psycopg2cffi", None, None,
"psycopg2cffi does not accept percent signs in "
"bind placeholders"),
("mysql", None, None, "executemany() doesn't work here")
]
)
@property
def order_by_label_with_expression(self):
return fails_if([
('firebird', None, None, "kinterbasdb doesn't send full type information"),
('postgresql', None, None, 'only simple labels allowed'),
('sybase', None, None, 'only simple labels allowed'),
('mssql', None, None, 'only simple labels allowed')
])
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return skip_if(self._has_mysql_on_windows,
"Not supported on MySQL + Windows"
)
@property
def mssql_freetds(self):
return only_on(
LambdaPredicate(
lambda config: (
(against(config, 'mssql+pyodbc') and
config.db.dialect.freetds)
or against(config, 'mssql+pymssql')
)
)
)
@property
def no_mssql_freetds(self):
return self.mssql_freetds.not_()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return skip_if(["oracle", "firebird"], "non-standard SELECT scalar syntax")
@property
def mysql_fully_case_sensitive(self):
return only_if(self._has_mysql_fully_case_sensitive)
def _has_mysql_on_windows(self, config):
return against(config, 'mysql') and \
config.db.dialect._detect_casing(config.db) == 1
def _has_mysql_fully_case_sensitive(self, config):
return against(config, 'mysql') and \
config.db.dialect._detect_casing(config.db) == 0
@property
def postgresql_utf8_server_encoding(self):
return only_if(
lambda config: against(config, 'postgresql') and
config.db.scalar("show server_encoding").lower() == "utf8"
)
| dstufft/sqlalchemy | test/requirements.py | Python | mit | 28,475 |
from io import StringIO
class TOKEN_TYPE:
OPERATOR = 0
STRING = 1
NUMBER = 2
BOOLEAN = 3
NULL = 4
class __TOKENIZER_STATE:
WHITESPACE = 0
INTEGER_0 = 1
INTEGER_SIGN = 2
INTEGER = 3
INTEGER_EXP = 4
INTEGER_EXP_0 = 5
FLOATING_POINT_0 = 6
FLOATING_POINT = 8
STRING = 9
STRING_ESCAPE = 10
STRING_END = 11
TRUE_1 = 12
TRUE_2 = 13
TRUE_3 = 14
FALSE_1 = 15
FALSE_2 = 16
FALSE_3 = 17
FALSE_4 = 18
NULL_1 = 19
NULL_2 = 20
NULL_3 = 21
UNICODE_1 = 22
UNICODE_2 = 23
UNICODE_3 = 24
UNICODE_4 = 25
def tokenize(stream):
def is_delimiter(char):
return char.isspace() or char in "{}[]:,"
token = []
charcode = 0
completed = False
now_token = ""
def process_char(char, charcode):
nonlocal token, completed, now_token
advance = True
add_char = False
next_state = state
if state == __TOKENIZER_STATE.WHITESPACE:
if char == "{":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, "{")
elif char == "}":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, "}")
elif char == "[":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, "[")
elif char == "]":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, "]")
elif char == ",":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, ",")
elif char == ":":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, ":")
elif char == "\"":
next_state = __TOKENIZER_STATE.STRING
elif char in "123456789":
next_state = __TOKENIZER_STATE.INTEGER
add_char = True
elif char == "0":
next_state = __TOKENIZER_STATE.INTEGER_0
add_char = True
elif char == "-":
next_state = __TOKENIZER_STATE.INTEGER_SIGN
add_char = True
elif char == "f":
next_state = __TOKENIZER_STATE.FALSE_1
elif char == "t":
next_state = __TOKENIZER_STATE.TRUE_1
elif char == "n":
next_state = __TOKENIZER_STATE.NULL_1
elif not char.isspace():
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER:
if char in "0123456789":
add_char = True
elif char == ".":
next_state = __TOKENIZER_STATE.FLOATING_POINT_0
add_char = True
elif char == "e" or char == 'E':
next_state = __TOKENIZER_STATE.INTEGER_EXP_0
add_char = True
elif is_delimiter(char):
next_state = __TOKENIZER_STATE.WHITESPACE
completed = True
now_token = (TOKEN_TYPE.NUMBER, int("".join(token)))
advance = False
else:
raise ValueError("A number must contain only digits. Got '{}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER_0:
if char == ".":
next_state = __TOKENIZER_STATE.FLOATING_POINT_0
add_char = True
elif char == "e" or char == 'E':
next_state = __TOKENIZER_STATE.INTEGER_EXP_0
add_char = True
elif is_delimiter(char):
next_state = __TOKENIZER_STATE.WHITESPACE
completed = True
now_token = (TOKEN_TYPE.NUMBER, 0)
advance = False
else:
raise ValueError("A 0 must be followed by a '.' or a 'e'. Got '{0}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER_SIGN:
if char == "0":
next_state = __TOKENIZER_STATE.INTEGER_0
add_char = True
elif char in "123456789":
next_state = __TOKENIZER_STATE.INTEGER
add_char = True
else:
raise ValueError("A - must be followed by a digit. Got '{0}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER_EXP_0:
if char == "+" or char == "-" or char in "0123456789":
next_state = __TOKENIZER_STATE.INTEGER_EXP
add_char = True
else:
raise ValueError("An e in a number must be followed by a '+', '-' or digit. Got '{0}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER_EXP:
if char in "0123456789":
add_char = True
elif is_delimiter(char):
completed = True
now_token = (TOKEN_TYPE.NUMBER, float("".join(token)))
next_state = __TOKENIZER_STATE.WHITESPACE
advance = False
else:
raise ValueError("A number exponent must consist only of digits. Got '{}'".format(char))
elif state == __TOKENIZER_STATE.FLOATING_POINT:
if char in "0123456789":
add_char = True
elif char == "e" or char == "E":
next_state = __TOKENIZER_STATE.INTEGER_EXP_0
add_char = True
elif is_delimiter(char):
completed = True
now_token = (TOKEN_TYPE.NUMBER, float("".join(token)))
next_state = __TOKENIZER_STATE.WHITESPACE
advance = False
else:
raise ValueError("A number must include only digits")
elif state == __TOKENIZER_STATE.FLOATING_POINT_0:
if char in "0123456789":
next_state = __TOKENIZER_STATE.FLOATING_POINT
add_char = True
else:
raise ValueError("A number with a decimal point must be followed by a fractional part")
elif state == __TOKENIZER_STATE.FALSE_1:
if char == "a":
next_state = __TOKENIZER_STATE.FALSE_2
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.FALSE_2:
if char == "l":
next_state = __TOKENIZER_STATE.FALSE_3
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.FALSE_3:
if char == "s":
next_state = __TOKENIZER_STATE.FALSE_4
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.FALSE_4:
if char == "e":
next_state = __TOKENIZER_STATE.WHITESPACE
completed = True
now_token = (TOKEN_TYPE.BOOLEAN, False)
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.TRUE_1:
if char == "r":
next_state = __TOKENIZER_STATE.TRUE_2
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.TRUE_2:
if char == "u":
next_state = __TOKENIZER_STATE.TRUE_3
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.TRUE_3:
if char == "e":
next_state = __TOKENIZER_STATE.WHITESPACE
completed = True
now_token = (TOKEN_TYPE.BOOLEAN, True)
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.NULL_1:
if char == "u":
next_state = __TOKENIZER_STATE.NULL_2
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.NULL_2:
if char == "l":
next_state = __TOKENIZER_STATE.NULL_3
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.NULL_3:
if char == "l":
next_state = __TOKENIZER_STATE.WHITESPACE
completed = True
now_token = (TOKEN_TYPE.NULL, None)
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.STRING:
if char == "\"":
completed = True
now_token = (TOKEN_TYPE.STRING, "".join(token))
next_state = __TOKENIZER_STATE.STRING_END
elif char == "\\":
next_state = __TOKENIZER_STATE.STRING_ESCAPE
else:
add_char = True
elif state == __TOKENIZER_STATE.STRING_END:
if is_delimiter(char):
advance = False
next_state = __TOKENIZER_STATE.WHITESPACE
else:
raise ValueError("Expected whitespace or an operator after strin. Got '{}'".format(char))
elif state == __TOKENIZER_STATE.STRING_ESCAPE:
next_state = __TOKENIZER_STATE.STRING
if char == "\\" or char == "\"":
add_char = True
elif char == "b":
char = "\b"
add_char = True
elif char == "f":
char = "\f"
add_char = True
elif char == "n":
char = "\n"
add_char = True
elif char == "t":
char = "\t"
add_char = True
elif char == "r":
char = "\r"
add_char = True
elif char == "/":
char = "/"
add_char = True
elif char == "u":
next_state = __TOKENIZER_STATE.UNICODE_1
charcode = 0
else:
raise ValueError("Invalid string escape: {}".format(char))
elif state == __TOKENIZER_STATE.UNICODE_1:
if char in "0123456789":
charcode = (ord(char) - 48) * 4096
elif char in "abcdef":
charcode = (ord(char) - 87) * 4096
elif char in "ABCDEF":
charcode = (ord(char) - 55) * 4096
else:
raise ValueError("Invalid character code: {}".format(char))
next_state = __TOKENIZER_STATE.UNICODE_2
char = ""
elif state == __TOKENIZER_STATE.UNICODE_2:
if char in "0123456789":
charcode += (ord(char) - 48) * 256
elif char in "abcdef":
charcode += (ord(char) - 87) * 256
elif char in "ABCDEF":
charcode += (ord(char) - 55) * 256
else:
raise ValueError("Invalid character code: {}".format(char))
next_state = __TOKENIZER_STATE.UNICODE_3
char = ""
elif state == __TOKENIZER_STATE.UNICODE_3:
if char in "0123456789":
charcode += (ord(char) - 48) * 16
elif char in "abcdef":
charcode += (ord(char) - 87) * 16
elif char in "ABCDEF":
charcode += (ord(char) - 55) * 16
else:
raise ValueError("Invalid character code: {}".format(char))
next_state = __TOKENIZER_STATE.UNICODE_4
char = ""
elif state == __TOKENIZER_STATE.UNICODE_4:
if char in "0123456789":
charcode += ord(char) - 48
elif char in "abcdef":
charcode += ord(char) - 87
elif char in "ABCDEF":
charcode += ord(char) - 55
else:
raise ValueError("Invalid character code: {}".format(char))
next_state = __TOKENIZER_STATE.STRING
char = chr(charcode)
add_char = True
if add_char:
token.append(char)
return advance, next_state, charcode
state = __TOKENIZER_STATE.WHITESPACE
char = stream.read(1)
index = 0
while char:
try:
advance, state, charcode = process_char(char, charcode)
except ValueError as e:
raise ValueError("".join([e.args[0], " at index {}".format(index)]))
if completed:
completed = False
token = []
yield now_token
if advance:
char = stream.read(1)
index += 1
process_char(" ", charcode)
if completed:
yield now_token
def parse_string(string):
return parse(StringIO(string))
def parse(file):
token_stream = tokenize(file)
val, token_type, token = __parse(token_stream, next(token_stream))
if token is not None:
raise ValueError("Improperly closed JSON object")
try:
next(token_stream)
except StopIteration:
return val
raise ValueError("Additional string after end of JSON")
def __parse(token_stream, first_token):
class KVP:
def __init__(self, key):
self.key = key
self.value = None
self.set = False
def __str__(self):
if self.set:
return "{}: {}".format(self.key, self.value)
else:
return "{}: <NULL>".format(self.key)
stack = []
token_type, token = first_token
if token_type == TOKEN_TYPE.OPERATOR:
if token == "{":
stack.append({})
elif token == "[":
stack.append([])
else:
raise ValueError("Expected object or array. Got '{}'".format(token))
else:
raise ValueError("Expected object or array. Got '{}'".format(token))
last_type, last_token = token_type, token
try:
token_type, token = next(token_stream)
except StopIteration as e:
raise ValueError("Too many opening braces") from e
try:
while True:
if isinstance(stack[-1], list):
if last_type == TOKEN_TYPE.OPERATOR:
if last_token == "[":
if token_type == TOKEN_TYPE.OPERATOR:
if token == "{":
stack.append({})
elif token == "[":
stack.append([])
elif token != "]":
raise ValueError("Array must either be empty or contain a value. Got '{}'".
format(token))
else:
stack.append(token)
elif last_token == ",":
if token_type == TOKEN_TYPE.OPERATOR:
if token == "{":
stack.append({})
elif token == "[":
stack.append([])
else:
raise ValueError("Array value expected. Got '{}'".format(token))
else:
stack.append(token)
elif last_token == "]":
value = stack.pop()
if len(stack) == 0:
return value, token_type, token
if isinstance(stack[-1], list):
stack[-1].append(value)
elif isinstance(stack[-1], dict):
stack[-1][value.key] = value.value
elif isinstance(stack[-1], KVP):
stack[-1].value = value
stack[-1].set = True
value = stack.pop()
if len(stack) == 0:
return value, token_type, token
if isinstance(stack[-1], list):
stack[-1].append(value)
elif isinstance(stack[-1], dict):
stack[-1][value.key] = value.value
else:
raise ValueError("Array items must be followed by a comma or closing bracket. "
"Got '{}'".format(value))
else:
raise ValueError("Array items must be followed by a comma or closing bracket. "
"Got '{}'".format(value))
elif last_token == "}":
raise ValueError("Array closed with a '}'")
else:
raise ValueError("Array should not contain ':'")
else:
raise ValueError("Unknown Error")
elif isinstance(stack[-1], dict):
if last_type == TOKEN_TYPE.OPERATOR:
if last_token == "{":
if token_type == TOKEN_TYPE.OPERATOR:
if token == "{":
stack.append({})
elif token == "[":
stack.append([])
elif token != "}":
raise ValueError("Object must either be empty or contain key value pairs."
" Got '{}'".format(token))
elif token_type == TOKEN_TYPE.STRING:
stack.append(KVP(token))
else:
raise ValueError("Object keys must be strings. Got '{}'".format(token))
elif last_token == ",":
if token_type == TOKEN_TYPE.OPERATOR:
if token == "{":
stack.append({})
elif token == "[":
stack.append([])
else:
raise ValueError("Object key expected. Got '{}'".format(token))
elif token_type == TOKEN_TYPE.STRING:
stack.append(KVP(token))
else:
raise ValueError("Object keys must be strings. Got '{}'".format(token))
elif last_token == "}":
value = stack.pop()
if len(stack) == 0:
return value, token_type, token
if isinstance(stack[-1], list):
stack[-1].append(value)
elif isinstance(stack[-1], dict):
stack[-1][value.key] = value.value
elif isinstance(stack[-1], KVP):
stack[-1].value = value
stack[-1].set = True
value = stack.pop()
if len(stack) == 0:
return value, token_type, token
if isinstance(stack[-1], list):
stack[-1].append(value)
elif isinstance(stack[-1], dict):
stack[-1][value.key] = value.value
else:
raise ValueError("Object key value pairs must be followed by a comma or "
"closing bracket. Got '{}'".format(value))
elif last_token == "]":
raise ValueError("Object closed with a ']'")
else:
raise ValueError("Object key value pairs should be separated by comma, not ':'")
elif isinstance(stack[-1], KVP):
if stack[-1].set:
if token_type == TOKEN_TYPE.OPERATOR:
if token != "}" and token != ",":
raise ValueError("Object key value pairs should be followed by ',' or '}'. Got '"
+ token + "'")
value = stack.pop()
if len(stack) == 0:
return value, token_type, token
if isinstance(stack[-1], list):
stack[-1].append(value)
elif isinstance(stack[-1], dict):
stack[-1][value.key] = value.value
else:
raise ValueError("Object key value pairs must be followed by a comma or closing bracket. "
"Got '{}'".format(value))
if token == "}" and len(stack) == 1:
return stack[0], None, None
else:
raise ValueError("Object key value pairs should be followed by ',' or '}'. Got '"
+ token + "'")
else:
if token_type == TOKEN_TYPE.OPERATOR and token == ":" and last_type == TOKEN_TYPE.STRING:
pass
elif last_type == TOKEN_TYPE.OPERATOR and last_token == ":":
if token_type == TOKEN_TYPE.OPERATOR:
if token == "{":
stack.append({})
elif token == "[":
stack.append([])
else:
raise ValueError("Object property value expected. Got '{}'".format(token))
else:
stack[-1].value = token
stack[-1].set = True
else:
raise ValueError("Object keys must be separated from values by a single ':'. "
"Got '{}'".format(token))
else:
value = stack.pop()
if isinstance(stack[-1], list):
stack[-1].append(value)
elif isinstance(stack[-1], dict):
stack[-1][value.key] = value.value
else:
raise ValueError("Array items must be followed by a comma or closing bracket. "
"Got '{}'".format(value))
last_type, last_token = token_type, token
token_type, token = next(token_stream)
except StopIteration as e:
if len(stack) == 1:
return stack[0], None, None
else:
raise ValueError("JSON Object not properly closed") from e
def stream_array(token_stream):
def process_token(token_type, token):
if token_type == TOKEN_TYPE.OPERATOR:
if token == ']':
return None, None, None
elif token == ",":
token_type, token = next(token_stream)
if token_type == TOKEN_TYPE.OPERATOR:
if token == "[" or token == "{":
return __parse(token_stream, (token_type, token))
else:
raise ValueError("Expected an array value. Got '{}'".format(token))
else:
return token, None, None
elif token == "[" or token == "{":
return __parse(token_stream, (token_type, token))
else:
raise ValueError("Array entries must be followed by ',' or ']'. Got '{}'".format(token))
else:
return token, None, None
token_type, token = next(token_stream)
if token_type != TOKEN_TYPE.OPERATOR or token != '[':
raise ValueError("Array must start with '['. Got '{}'".format(token))
token_type, token = next(token_stream)
while True:
while token is not None:
value, token_type, token = process_token(token_type, token)
if value is None:
return
yield value
token_type, token = next(token_stream)
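# Minimal illustrative demo (added; uses only the helpers defined above).
# stream_array() yields top-level array entries one at a time, which is the
# main reason to prefer it over parse() for very large JSON arrays.
if __name__ == "__main__":
    demo = StringIO('[1, "two", {"three": 3}, [4, 5]]')
    for entry in stream_array(tokenize(demo)):
        print(entry)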
| danielyule/naya | naya/json.py | Python | mit | 24,396 |
# ============================================================================
# FILE: member.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from .base import Base
import re
from deoplete.util import (
convert2list, parse_buffer_pattern, set_pattern, getlines)
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'member'
self.mark = '[M]'
self.min_pattern_length = 0
self._object_pattern = r'[a-zA-Z_]\w*(?:\(\)?)?'
self._prefix = ''
prefix_patterns = {}
        set_pattern(prefix_patterns,
                    '_', r'\.')
        set_pattern(prefix_patterns,
                    'c,objc', [r'\.', '->'])
        set_pattern(prefix_patterns,
                    'cpp,objcpp', [r'\.', '->', '::'])
        set_pattern(prefix_patterns,
                    'perl,php', ['->'])
        set_pattern(prefix_patterns,
                    'ruby', [r'\.', '::'])
        set_pattern(prefix_patterns,
                    'lua', [r'\.', ':'])
self.vars = {
'prefix_patterns': prefix_patterns,
}
def get_complete_position(self, context):
# Check member prefix pattern.
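        # Illustrative (hypothetical) input: completing "buf.na" in a Python
        # buffer matches the default '\.' prefix pattern, so self._prefix is
        # set to "buf." and the returned position is the column where "na"
        # starts.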
for prefix_pattern in convert2list(
self.get_filetype_var(
context['filetype'], 'prefix_patterns')):
m = re.search(self._object_pattern + prefix_pattern + r'\w*$',
context['input'])
if m is None or prefix_pattern == '':
continue
self._prefix = re.sub(r'\w*$', '', m.group(0))
return re.search(r'\w*$', context['input']).start()
return -1
def gather_candidates(self, context):
return [{'word': x} for x in
parse_buffer_pattern(
getlines(self.vim),
r'(?<=' + re.escape(self._prefix) + r')\w+'
)
if x != context['complete_str']]
| killuazhu/vimrc | sources_non_forked/deoplete.nvim/rplugin/python3/deoplete/source/member.py | Python | mit | 2,077 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import time
import numpy as np
from pyqtgraph import PlotWindow
from six.moves import zip
sys.path.append('../../util')
sys.path.append('../..')
from .nidaq import NiDAQ
from pyqtgraph.functions import mkPen, mkColor
pw = PlotWindow()
time.clock()
sr = 100000
dur = 2.0
data = np.zeros(int(sr*dur))
dlen = len(data)
xVals = np.linspace(0, dur, dlen)
data += np.random.normal(size=dlen) + 20.
data[round(dlen*0.102):round(dlen*0.3)] += 20
data[round(dlen*0.3):round(dlen*0.5)] += 30
data[round(dlen*0.4)]+= 1000
data += np.sin(xVals*40677*2.0*np.pi)*4.
#data = sin(linspace(0, dur, sr*dur)* linspace(0, sr*2, sr*dur))
methods = ['subsample', 'mean', 'fourier', 'bessel_mean', 'butterworth_mean']
colors = [mkColor((255,0,0)), mkColor((0,255,0)), mkColor((0,0,255)), mkColor((255,0,255)), mkColor((255,255,0))]
def run(ds):
pw.plot(data, clear=True)
for m, c in zip(methods, colors):
d1 = data.copy()
t = time.clock()
d2 = NiDAQ.downsample(d1, ds, method=m)
print("Method %d: %f" % (m, time.clock()-t))
p = pw.plot(y=d2, x=np.linspace(0, len(d2)*ds, len(d2)), pen=mkPen(c))
p.setZValue(10000)
#pw.plot(d2, pen=mkPen(colors[i-1]))
def showDownsample(**kwargs):
d1 = data.copy()
d2 = NiDAQ.downsample(d1, **kwargs)
xv2 = xVals[::kwargs['ds']][:len(d2)]
pw.plot(y=d1, x=xVals, clear=True)
pw.plot(y=d2[:len(xv2)], x=xv2, pen=mkPen((255, 0, 0)))
def showTransfer(**kwargs):
    # Use integer sample counts; linspace/normal reject float sizes.
    xVals = np.linspace(0, dur, int(sr*dur))
    #data = sin(xVals* linspace(0, sampr*2, sampr*dur))
    data = np.random.normal(size=int(sr*dur))
data2 = NiDAQ.lowpass(data, **kwargs)
pw.plot(y=data, x=xVals, clear=True)
pw.plot(y=data2, x=xVals, pen=mkPen((255, 0, 0)))
#def downsample(data, ds, method=1):
#if method == 1:
## Method 1:
## decimate by averaging points together (does not remove HF noise, just folds it down.)
#newLen = int(data.shape[0] / ds) * ds
#data = data[:newLen]
#data.shape = (data.shape[0]/ds, ds)
#data = data.mean(axis=1)
#elif method == 2:
## Method 2:
## Decimate using fourier resampling -- causes ringing artifacts.
#newLen = int(data.shape[0] / ds)
#data = scipy.signal.resample(data, newLen, window=8) # Use a kaiser window with beta=8
#elif method == 3:
## Method 3:
## Decimate by lowpass filtering, then average points together. (slow, artifacts at beginning and end of traces)
## Not as good as signal.resample for removing HF noise, but does not generate artifacts either.
## worst at removing HF noise (??)
#b,a = scipy.signal.bessel(8, 1.0/ds, btype='low')
#base = data.mean()
#data = scipy.signal.lfilter(b, a, data-base) + base
#newLen = int(data.shape[0] / ds) * ds
#data = data[:newLen]
#data.shape = (data.shape[0]/ds, ds)
#data = data.mean(axis=1)
#elif method == 4:
##Method 4:
### Pad data, forward+reverse bessel filter, then average down
#b,a = scipy.signal.bessel(4, 1.0/ds, btype='low')
#padded = numpy.hstack([data[:100], data, data[-100:]]) ## can we intelligently decide how many samples to pad with?
#data = scipy.signal.lfilter(b, a, scipy.signal.lfilter(b, a, padded)[::-1])[::-1][100:-100] ## filter twice; once forward, once reversed. (This eliminates phase changes)
##data = scipy.signal.lfilter(b, a, padded)[100:-100] ## filter twice; once forward, once reversed. (This eliminates phase changes)
#newLen = int(data.shape[0] / ds) * ds
#data = data[:newLen]
#data.shape = (data.shape[0]/ds, ds)
#data = data.mean(axis=1)
#elif method == 5:
##Method 4:
### Pad data, forward+reverse butterworth filter, then average down
#ord, Wn = scipy.signal.buttord(1.0/ds, 1.5/ds, 0.01, 0.99)
#print "butt ord:", ord, Wn
#b,a = scipy.signal.butter(ord, Wn, btype='low')
#padded = numpy.hstack([data[:100], data, data[-100:]]) ## can we intelligently decide how many samples to pad with?
#data = scipy.signal.lfilter(b, a, scipy.signal.lfilter(b, a, padded)[::-1])[::-1][100:-100] ## filter twice; once forward, once reversed. (This eliminates phase changes)
##data = scipy.signal.lfilter(b, a, padded)[100:-100] ## filter twice; once forward, once reversed. (This eliminates phase changes)
#newLen = int(data.shape[0] / ds) * ds
#data = data[:newLen]
#data.shape = (data.shape[0]/ds, ds)
#data = data.mean(axis=1)
#return data
| acq4/acq4 | acq4/devices/NiDAQ/resample_test.py | Python | mit | 4,804 |
__author__ = 'Exter, 0xBADDCAFE'
import wx
class FTDropTarget(wx.DropTarget):
"""
Implements drop target functionality to receive files and text
receiver - any WX class that can bind to events
evt - class that comes from wx.lib.newevent.NewCommandEvent call
class variable ID_DROP_FILE
class variable ID_DROP_TEXT
"""
ID_DROP_FILE = wx.NewId()
ID_DROP_TEXT = wx.NewId()
def __init__(self, receiver, evt):
"""
receiver - any WX class that can bind to events
evt - class that comes from wx.lib.newevent.NewCommandEvent call
"""
wx.DropTarget.__init__(self)
self.composite = wx.DataObjectComposite()
self.text_do = wx.TextDataObject()
self.file_do = wx.FileDataObject()
self.composite.Add(self.text_do)
self.composite.Add(self.file_do)
self.SetDataObject(self.composite)
self.receiver = receiver
self.evt = evt
def OnData(self, x, y, result):
"""Handles dropping files/text """
if self.GetData():
drop_type = self.composite.GetReceivedFormat().GetType()
if drop_type in (wx.DF_TEXT, wx.DF_UNICODETEXT):
wx.PostEvent(self.receiver, self.evt(id=self.ID_DROP_TEXT, text=self.text_do.GetText()))
elif drop_type == wx.DF_FILENAME:
wx.PostEvent(self.receiver, self.evt(id=self.ID_DROP_FILE, files=self.file_do.GetFilenames()))
assert isinstance(result, object)
return result
| exter/pycover | droptarget.py | Python | mit | 1,525 |
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test',
}
}
ROOT_URLCONF = 'urls'
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django_featurette',
)
TEMPLATE_CONTEXT_PROCESSORS += ("django.core.context_processors.request",)
SECRET_KEY = 'sk'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
) | GermanoGuerrini/django-featurette | tests/settings.py | Python | mit | 597 |
"""Test energy_profiler module."""
import unittest
from physalia.energy_profiler import AndroidUseCase
# pylint: disable=missing-docstring
class TestEnergyProfiler(unittest.TestCase):
def test_empty_android_use_case(self):
# pylint: disable=no-self-use
use_case = AndroidUseCase(
name="Test",
app_apk="no/path",
app_pkg="no.package",
app_version="0.0.0",
run=None,
prepare=None,
cleanup=None
)
use_case.run()
| TQRG/physalia | physalia/tests/test_energy_profiler.py | Python | mit | 534 |
def fib():
a, b = 1, 1
while True:
yield b
a, b = b, a + b
def pares(seq):
for n in seq:
if n % 2 == 0:
yield n
def menores_4M(seq):
for n in seq:
if n > 4000000:
break
yield n
print (sum(pares(menores_4M(fib()))))
| renebentes/Python4Zumbis | Materiais/Ultima Semana/euler 02.py | Python | mit | 285 |
from core.rule_core import *
from core import yapi
from core.config_loader import cur_conf
class YunoModule:
name = "ores"
cfg_ver = None
ores_api = yapi.ORES
config = [
{
"models": {
"damaging": {"max_false": 0.15, "min_true": 0.8},
"goodfaith": {"min_false": 0.8, "max_true": 0.15}
},
"score": 1,
"expiry": 24
}
]
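    # Reading of the default rule above (derived from run() below): a revision
    # gets score 1 with a 24 h expiry only when ORES reports
    # P(damaging=true) >= 0.8 and P(damaging=false) <= 0.15, together with
    # P(goodfaith=false) >= 0.8 and P(goodfaith=true) <= 0.15; that is, the
    # edit looks damaging and not made in good faith.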
def load_config(self):
if core.config.config_mode == "online":
pass
def getScores(self, rev):
tries = 2
revid_data = 1
# Check result and check for errors
# If error faced then try again once
for i in reversed(range(tries)):
scores = self.ores_api.getScore([rev["revision"]["new"]])[cur_conf["core"]["lang"]+"wiki"]["scores"]
revid_data = scores[str(rev["revision"]["new"])]
for item in revid_data:
if "error" in revid_data[item] and "scores" not in revid_data[item]:
if i <= 0:
logger.error("failed to fetch ores revision data: %s" % str(revid_data))
return False
else:
break
return revid_data
def run(self, rev):
score = 0
expiry = None
revid_data = self.getScores(rev)
if not revid_data:
return score, expiry
for rule in self.config:
failed = False
for item in rule["models"]:
if failed:
break
for value in rule["models"][item]:
if value == "max_false" and rule["models"][item][value] < revid_data[item]["score"]["probability"]["false"]:
failed = True
break
elif value == "min_false" and rule["models"][item][value] > revid_data[item]["score"]["probability"]["false"]:
failed = True
break
elif value == "max_true" and rule["models"][item][value] < revid_data[item]["score"]["probability"]["true"]:
failed = True
break
elif value == "min_true" and rule["models"][item][value] > revid_data[item]["score"]["probability"]["true"]:
failed = True
break
if not failed and rule["score"] > score:
score = rule["score"]
expiry = rule["expiry"]
return score, expiry
| 4shadoww/stabilizerbot | core/rules/ores.py | Python | mit | 2,595 |
#!/usr/bin/env python3
#!coding=utf-8
from .record import Record,RecordSet,QueryValue
from .heysqlware import *
| mu2019/heysqlware | heysqlware/__init__.py | Python | mit | 113 |
import pytz
from pendulum import _safe_timezone
from pendulum.tz.timezone import Timezone
def test_safe_timezone_with_tzinfo_objects():
tz = _safe_timezone(pytz.timezone("Europe/Paris"))
assert isinstance(tz, Timezone)
assert "Europe/Paris" == tz.name
| sdispater/pendulum | tests/test_main.py | Python | mit | 268 |
sandwich_orders = ['Bacon','Bacon, egg and cheese','Bagel toast','pastrami','pastrami','pastrami']
print ('pastrami sandwich was sold out')
finished_sandwiches = []
while 'pastrami' in sandwich_orders:
sandwich_orders.remove('pastrami')
print(sandwich_orders) | lluxury/pcc_exercise | 07/pastrami.py | Python | mit | 265 |
# -*- coding: utf-8 -*-
import pytest
from parglare import Parser, Grammar
from parglare.exceptions import GrammarError, ParseError, RRConflicts
def test_repeatable_zero_or_more():
"""
Tests zero or more repeatable operator.
"""
grammar = """
S: "2" b* "3";
terminals
b: "1";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_0')
assert g.get_nonterminal('b_1')
p = Parser(g)
input_str = '2 1 1 1 3'
result = p.parse(input_str)
assert result == ["2", ["1", "1", "1"], "3"]
input_str = '2 3'
result = p.parse(input_str)
assert result == ["2", [], "3"]
def test_repeatable_zero_or_more_with_separator():
"""
Tests zero or more repeatable operator with separator.
"""
grammar = """
S: "2" b*[comma] "3";
terminals
b: "1";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_0_comma')
p = Parser(g)
input_str = '2 1, 1 , 1 3'
result = p.parse(input_str)
assert result == ["2", ["1", "1", "1"], "3"]
input_str = '2 3'
result = p.parse(input_str)
assert result == ["2", [], "3"]
def test_repeatable_one_or_more():
"""
Tests one or more repeatable operator.
"""
grammar = """
S: "2" b+ "3";
terminals
b: "1";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_1')
p = Parser(g)
input_str = '2 1 1 1 3'
result = p.parse(input_str)
assert result == ["2", ["1", "1", "1"], "3"]
input_str = '2 3'
with pytest.raises(ParseError) as e:
result = p.parse(input_str)
assert 'Expected: b' in str(e.value)
def test_repeatable_one_or_more_with_separator():
"""
Tests one or more repeatable operator with separator.
"""
grammar = """
S: "2" b+[comma] "3";
terminals
b: "1";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_1_comma')
p = Parser(g)
input_str = '2 1, 1 , 1 3'
result = p.parse(input_str)
assert result == ["2", ["1", "1", "1"], "3"]
input_str = '2 3'
with pytest.raises(ParseError) as e:
p.parse(input_str)
assert 'Expected: b' in str(e.value)
def test_optional():
"""
Tests optional operator.
"""
grammar = """
S: "2" b? "3"?;
terminals
b: "1";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_opt')
p = Parser(g)
input_str = '2 1 3'
result = p.parse(input_str)
assert result == ["2", "1", "3"]
input_str = '2 3'
result = p.parse(input_str)
assert result == ["2", None, "3"]
input_str = '2 1'
result = p.parse(input_str)
assert result == ["2", "1", None]
input_str = ' 1 3'
with pytest.raises(ParseError) as e:
p.parse(input_str)
assert 'Expected: 2' in str(e.value)
def test_optional_no_modifiers():
"""
Tests that optional operator doesn't allow modifiers.
"""
grammar = """
S: "2" b?[comma] "3"?;
terminals
b: "1";
comma: ",";
"""
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert "Repetition modifier not allowed" in str(e.value)
def test_multiple_repetition_operators():
"""
Test using of multiple repetition operators.
"""
grammar = """
S: "2" b*[comma] c+ "3"?;
terminals
b: "b";
c: "c";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_0_comma')
assert g.get_nonterminal('c_1')
p = Parser(g)
input_str = '2 b, b c 3'
result = p.parse(input_str)
assert result == ["2", ["b", "b"], ["c"], "3"]
def test_repetition_operator_many_times_same():
"""
Test using the same repetition operator multiple times.
"""
grammar = """
S: "2" b*[comma] "3"? b*[comma];
terminals
b: "b";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_0_comma')
p = Parser(g)
input_str = '2 b 3 b, b'
result = p.parse(input_str)
assert result == ["2", ["b"], "3", ["b", "b"]]
def test_repeatable_one_zero_rr_conflicts():
"""
Check that translations of B+ and B* don't produce R/R conflict.
"""
grammar = """
S: A B+ C;
S: A B* D;
terminals
A:; B:; C:; D:;
"""
g = Grammar.from_string(grammar, _no_check_recognizers=True)
# Check if parser construction raises exception
try:
Parser(g)
except RRConflicts:
pytest.fail("R/R conflicts not expected here.")
| igordejanovic/parglare | tests/func/grammar/test_repeatable.py | Python | mit | 4,615 |
# -*- coding: utf-8 -*-
"""CMS view for static pages"""
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from boski.mixins import LoginRequiredMixin
from boski.views.crud import ListView, CreateView
from boski.decorators import with_template
from module.static_page.models import Entry
from module.static_page.cms.forms import EntryForm
class List(ListView, LoginRequiredMixin):
breadcrumbs = (
{'name': _('Static pages'), 'url': 'cms:static_page:index'},
)
queryset = Entry.objects.non_deleted()
listingColumns = (
('id', '#'),
('title', _('Title')),
('created_at', _('Created')),
('action', _('Actions'))
)
filters = (
('created_at__gte', {
'label': _('Created from'),
'type': 'text',
'class': 'calendar',
}),
('created_at__lte', {
'label': _('To'),
'type': 'text',
'class': 'calendar',
})
)
mapColumns = {
'id': '_displayAsIs',
'title': '_displayEditLink',
'created_by': '_displayAsIs',
'created_at': '_displayDate',
'action': '_displayStaticActionLink',
}
orderingColumns = {'id', 'title', 'created_at'}
def get_fields_name(self):
fields_name = super(List, self).get_fields_name()
return fields_name + ['activated_at', 'slug']
class Create(LoginRequiredMixin, CreateView):
form_class = EntryForm
model = Entry
@property
def breadcrumbs(self):
return (
{'name': _('Static page'), 'url': 'cms:static_page:index'},
{
'name': self.name,
'url': 'cms:static_page:update',
'pk': self.get_object().pk
},
)
class Update(LoginRequiredMixin, CreateView):
form_class = EntryForm
model = Entry
@property
def breadcrumbs(self):
return (
{'name': _('Static page'), 'url': 'cms:static_page:index'},
{
'name': self.name,
'url': 'cms:static_page:update',
'pk': self.get_object().pk,
},
)
@login_required
@with_template('crud/create.html')
def create(request):
form = EntryForm(request.POST or None)
if form.is_valid():
entry = form.save(commit=False)
""":type : Entry """
entry.save()
messages.success(request, _('New static page has been created'))
return HttpResponseRedirect(reverse('cms:static_page:index'))
name = _('Create')
request.breadcrumbs = (
{'name': _('Static page'), 'url': 'cms:static_page:index'},
{'name': name, 'url': 'cms:static_page:create'},
)
actions = {
'create': 'create',
'update': 'update',
'delete': 'delete',
'index': 'index',
}
return locals()
@login_required
@with_template('crud/update.html')
def update(request, pk):
entry = Entry.objects.get(pk=pk)
form = EntryForm(request.POST or None, instance=entry)
if form.is_valid():
entry = form.save(commit=False)
""" :type : Entry """
entry.save()
messages.success(
request, _('Successfully updated static page "%s".') % entry)
if request.POST.get('next', None) == 'edit':
return HttpResponseRedirect(reverse(
'cms:static_page:update', args=[pk]
))
return HttpResponseRedirect(reverse('cms:static_page:index'))
name = _('Edit entry "%s"') % entry
request.breadcrumbs = (
{'name': _('Static page'), 'url': 'cms:static_page:index'},
{'name': name, 'url': 'cms:static_page:update', 'pk': entry.pk},
)
actions = {
'create': 'create',
'update': 'update',
'delete': 'delete',
'index': 'index',
}
return dict(locals().items() + {'object': entry}.items())
@login_required
@with_template('crud/delete.html')
def delete(request, pk):
entry = Entry.objects.get(pk=pk)
if request.POST:
entry.do_delete()
messages.success(
request, _('Static page "%s" has been deleted') % entry)
return HttpResponseRedirect(reverse('cms:static_page:index'))
name = _('Delete entry "%s"') % entry
request.breadcrumbs = (
{'name': _('Static page'), 'url': 'cms:static_page:index'},
{'name': name, 'url': 'cms:static_page:delete', 'pk': entry.pk},
)
actions = {
'create': 'create',
'update': 'update',
'delete': 'delete',
'index': 'index',
}
return dict(locals().items() + {'object': entry}.items())
@login_required
def activate(request, pk):
try:
entry = Entry.objects.get(pk=pk)
""" :type : Entry """
entry.do_activate()
messages.success(
request, _(u'Static page "%s" has been activated') % entry)
except Exception:
messages.error(request, _('Error occurred during saving'))
return HttpResponseRedirect(reverse('cms:static_page:index'))
| Alkemic/webpage | module/static_page/cms/views.py | Python | mit | 5,262 |
import random
from authorize import Customer, Transaction
from authorize import AuthorizeResponseError
from datetime import date
from nose.plugins.attrib import attr
from unittest import TestCase
FULL_CUSTOMER = {
'email': '[email protected]',
'description': 'Cool web developer guy',
'customer_type': 'individual',
'billing': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
},
'bank_account': {
'routing_number': '322271627',
'account_number': '00987467838473',
'name_on_account': 'Rob Otron',
'bank_name': 'Evil Bank Co.',
'echeck_type': 'CCD'
},
'shipping': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
}
}
CUSTOMER_WITH_CARD = {
'email': '[email protected]',
'description': 'Cool web developer guy',
'credit_card': {
'card_number': '4111111111111111',
'expiration_date': '04/{0}'.format(date.today().year + 1),
'card_code': '456',
},
}
@attr('live_tests')
class CustomerTests(TestCase):
def test_live_customer(self):
# Create customers
result = Customer.create()
Customer.create(FULL_CUSTOMER)
Customer.create(CUSTOMER_WITH_CARD)
# Read customer information. This returns the payment profile IDs
# address IDs for the user
customer_id = result.customer_id
Customer.details(customer_id)
# Update customer information
Customer.update(customer_id, {
'email': '[email protected]',
'description': 'Cool web developer guy'
})
# Delete customer information
Customer.delete(customer_id)
self.assertRaises(AuthorizeResponseError, Customer.delete, customer_id)
Customer.list()
def test_live_customer_from_transaction(self):
INVALID_TRANS_ID = '123'
self.assertRaises(AuthorizeResponseError, Customer.from_transaction, INVALID_TRANS_ID)
# Create the transaction
transaction = CUSTOMER_WITH_CARD.copy()
transaction['amount'] = random.randrange(100, 100000) / 100.0
result = Transaction.auth(transaction)
trans_id = result.transaction_response.trans_id
# Create the customer from the above transaction
result = Customer.from_transaction(trans_id)
customer_id = result.customer_id
result = Customer.details(customer_id)
self.assertEquals(transaction['email'], result.profile.email)
| vcatalano/py-authorize | tests/test_live_customer.py | Python | mit | 3,018 |
try:
from collections import defaultdict
except:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
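if __name__ == '__main__':
    # Quick illustrative check of the fallback class above (behaves the same
    # way when the stdlib collections.defaultdict import succeeds):
    d = defaultdict(list)
    d['spam'].append(1)
    d['spam'].append(2)
    print(d)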
| ActiveState/code | recipes/Python/523034_emulate_collectionsdefaultdict/recipe-523034.py | Python | mit | 1,492 |
# -*- coding: utf-8 -*-
#############################################################
# 1. Imports
#############################################################
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
#############################################################
# 2. General Functions
#############################################################
def lsq(x, y):
assert len(x) == len(y), 'Array dimensions do not match'
n = float(len(x)) # Don't lose precision with int * float multiplication
# compute covariance matrix and correlation coefficient of data
cov = np.cov(x, y)
varx = cov[0][0]
vary = cov[1][1]
sxy = cov[0][1]
r = sxy / (np.sqrt(vary) * np.sqrt(varx))
# lambda expression for a line
# dummy parameter array of [1, 1]
f = lambda x, *p: p[0]*x + p[1]
pars = [1, 1]
pvals, pcov = curve_fit(f, x, y, p0=pars)
m, b = pvals
sm = np.sqrt(pcov[0, 0])
sb = np.sqrt(pcov[1, 1])
sy = np.sqrt(vary)
# y = mx + b; r is correlation
return m, b, sy, sm, sb, r
#############################################################
# 3. Data & Globals
#############################################################
current = np.array([5.372, 10.024, 14.975, 20.482, 24.878, 30.105]) * 1e-3 # mA
voltage = np.array([0.503, 1.043, 1.526, 2.034, 2.521, 3.018]) # V
# The multimeter tends to have a variable uncertainty, so these arrays are needed
dI = np.array([0.001, 0.002, 0.002, 0.001, 0.001, 0.003]) * 1e-3
dV = np.array([0.002, 0.001, 0.003, 0.001, 0.001, 0.002])
#############################################################
# 4. Lab-Specific Functions
#############################################################
def plot_line():
# Least-squares linear regression for y = mx + b
m, b, sy, sm, sb, r = lsq(current * 1e3, voltage) # We want to plot in mA
# You will NEED to call this for each plot so that you don't have multiple plots
# overlaid on each other
plt.figure()
# Range upon which we want to plot the line
x = np.linspace(5, 31, 1000)
plt.plot(x, m*x + b, 'c--')
plt.errorbar(x=(current * 1e3), y=voltage, xerr=(dI * 1e3), yerr=dV, fmt='r.', ecolor='k', alpha=0.5)
plt.xlabel('Current ($mA$)')
plt.ylabel('Voltage ($V$)')
def get_resistance():
m, b, sy, sm, sb, r = lsq(current, voltage)
# Resistance is the slope m; its uncertainty sm is already computed by lsq()
return (m, sm)
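if __name__ == '__main__':
    # Illustrative driver (an assumed entry point, not part of the original lab
    # script): report the fitted resistance and show the plot.
    resistance, d_resistance = get_resistance()
    print('R = {:.2f} +/- {:.2f} Ohm'.format(resistance, d_resistance))
    plot_line()
    plt.show()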
| nyuphys/DataMaster | example/lab.py | Python | mit | 2,511 |
from bson import ObjectId
from . import repeating_schedule
from state_change import StateChange
class StateChangeRepeating(StateChange):
def __init__(self, seconds_into_week, AC_target, heater_target, fan, id=None):
self.id = id
self.seconds_into_week = seconds_into_week
self.AC_target = AC_target
self.heater_target = heater_target
self.fan = fan
        assert isinstance(seconds_into_week, (int, long))
        assert isinstance(AC_target, (int, float))
        assert isinstance(heater_target, (int, float))
        assert isinstance(fan, (int, float))
@classmethod
def from_dictionary(cls, json):
seconds_into_week = json["week_time"]
AC_target = json["state"]["AC_target"]
heater_target = json["state"]["heater_target"]
fan = json["state"]["fan"]
try:
id = ObjectId(json["_id"]["$oid"])
except KeyError:
id = None
except TypeError:
try:
id = ObjectId(json["_id"])
except:
id = None
return cls(seconds_into_week, AC_target, heater_target, fan, id=id)
@classmethod
def get_current(cls, now):
week_time = now.weekday() * 24 * 60 ** 2 + (now.hour * 60 + now.minute) * 60
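        # Note on the aggregation below: for each stored entry it computes
        # (current week_time - entry week_time) mod one week, i.e. how long ago
        # the entry's weekly trigger last fired, sorts ascending, and takes the
        # first result -- the most recently passed schedule point.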
result = repeating_schedule.aggregate(
[
{"$project": {
"time_delta": {"$mod": [{"$add": [{"$subtract": [week_time, "$week_time"]}, 24 * 7 * 60 ** 2]},
24 * 7 * 60 ** 2]},
"state": 1,
"week_time": 1}
},
{"$sort": {"time_delta": 1}}
]).next()
return cls.from_dictionary(result)
def save(self):
delayed_state_change = {
"week_time": self.seconds_into_week,
"state": {"AC_target": self.AC_target, "heater_target": self.heater_target, "fan": self.fan}
}
if self.id is not None:
delayed_state_change["_id"] = self.id
return repeating_schedule.save(delayed_state_change)
def to_dictionary(self):
return {"week_time": self.seconds_into_week,
"_id": str(self.id),
"state": {"AC_target": self.AC_target,
"heater_target": self.heater_target,
"fan": self.fan}}
@classmethod
def get_all_dic(cls):
all_items = cls.get_all()
result = []
for item in all_items:
result.append(item.to_dictionary())
return result | IAPark/PITherm | src/shared/models/Mongo/state_change_repeating.py | Python | mit | 2,592 |
import os, sys
import datetime
import logging
from glob import glob
import json
import numpy as np
import pandas
from skimage.morphology import binary_erosion
from nitime.timeseries import TimeSeries
from nitime.analysis import SpectralAnalyzer, FilterAnalyzer
import nibabel
import nipype.interfaces.spm as spm
from nipype.interfaces.base import CommandLine
import nipype.interfaces.fsl as fsl
from nipype.utils import filemanip
import nipype.interfaces.afni as afni
## deal with relative import for now
cwd = os.getcwd()
sys.path.insert(0, cwd)
import nipype_ext
########################
## naming structure used in scripts to make subdirectories
defaults = {
'rawdir': 'raw',
'func_glob': 'B*func4d.nii*',
'despiked_func_glob' : 'dsB*func4d.nii*',
'anat_glob' : 'brainmask.nii*',
'aparc_glob' : 'aparcaseg.nii*',
'aligned' : 'align4d_{0}.nii*',
'realign_ants':'ants_realign',
'realign_spm': 'spm_realign_slicetime',
'despike' : 'despike_',
'coreg' : 'coreg_masks',
'bandpass' : 'bandpass',
'model_fsl': 'model_fsl',
'wm_labels': [2,41, 77,78,79],
'vent_labels': [4,5,14,15,28,43,44,60,72,75,76],
'movement_names' : ['mc{}.1D'.format(x+1) for x in xrange(6)],
'noise_names' : ['wm.1D', 'csf.1D', 'global.1D']
}
def get_files(dir, globstr):
"""
uses glob to find dir/globstr
returns sorted list; number of files
"""
searchstr = os.path.join(dir, globstr)
files = glob(searchstr)
files.sort()
return files, len(files)
def make_datestr():
now = datetime.datetime.now()
return now.strftime('%Y_%m_%d_%H_%S')
def make_dir(base_dir, dirname='newdir'):
""" makes a new directory if it doesnt alread exist
returns full path
Parameters
----------
base_dir : str
the root directory
dirname : str (default pib_nifti)
new directory name
Returns
-------
newdir : str
full path of new directory
"""
newdir = os.path.join(base_dir,dirname)
if not os.path.isdir(base_dir):
raise IOError('ERROR: base dir %s DOES NOT EXIST'%(base_dir))
directory_exists = os.path.isdir(newdir)
if not directory_exists:
os.mkdir(newdir)
return newdir, directory_exists
def fsl_make4d(infiles, newfile):
"""a list of files is passed, a 4D volume will be created
in the same directory as the original files"""
if not hasattr(infiles, '__iter__'):
raise IOError('expected list,not %s'%(infiles))
startdir = os.getcwd()
pth, nme = os.path.split(infiles[0])
os.chdir(pth)
merge = fsl.Merge()
merge.inputs.in_files = infiles
merge.inputs.dimension = 't'
merge.inputs.merged_file = newfile
out = merge.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
return out.outputs.merged_file
def fsl_split4d(infile, outdir, sid):
""" uses fsl to split 4d file into parts
based on sid, puts resulting files in outdir
"""
startdir = os.getcwd()
pth, nme = os.path.split(infile)
os.chdir(outdir)
im = fsl.Split()
im.inputs.in_file = infile
im.inputs.dimension = 't'
im.inputs.out_base_name = sid
im.inputs.output_type = 'NIFTI'
out = im.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
# fsl split may include input file as an output
## bad globbing...
# remove it here
outfiles = out.outputs.out_files
outfiles = [x for x in outfiles if not x == im.inputs.in_file]
return outfiles
def get_slicetime(nslices):
"""
If TOTAL # SLICES = EVEN, then the excitation order when interleaved
is EVENS first, ODDS second.
If TOTAL # SLICES = ODD, then the excitation order when interleaved is
ODDS first, EVENS second.
Returns:
sliceorder: list
list containing the order of slice acquisition used for slicetime
correction
"""
if np.mod(nslices,2) == 0:
sliceorder = np.concatenate((np.arange(2,nslices+1,2),
np.arange(1,nslices+1,2)))
else:
sliceorder = np.concatenate((np.arange(1,nslices+1,2),
np.arange(2,nslices+1,2)))
# cast to a list for use with interface
return list(sliceorder)
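# For illustration (not part of the original module): with an even slice count
# the even slices are excited first, with an odd count the odd slices are, e.g.
#   get_slicetime(6) gives the order [2, 4, 6, 1, 3, 5]
#   get_slicetime(5) gives the order [1, 3, 5, 2, 4]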
def get_slicetime_vars(infiles, TR=None):
"""
uses nibabel to get slicetime variables
Returns:
dict: dict
nsclies : number of slices
TA : acquisition Time
TR: repetition Time
sliceorder : array with slice order to run slicetime correction
"""
if hasattr(infiles, '__iter__'):
img = nibabel.load(infiles[0])
else:
img = nibabel.load(infiles)
hdr = img.get_header()
if TR is None:
raise RuntimeError('TR is not defined ')
shape = img.get_shape()
nslices = shape[2]
TA = TR - TR/nslices
sliceorder = get_slicetime(nslices)
return dict(nslices=nslices,
TA = TA,
TR = TR,
sliceorder = sliceorder)
def save_json(inobj, outfile):
''' save inobj to outfile using json'''
try:
json.dump(inobj, open(outfile,'w+'))
except:
raise IOError('Unable to save %s to %s (json)'%(inobj, outfile))
def load_json(infile):
''' use json to load objects in json file'''
try:
result = json.load(open(infile))
except:
raise IOError('Unable to load %s' %infile)
return result
def zip_files(files):
if not hasattr(files, '__iter__'):
files = [files]
for f in files:
base, ext = os.path.splitext(f)
if 'gz' in ext:
# file already gzipped
continue
cmd = CommandLine('gzip %s' % f)
cout = cmd.run()
if not cout.runtime.returncode == 0:
logging.error('Failed to zip %s'%(f))
def unzip_file(infile):
""" looks for gz at end of file,
unzips and returns unzipped filename"""
base, ext = os.path.splitext(infile)
if not ext == '.gz':
return infile
else:
if os.path.isfile(base):
return base
cmd = CommandLine('gunzip %s' % infile)
cout = cmd.run()
if not cout.runtime.returncode == 0:
print 'Failed to unzip %s'%(infile)
return None
else:
return base
def copy_file(infile, newdir):
""" copy infile to new directory
return full path of new file
"""
cl = CommandLine('cp %s %s'%(infile, newdir))
out = cl.run()
if not out.runtime.returncode == 0:
print 'failed to copy %s' % infile
print out.runtime.stderr
return None
else:
basenme = os.path.split(infile)[1]
newfile = os.path.join(newdir, basenme)
return newfile
def copy_files(infiles, newdir):
"""wraps copy file to run across multiple files
returns list"""
newfiles = []
for f in infiles:
newf = copy_file(f, newdir)
newfiles.append(newf)
return newfiles
def remove_files(files):
"""removes files """
if not hasattr(files, '__iter__'):
cl = CommandLine('rm %s'% files)
out = cl.run()
if not out.runtime.returncode == 0:
print 'failed to delete %s' % files
print out.runtime.stderr
return
for f in files:
cl = CommandLine('rm %s'% f)
out = cl.run()
if not out.runtime.returncode == 0:
print 'failed to delete %s' % f
print out.runtime.stderr
def afni_despike(in4d):
""" uses afni despike to despike a 4D dataset
saves as ds_<filename>"""
dspike = afni.Despike()
dspike.inputs.in_file = in4d
dspike.inputs.outputtype = 'NIFTI_GZ'
dspike.inputs.ignore_exception = True
outfile = filemanip.fname_presuffix(in4d, 'ds')
dspike.inputs.out_file = outfile
res = dspike.run()
return res.runtime.returncode, res
def spm_realign(infiles, matlab='matlab-spm8'):
""" Uses SPM to realign files"""
startdir = os.getcwd()
pth, _ = os.path.split(infiles[0])
os.chdir(pth)
rlgn = spm.Realign(matlab_cmd = matlab)
rlgn.inputs.in_files = infiles
rlgn.inputs.register_to_mean = True
out = rlgn.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None, None, None
return out.outputs.mean_image, out.outputs.realigned_files,\
out.outputs.realignment_parameters
def spm_realign_unwarp(infiles, matlab = 'matlab-spm8'):
""" uses spm to run realign_unwarp
Returns
-------
mean_img = File; mean generated by unwarp/realign
realigned_files = Files; files unwarped and realigned
parameters = File; file holding the trans rot params
"""
startdir = os.getcwd()
pth, _ = os.path.split(infiles[0])
os.chdir(pth)
ru = nipype_ext.RealignUnwarp(matlab_cmd = matlab)
ru.inputs.in_files = infiles
ruout = ru.run()
os.chdir(startdir)
if not ruout.runtime.returncode == 0:
print ruout.runtime.stderr
return None, None, None
return ruout.outputs.mean_image, ruout.outputs.realigned_files,\
ruout.outputs.realignment_parameters
def make_mean(niftilist, outfile):
"""given a list of nifti files
generates a mean image"""
if not hasattr(niftilist, '__iter__'):
raise IOError('%s is not a list of valid nifti files,'\
' cannot make mean'%niftilist)
n_images = len(niftilist)
affine = nibabel.load(niftilist[0]).get_affine()
shape = nibabel.load(niftilist[0]).get_shape()
newdat = np.zeros(shape)
for item in niftilist:
newdat += nibabel.load(item).get_data().copy()
newdat = newdat / n_images
newdat = np.nan_to_num(newdat)
newimg = nibabel.Nifti1Image(newdat, affine)
newimg.to_filename(outfile)
return outfile
def mean_from4d(in4d, outfile):
""" given a 4D files, calc mean across voxels (time)
and write new 3D file to outfile with same mapping
as in4d"""
##FIXME consider unzipping files first if this is slow
    ## Fast memmap doesn't work on zipped files very well
affine = nibabel.load(in4d).get_affine()
dat = nibabel.load(in4d).get_data()
mean = dat.mean(axis=-1)
newimg = nibabel.Nifti1Image(mean, affine)
try:
newimg.to_filename(outfile)
return outfile
except:
raise IOError('Unable to write {0}'.format(outfile))
def simple_mask(dataf, maskf, outfile, thr=0):
""" sets values in data to zero if they are zero in mask"""
img = nibabel.load(dataf)
dat = img.get_data()
mask = nibabel.load(maskf).get_data()
if not dat.shape == mask.shape:
raise IOError('shape mismatch {0}, {1}'.format(dataf, maskf))
dat[mask <=thr] = 0
newimg = nibabel.Nifti1Image(dat, img.get_affine())
newimg.to_filename(outfile)
return outfile
def aparc_mask(aparc, labels, outfile = 'bin_labelmask.nii.gz'):
""" takes coreg'd aparc and makes a mask based on label values
Parameters
==========
aparc : filename
file containing label image (ints)
labels : tuple
tuple of label values (ints)
"""
pth, _ = os.path.split(outfile)
img = nibabel.load(aparc)
mask = np.zeros(img.get_shape())
label_dat = img.get_data()
for label in labels:
mask[label_dat == label] = 1
masked_img = nibabel.Nifti1Image(mask, img.get_affine())
outf = os.path.join(pth, outfile)
masked_img.to_filename(outf)
return outf
def erode(infile):
""" use skimage.morphology to quickly erode binary mask"""
img = nibabel.load(infile)
dat = img.get_data().squeeze()
## make kernel
tmp = np.diag([0,1,0])
mid = np.zeros((3,3))
mid[1,:] = 1
mid[:,1] = 1
kernel = np.hstack((tmp, mid, tmp))
kernel.shape = (3,3,3)
## erode with kernel
eroded = binary_erosion(dat, kernel)
eroded = eroded.astype(int)
newfile = filemanip.fname_presuffix(infile, 'e')
newimg = nibabel.Nifti1Image(eroded, img.get_affine())
newimg.to_filename(newfile)
return newfile
def get_seedname(seedfile):
_, nme, _ = filemanip.split_filename(seedfile)
return nme
def extract_seed_ts(data, seeds):
""" check shape match of data and seed if same assume registration
extract mean of data in seed > 0"""
data_dat = nibabel.load(data).get_data()
meants = []
for seed in seeds:
seed_dat = nibabel.load(seed).get_data().squeeze()
assert seed_dat.shape == data_dat.shape[:3]
seed_dat[data_dat[:,:,:,0].squeeze() <=0] = 0
tmp = data_dat[seed_dat > 0,:]
meants.append(tmp.mean(0))
return meants
def mask4d_with3d(datf, maskf, labels):
""" given a 4D data file, and a mask file
for each label in labels, pull the mean ts
and save to an array that is nlabels X ntimepoints"""
dat = nibabel.load(datf).get_data()
mask = nibabel.load(maskf).get_data()
if not dat.shape[:3] == mask.shape:
raise ValueError('Shape of dat does not match mask')
result = np.empty((len(labels), dat.shape[-1]))
for val, label in enumerate(labels):
region = dat[mask == label, :]
result[val, :] = region.mean(axis=0)
return result
def bandpass_data():
""" filters for 4D images and timeseries in txt files
Uses afni 3dBandpass
"""
pass
def nitime_bandpass(data, tr, ub=0.15, lb=0.0083):
""" use nittime to bandpass filter regressors
format of data shoud be samples X timepoints"""
ts = TimeSeries(data, sampling_interval=tr)
filtered_ts = FilterAnalyzer(ts, ub=ub, lb=lb)
return filtered_ts.data
def write_filtered(data, outfile):
data.to_file(outfile)
def bandpass_regressor():
""" filters motion params and timeseries from csf and white matter
(also global signal when relevant)
Use afni 1dBandpass, motion values in a 1d file"""
pass
def zero_pad_movement(dataframe):
#insert row of zeros into a dataframe
rows, cols = dataframe.shape
newdat = np.zeros((rows+1, cols))
newdat[1:,:] = dataframe
return pandas.DataFrame(newdat, columns = dataframe.columns)
def smooth_to_fwhm(in4d, outfile = None, fwhm = '8'):
""" 3dBlurToFWHM -input res4d.nii.gz -FWHM 8
use 3dAFNItoNIFTI to convert"""
if outfile is None:
outfile = filemanip.fname_presuffix(in4d,
prefix = 'blur_{}'.format(fwhm))
cmd = '3dBlurToFWHM'
fullcmd = ' '.join([cmd,
'-prefix',
outfile,
'-input',
in4d,
'-FWHM',
'{}'.format(fwhm)] )
res = CommandLine(fullcmd).run()
if res.runtime.returncode == 0:
return fullcmd, outfile
print res.runtime.stderr
return None
def fsl_bandpass(infile, outfile, tr, lowf=0.0083, highf=0.15):
""" use fslmaths to bandpass filter a 4d file"""
startdir = os.getcwd()
pth, nme = os.path.split(infile)
os.chdir(pth)
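    # fslmaths -bptf expects its highpass/lowpass cutoffs as Gaussian sigmas in
    # volumes rather than Hz; the two lines below apply the usual conversion
    # sigma = 1 / (2 * TR * f).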
low_freq = 1 / lowf / 2 / tr
high_freq = 1 / highf / 2 / tr
im = fsl.ImageMaths()
im.inputs.in_file = infile
im.inputs.out_file = outfile
op_str = ' '.join(['-bptf',str(low_freq), str(high_freq)])
im.inputs.op_string = op_str
im.inputs.suffix = 'bpfilter_l%2.2f_h%2.2f'%(low_freq, high_freq)
out = im.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
return out.outputs.out_file
def spm_slicetime(infiles, matlab_cmd='matlab-spm8',stdict = None):
"""
runs slice timing
returns
timecorrected_files
"""
startdir = os.getcwd()
pth, _ = os.path.split(infiles[0])
os.chdir(pth)
if stdict == None:
stdict = get_slicetime_vars(infiles)
sliceorder = stdict['sliceorder']
st = spm.SliceTiming(matlab_cmd = matlab_cmd)
st.inputs.in_files = infiles
st.inputs.ref_slice = np.round(stdict['nslices'] / 2.0).astype(int)
st.inputs.slice_order = sliceorder
st.inputs.time_acquisition = stdict['TA']
st.inputs.time_repetition = stdict['TR']
st.inputs.num_slices = stdict['nslices']
out = st.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
return out.outputs.timecorrected_files
def spm_coregister(moving, target, apply_to_files=None,
matlab_cmd='matlab-spm8'):
"""
runs coregistration for moving to target
"""
startdir = os.getcwd()
pth, _ = os.path.split(moving)
os.chdir(pth)
cr = spm.Coregister(matlab_cmd = matlab_cmd)
cr.inputs.source = moving
cr.inputs.target = target
if apply_to_files is not None:
cr.inputs.apply_to_files = apply_to_files
out = cr.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None, None
else:
return out.outputs.coregistered_source,\
out.outputs.coregistered_files
def update_fsf(fsf, fsf_dict):
""" update fsf with subject specific data
Parameters
----------
fsf : filename
filename of default fsf file with default parameters
to use for your model
fsf_dict : dict
dictionary holding data with the following keys:
nuisance_dir
nuisance_outdir
input_data
TR
nTR
Returns
-------
tmp5 : string
string to write to new fsf file
"""
original = open(fsf).read()
tmp1 = original.replace('nuisance_dir',
fsf_dict['nuisance_dir'])
tmp2 = tmp1.replace('nuisance_model_outputdir',
fsf_dict['nuisance_outdir'])
tmp3 = tmp2.replace('nuisance_model_input_data',
fsf_dict['input_data'])
tmp4 = tmp3.replace('nuisance_model_TR',
fsf_dict['TR'])
tmp5 = tmp4.replace('nuisance_model_numTRs',
fsf_dict['nTR'])
return tmp5
def write_fsf(fsf_string, outfile):
""" writes an updated fsf string (see update_fsf)
to outfile"""
with open(outfile, 'w+') as fid:
fid.write(fsf_string)
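# A minimal usage sketch (hypothetical paths and values, not taken from this
# repository):
#   fsf_dict = {'nuisance_dir': '/data/sub01/nuisance',
#               'nuisance_outdir': '/data/sub01/nuisance/model',
#               'input_data': '/data/sub01/bandpass_sub01.nii.gz',
#               'TR': '2.2',
#               'nTR': '180'}
#   write_fsf(update_fsf('nuisance_template.fsf', fsf_dict), 'sub01_nuisance.fsf')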
def run_feat_model(fsf_file):
""" runs FSL's feat_model which uses the fsf file to generate
files necessary to run film_gls to fit design matrix to timeseries"""
clean_fsf = fsf_file.strip('.fsf')
cmd = 'feat_model %s'%(clean_fsf)
out = CommandLine(cmd).run()
if not out.runtime.returncode == 0:
return None, out.runtime.stderr
mat = fsf_file.replace('.fsf', '.mat')
return mat, cmd
def run_film(data, design, results_dir):
minval = nibabel.load(data).get_data().min()
if minval < 0:
minval=0
film = fsl.FILMGLS()
film.inputs.in_file = data
film.inputs.design_file = design
film.inputs.threshold = minval
film.inputs.results_dir = results_dir
film.inputs.smooth_autocorr = True
film.inputs.mask_size = 5
res = film.run()
return res
| klarnemann/jagust_rsfmri | rsfmri/utils.py | Python | mit | 19,059 |
from .resnet_preact import resnet18_preact
from .resnet_preact_bin import resnet18_preact_bin
import torch, torch.nn as nn
_model_factory = {
"resnet18_preact":resnet18_preact,
"resnet18_preact_bin":resnet18_preact_bin
}
class Classifier(torch.nn.Module):
def __init__(self, feat_extractor,num_classes=None):
super(Classifier,self).__init__()
self.feat_extractor = feat_extractor
self.class_fc = nn.Linear(feat_extractor.fc.in_features, num_classes)
def forward(self,x):
x = self.feat_extractor(x)
class_output = self.class_fc(x)
return class_output
def get_model(arch_name, **kwargs):
backbone = _model_factory[arch_name](**kwargs)
return Classifier(backbone, num_classes = kwargs["num_classes"])
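# Usage sketch (illustrative; the exact backbone kwargs depend on the factory
# functions registered above):
#   model = get_model("resnet18_preact", num_classes=10)
#   logits = model(torch.randn(1, 3, 32, 32))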
| sankit1/cv-tricks.com | xnornet_plusplus/models/__init__.py | Python | mit | 774 |
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GLUT.freeglut import *
import GlutWrapper
import math
import sys
ESCAPE = b'\033'
class GlutViewController(GlutWrapper.GlutWrapper):
"""docstring for GlutViewController"""
def __init__(self):
super(GlutViewController, self).__init__()
self.count = 0.0
def display(self, deltaTime):
self.drawAxis(50)
self.count += 1.0
glRotate(self.count, 0, 1, 0)
glutSolidTeapot(10)
if deltaTime > 0.0:
fpsString = "FPS: %.1f" % (1.0/deltaTime)
self.overlayString(fpsString, 0.0, 0.0)
self.overlayString("LB", 0.0, -1.0)
self.overlayString("RT", -20.0, 0.0)
self.overlayString("RB", -20.0, -1.0)
# User interface -----------------------------------
def mouse(self, button, state, x, y):
# print("MousePress: button: %d, x: %d, y:%d" % (button, x, y))
self.mouseState.button = button
self.mouseState.pressed = ~state
self.mouseState.x = x
self.mouseState.y = y
if button == 3:
self.camera.distance *= 0.875
elif button == 4:
self.camera.distance *= 1.125
def motion(self, x, y):
# print("MouseMove: x: %d, y: %d" % (x, y))
movedX = x - self.mouseState.x
movedY = y - self.mouseState.y
        if self.mouseState.button == 0 and self.mouseState.pressed:
self.camera.pan += float(-movedX)/100.0
self.camera.tilt += float(movedY)/100.0
if self.camera.tilt > math.pi/2.0:
self.camera.tilt = math.pi/2.0-0.01
if self.camera.tilt < -math.pi/2.0:
self.camera.tilt = -(math.pi/2.0-0.01)
self.mouseState.x = x
self.mouseState.y = y
def keyboard(self, key, x, y):
print("KeyboardPress: %s" % key)
if key == ESCAPE:
sys.exit()
elif key == b'p':
self.camera.distance *= 0.875
elif key == b'n':
self.camera.distance *= 1.125
def setColor(self, color):
glColor(color[0], color[1], color[2])
glMaterial(GL_FRONT, GL_AMBIENT, color)
glMaterial(GL_FRONT, GL_DIFFUSE, color)
if __name__ == '__main__':
print("Hit ESC key to quit.")
view = GlutViewController()
view.frameTime = 1.0/60.0
view.startFramework()
| kosystem/PythonGlutWrapper | GlutViewController.py | Python | mit | 2,389 |
#!/usr/bin/env python3
# process.py
# This script consists of all core functions.
# Author: Orhan Odabasi (0rh.odabasi[at]gmail.com)
import locale
import csv
import os
from PIL import Image
import re
from collections import Counter
def scanDir(path):
# scan the path and collect media data for copy process
    if os.path.exists(path) and os.path.isdir(path):
photos_dataset, totalsize, folder_count, videos_dataset = listphotos(path)
p_count = len(photos_dataset)
p_size = "{:.2f} MB".format(float(totalsize/1000000))
return p_count, p_size, folder_count, photos_dataset, videos_dataset
def saveReport(photo_datas, video_datas, target_path):
# save summary data to a csv file
report_dest_p = os.path.join(target_path, "photo_list.csv")
report_dest_v = os.path.join(target_path, "video_list.csv")
with open(report_dest_p, "w") as f:
w = csv.writer(f, delimiter="\t")
w.writerows(photo_datas)
f.close()
with open(report_dest_v, "w") as f:
w = csv.writer(f, delimiter="\t")
w.writerows(video_datas)
f.close()
def listphotos(path):
# Listing all files in target directory
photos_dataset = []
videos_dataset = []
for root, dirs, files in os.walk(path):
for name in files:
p_data_list = []
v_data_list = []
# filename name [0]
file_name = name
# file path [1]
file_path = os.path.join(root, file_name)
# file size [2]
file_size = os.path.getsize(file_path)
try:
# date taken [3]
date_taken = Image.open(file_path)._getexif()[36867]
# year/month/day format required
ymd_format = re.match("(\d{4}):(\d{2}):(\d{2})", date_taken)
# year taken [4]
year = ymd_format.group(1)
# month taken [5]
month = ymd_format.group(2)
# day taken [6]
day = ymd_format.group(3)
# date info will be our new folder name
date_info = "{0}-{1}".format(year, month)
except:
date_taken = "NOT_FOUND"
day = "NOT_FOUND"
year = "NOT_FOUND"
month = "NOT_FOUND"
# destination folder name [7]
date_info = "NOT_FOUND"
if name.lower().endswith((".jpeg", ".jpg", ".png", ".dng")):
p_data_list.extend([file_name, file_path, file_size, date_taken, year, month, day, date_info])
photos_dataset.append(p_data_list)
elif name.lower().endswith((".mov", ".mkv", ".mp4", ".3gp", ".wmv", ".avi")):
v_data_list.extend([file_name, file_path, file_size, date_taken, year, month, day, date_info])
videos_dataset.append(v_data_list)
    # total size of the photo archive (image files only)
totalsize = 0
for s in photos_dataset:
totalsize += int(s[2])
    # total destination folder count
dirs = []
for x in photos_dataset:
dirs.append(x[7])
foldercount = len(Counter(dirs).most_common())
return photos_dataset, totalsize, foldercount, videos_dataset
| OrhanOdabasi/PixPack | pixpack/process.py | Python | mit | 3,262 |
'''
Wrap some important functions in sqlite3 so we can instrument them.
'''
import sqlite3
from xrayvision.monkeypatch import mark_patched, is_patched
_old_connect = sqlite3.connect
def patch(module):
module
| mathom/xrayvision | xrayvision/patches/sqlite3/__init__.py | Python | mit | 202 |
from lxml import etree
import os
from BeautifulSoup import BeautifulSoup
from itertools import chain
def replacements(text):
text = text.replace('>', '\\textgreater ')
text = text.replace('<', '\\textless ')
text = text.replace('&', '\&')
text = text.replace('_', '\_')
text = text.replace('%', '\%')
text = text.replace('[', '\lbrack')
text = text.replace(']', '\\rbrack')
return text
def fillContent(tex, srchStr, insStr):
insStr = replacements(insStr)
insIndex = tex.index(srchStr)
tex = tex[:insIndex+len(srchStr)] + insStr + tex[insIndex+len(srchStr):]
return tex
def convertToTex(text, figInTabular=False):
text = replacements(text)
soup = BeautifulSoup(text)
contents = soup.contents[0].contents
retTxt = ''
for content in contents:
if str(type(content)) == "<class 'BeautifulSoup.NavigableString'>":
content = content.replace('\\newline', '~\\\\')
content = content.replace('\\newpara', '~\\\\\\\\')
content = content.replace('\\backslash', '\\textbackslash')
content = content.replace('|', '\\textbar ')
retTxt += content
elif str(type(content)) == "<class 'BeautifulSoup.Tag'>":
if content.name == 'b':
retTxt += '\\textbf{' + convertToTex(str(content)) + '}'
elif content.name == 'u':
retTxt += '\underline{' + convertToTex(str(content)) + '}'
elif content.name == 'i':
retTxt += '\\textit{' + convertToTex(str(content)) + '}'
elif content.name == 'ul':
retTxt += '\n\\begin{itemize}'
for item in content.contents:
if str(type(item)) == "<class 'BeautifulSoup.Tag'>":
retTxt += '\n \item ' + convertToTex(str(item))
retTxt += '\n\end{itemize}\n'
elif content.name == 'chapter':
attrs = dict(content.attrs)
if not attrs.has_key('name'):
print "One of the chapters do not have a 'name' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif attrs['name'] == '':
print "One of the chapters' name is empty. Please correct it and re-run."
exit(0)
else:
retTxt += '\\begin{projChapter}{' + attrs['name'] + '}' + convertToTex(str(content)) + '\\end{projChapter}'
elif content.name == 'section':
attrs = dict(content.attrs)
if not attrs.has_key('name'):
print "One of the sections do not have a 'name' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif attrs['name'] == '':
print "One of the sections' name is empty. Please correct it and re-run."
exit(0)
else:
retTxt += '\\begin{projSection}{' + attrs['name'] + '}' + convertToTex(str(content)) + '\\end{projSection}'
elif content.name == 'subsection':
attrs = dict(content.attrs)
if not attrs.has_key('name'):
print "One of the subsections do not have a 'name' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif attrs['name'] == '':
print "One of the subsections' name is empty. Please correct it and re-run."
exit(0)
else:
retTxt += '\\begin{projSubSection}{' + attrs['name'] + '}' + convertToTex(str(content)) + '\\end{projSubSection}'
elif content.name == 'img':
props = dict(content.attrs)
if not props.has_key('id'):
print "One of the images do not have an 'id' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('src'):
print "One of the images do not have a 'src' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('caption'):
print "One of the images do not have a 'caption' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('scale'):
print "One of the images do not have a 'scale' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif props['id'] == '':
print "One of the images has an empty 'id'. Please correct it and re-run."
exit(0)
elif props['src'] == '':
print "One of the images has an empty 'src'. Please correct it and re-run."
exit(0)
elif props['scale'] == '':
print "Scaling factor for one of the images hasnt been defined. Please correct it and re-run."
exit(0)
else:
if figInTabular:
retTxt += '\\raisebox{-\\totalheight}{\centering\n\includegraphics[scale=' + props['scale'] + ']{' + props['src'] + '}\n\label{' + props['id'] + '}}\n'
else:
retTxt += '\\begin{figure}[ht!]\n\centering\n\includegraphics[scale=' + props['scale'] + ']{' + props['src'] + '}\n\caption{' + props['caption'] + '}\n\label{' + props['id'] + '}\n\end{figure}\n'
elif content.name == 'ref':
props = dict(content.attrs)
if not props.has_key('type'):
print "One of the references doesnt have a 'type' attribute. Please correct it and re-run."
exit(0)
elif props['type'] == '':
print "One of the references has an empty string for 'type'. Please correct it and re-run."
exit(0)
else:
if props['type'] == 'figure':
retTxt += 'Figure \\ref{' + content.text + '}'
elif props['type'] == 'table':
retTxt += 'Table \\ref{' + content.text +'}'
elif content.name == 'table':
props = dict(content.attrs)
if not props.has_key('id'):
print "One of the tables do not have an 'id' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('alignments'):
print "One of the tables do not have a 'alignments' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('caption'):
print "One of the tables do not have a 'caption' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif props['id'] == '':
print "One of the tables has an empty 'id'. Please correct it and re-run."
exit(0)
elif props['alignments'] == '':
print "One of the tables has an empty 'alignments'. Please correct it and re-run."
exit(0)
else:
alignments = props['alignments']
retTxt += '\\begin{table}[h]\\begin{center}\\begin{tabular}{' + alignments + '}'
for horizontal in content.contents:
if str(type(horizontal)) == "<class 'BeautifulSoup.Tag'>":
if horizontal.name == "tr":
cols = horizontal.contents
numOfCols = len(cols)
for i in range(numOfCols):
if str(type(cols[i])) == "<class 'BeautifulSoup.Tag'>":
retTxt += convertToTex(str(cols[i]), figInTabular=True)
print str(cols[i])
if i != numOfCols - 2:
retTxt += ' & '
else:
retTxt += ' \\\\\n'
elif horizontal.name == 'hline':
retTxt += '\hline\n'
retTxt += '\\end{tabular}\\end{center}\\caption{' + props['caption'] + '}\\label{' + props['id'] + '}\\end{table}'
return retTxt
def main():
f = open("fyp.stmplt", "r")
sty = f.read()
f.close()
f = open("fyp.ttmplt", "r")
tex = f.read()
f.close()
f = open("report.xml", "r")
xmlStr = f.read()
f.close()
root = etree.fromstring(xmlStr)
projectTitle = root.find('projectDetails').find('projectTitle').text
guide = root.find('projectDetails').find('guide').text
principal = root.find('projectDetails').find('principal').text
HOD = root.find('projectDetails').find('HOD').text
durationLong = root.find('projectDetails').find('duration').text
collLogoPath = root.find('projectDetails').find('collLogoPath').text
defaultFontFamily = root.find('font').find('defaultFontFamily').text
fontLevelOne = root.find('font').find('levelOne').text
fontLevelTwo = root.find('font').find('levelTwo').text
fontLevelThree = root.find('font').find('levelThree').text
fontLevelFour = root.find('font').find('levelFour').text
numberStrings = ["One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten"]
students = [ (student.find('name').text, student.find('usn').text) for student in root.find('students').getchildren() if student.tag == 'student']
students = [ (numberStrings[i], students[i][0], students[i][1]) for i in range(len(students))]
headerLogoScale = root.find('header').find('logoScale').text
headerTitleSize = root.find('header').find('titleSize').text
headerLineWidth = root.find('header').find('lineWidth').text
dept = root.find('footer').find('dept').text
durationShort = root.find('footer').find('duration').text
footerLineWidth = root.find('footer').find('lineWidth').text
chapterFontFamily = root.find('chapterControls').find('fontFamily').text
coverFontFamily = root.find('cover').find('fontFamily').text
univName = root.find('cover').find('univName').text
univLogoPath = root.find('cover').find('univLogoPath').text
univLogoScale = root.find('cover').find('univLogoScale').text
course = root.find('cover').find('course').text
stream = root.find('cover').find('stream').text
deptName = root.find('cover').find('deptName').text
collName = root.find('cover').find('collName').text
affiliation = root.find('cover').find('affiliation').text
address = root.find('cover').find('address').text
collCoverLogoScale = root.find('cover').find('collCoverLogoScale').text
vspaceInterblock = root.find('cover').find('vspaceInterblock').text
vspaceIntrablock = root.find('cover').find('vspaceIntrablock').text
certificateLogoScale = root.find('certificate').find('logoScale').text
certificateCourse = root.find('certificate').find('course').text
certificateStream = root.find('certificate').find('stream').text
certificateUnivName = root.find('certificate').find('univName').text
abstractFontFamily = root.find('abstractControls').find('fontFamily').text
'''
modifying the tex file
'''
tex = fillContent(tex, 'newcommand{\projectTitle}{', projectTitle)
tex = fillContent(tex, 'newcommand{\guide}{', guide)
tex = fillContent(tex, 'newcommand{\principal}{', principal)
tex = fillContent(tex, 'newcommand{\HOD}{', HOD)
tex = fillContent(tex, 'newcommand{\durationLong}{', durationLong)
tex = fillContent(tex, 'newcommand{\headerLineWidth}{', headerLineWidth)
tex = fillContent(tex, 'newcommand{\\footerLineWidth}{', footerLineWidth)
tex = fillContent(tex, 'newcommand{\collLogoPath}{', collLogoPath)
tex = fillContent(tex, 'newcommand{\defaultFontFamily}{', defaultFontFamily)
tex = fillContent(tex, 'newcommand{\\fontLevelOne}{', fontLevelOne)
tex = fillContent(tex, 'newcommand{\\fontLevelTwo}{', fontLevelTwo)
tex = fillContent(tex, 'newcommand{\\fontLevelThree}{', fontLevelThree)
tex = fillContent(tex, 'newcommand{\\fontLevelFour}{', fontLevelFour)
insIndex = tex.index('@studentsList')
insStr = ''
for student in students:
insStr += '\\newcommand{\\student' + student[0] + '}{' + student[1] + '}\n'
insStr += '\\newcommand{\\usn' + student[0] + '}{' + student[2] + '}\n'
tex = tex[:insIndex] + insStr + tex[insIndex + len('@studentsList'):]
tex = fillContent(tex, 'newcommand{\headerLogoScale}{', headerLogoScale)
tex = fillContent(tex, 'newcommand{\headerTitleSize}{', headerTitleSize)
tex = fillContent(tex, 'newcommand{\dept}{', dept)
tex = fillContent(tex, 'newcommand{\durationShort}{', durationShort)
tex = fillContent(tex, 'newcommand{\chapterFontFamily}{', chapterFontFamily)
tex = fillContent(tex, 'newcommand{\coverFontFamily}{', coverFontFamily)
tex = fillContent(tex, 'newcommand{\univName}{', univName)
tex = fillContent(tex, 'newcommand{\univLogoPath}{', univLogoPath)
tex = fillContent(tex, 'newcommand{\univLogoScale}{', univLogoScale)
tex = fillContent(tex, 'newcommand{\course}{', course)
tex = fillContent(tex, 'newcommand{\stream}{', stream)
tex = fillContent(tex, 'newcommand{\deptName}{', deptName)
tex = fillContent(tex, 'newcommand{\collName}{', collName)
tex = fillContent(tex, 'newcommand{\\affiliation}{', affiliation)
tex = fillContent(tex, 'newcommand{\\address}{', address)
tex = fillContent(tex, 'newcommand{\collCoverLogoScale}{', collCoverLogoScale)
tex = fillContent(tex, 'newcommand{\\vspaceInterblock}{', vspaceInterblock)
tex = fillContent(tex, 'newcommand{\\vspaceIntrablock}{', vspaceIntrablock)
tex = fillContent(tex, 'newcommand{\certificateLogoScale}{', certificateLogoScale)
tex = fillContent(tex, 'newcommand{\certificateCourse}{', certificateCourse)
tex = fillContent(tex, 'newcommand{\certificateStream}{', certificateStream)
tex = fillContent(tex, 'newcommand{\certificateUnivName}{', certificateUnivName)
tex = fillContent(tex, 'newcommand{\\abstractFontFamily}{', abstractFontFamily)
insIndex = tex.index('@acknowledgement')
insStr = etree.tostring(root.find('acknowledgement'))
insStr = convertToTex(insStr)
tex = tex[:insIndex] + insStr + tex[insIndex + len('@acknowledgement'):]
insIndex = tex.index('@abstract')
insStr = etree.tostring(root.find('abstract'))
insStr = convertToTex(insStr)
tex = tex[:insIndex] + insStr + tex[insIndex + len('@abstract'):]
insIndex = tex.index('@chapters')
insStr = ''
chapters = root.findall('chapter')
for chapter in chapters:
insStrTemp = etree.tostring(chapter)
insStrTemp = convertToTex('<content>' + insStrTemp + '</content>')
insStr += insStrTemp + '\n'
tex = tex[:insIndex] + insStr + tex[insIndex + len('@chapters'):]
f = open("sample.tex", "w")
f.write(tex)
f.close()
'''
modifying the style file
'''
#modifying the cover page
coverIndex = sty.index("@studentsListCover")
insStrCover = ''
for i in range(len(students)):
if i == 0:
insStrCover += '\\vspace{\\vspaceInterblock}\n\\textbf{\\student' + students[i][0] + ' - \usn' + students[i][0] + '}\n\n'
else:
insStrCover += '\\vspace{\\vspaceIntrablock}\n\\textbf{\\student' + students[i][0] + ' - \usn' + students[i][0] + '}\n\n'
sty = sty[:coverIndex] + insStrCover + sty[coverIndex + len('@studentsListCover'):]
#modifying the certificate
certIndex = sty.index("@studentsListCertificate")
insStrCertificate = ''
for i in range(len(students)):
if i == 0:
insStrCertificate += '\\vspace{\\vspaceInterblock}\n\\textbf{\student' + students[i][0] + ', \usn' + students[i][0] + '}\n\n'
else:
insStrCertificate += '\\vspace{\\vspaceIntrablock}\n\\textbf{\student' + students[i][0] + ', \usn' + students[i][0] + '}\n\n'
print insStrCertificate
sty = sty[:certIndex] + insStrCertificate + sty[certIndex + len('@studentsListCertificate'):]
f = open("sample.sty", "w")
f.write(sty)
f.close()
os.system("pdflatex sample.tex")
os.system("pdflatex sample.tex") #it must be compiled twice in order to get the table of contents updated properly
if __name__ == '__main__':
main() | vijeshm/eezyReport | eezyReport.py | Python | mit | 16,797 |
import pytest
from mock import Mock
from sigopt.orchestrate.services.aws_provider_bag import AwsProviderServiceBag
class TestOrchestrateServiceBag(object):
@pytest.fixture
def orchestrate_services(self):
return Mock()
def test_orchestrate_service_bag(self, orchestrate_services):
services = AwsProviderServiceBag(orchestrate_services)
assert services.cloudformation_service is not None
assert services.cloudformation_service.client is not None
assert services.cloudformation_service.cloudformation is not None
assert services.ec2_service is not None
assert services.ec2_service.ec2 is not None
assert services.ecr_service is not None
assert services.ecr_service.client is not None
assert services.eks_service is not None
assert services.eks_service.client is not None
assert services.iam_service is not None
assert services.iam_service.client is not None
assert services.iam_service.iam is not None
assert services.sts_service is not None
assert services.sts_service.client is not None
| sigopt/sigopt-python | test/orchestrate/services/aws_provider_bag_test.py | Python | mit | 1,060 |
fa_icons = {
'fa-glass': u"\uf000",
'fa-music': u"\uf001",
'fa-search': u"\uf002",
'fa-envelope-o': u"\uf003",
'fa-heart': u"\uf004",
'fa-star': u"\uf005",
'fa-star-o': u"\uf006",
'fa-user': u"\uf007",
'fa-film': u"\uf008",
'fa-th-large': u"\uf009",
'fa-th': u"\uf00a",
'fa-th-list': u"\uf00b",
'fa-check': u"\uf00c",
'fa-times': u"\uf00d",
'fa-search-plus': u"\uf00e",
'fa-search-minus': u"\uf010",
'fa-power-off': u"\uf011",
'fa-signal': u"\uf012",
'fa-gear': u"\uf013",
'fa-cog': u"\uf013",
'fa-trash-o': u"\uf014",
'fa-home': u"\uf015",
'fa-file-o': u"\uf016",
'fa-clock-o': u"\uf017",
'fa-road': u"\uf018",
'fa-download': u"\uf019",
'fa-arrow-circle-o-down': u"\uf01a",
'fa-arrow-circle-o-up': u"\uf01b",
'fa-inbox': u"\uf01c",
'fa-play-circle-o': u"\uf01d",
'fa-rotate-right': u"\uf01e",
'fa-repeat': u"\uf01e",
'fa-refresh': u"\uf021",
'fa-list-alt': u"\uf022",
'fa-lock': u"\uf023",
'fa-flag': u"\uf024",
'fa-headphones': u"\uf025",
'fa-volume-off': u"\uf026",
'fa-volume-down': u"\uf027",
'fa-volume-up': u"\uf028",
'fa-qrcode': u"\uf029",
'fa-barcode': u"\uf02a",
'fa-tag': u"\uf02b",
'fa-tags': u"\uf02c",
'fa-book': u"\uf02d",
'fa-bookmark': u"\uf02e",
'fa-print': u"\uf02f",
'fa-camera': u"\uf030",
'fa-font': u"\uf031",
'fa-bold': u"\uf032",
'fa-italic': u"\uf033",
'fa-text-height': u"\uf034",
'fa-text-width': u"\uf035",
'fa-align-left': u"\uf036",
'fa-align-center': u"\uf037",
'fa-align-right': u"\uf038",
'fa-align-justify': u"\uf039",
'fa-list': u"\uf03a",
'fa-dedent': u"\uf03b",
'fa-outdent': u"\uf03b",
'fa-indent': u"\uf03c",
'fa-video-camera': u"\uf03d",
'fa-photo': u"\uf03e",
'fa-image': u"\uf03e",
'fa-picture-o': u"\uf03e",
'fa-pencil': u"\uf040",
'fa-map-marker': u"\uf041",
'fa-adjust': u"\uf042",
'fa-tint': u"\uf043",
'fa-edit': u"\uf044",
'fa-pencil-square-o': u"\uf044",
'fa-share-square-o': u"\uf045",
'fa-check-square-o': u"\uf046",
'fa-arrows': u"\uf047",
'fa-step-backward': u"\uf048",
'fa-fast-backward': u"\uf049",
'fa-backward': u"\uf04a",
'fa-play': u"\uf04b",
'fa-pause': u"\uf04c",
'fa-stop': u"\uf04d",
'fa-forward': u"\uf04e",
'fa-fast-forward': u"\uf050",
'fa-step-forward': u"\uf051",
'fa-eject': u"\uf052",
'fa-chevron-left': u"\uf053",
'fa-chevron-right': u"\uf054",
'fa-plus-circle': u"\uf055",
'fa-minus-circle': u"\uf056",
'fa-times-circle': u"\uf057",
'fa-check-circle': u"\uf058",
'fa-question-circle': u"\uf059",
'fa-info-circle': u"\uf05a",
'fa-crosshairs': u"\uf05b",
'fa-times-circle-o': u"\uf05c",
'fa-check-circle-o': u"\uf05d",
'fa-ban': u"\uf05e",
'fa-arrow-left': u"\uf060",
'fa-arrow-right': u"\uf061",
'fa-arrow-up': u"\uf062",
'fa-arrow-down': u"\uf063",
'fa-mail-forward': u"\uf064",
'fa-share': u"\uf064",
'fa-expand': u"\uf065",
'fa-compress': u"\uf066",
'fa-plus': u"\uf067",
'fa-minus': u"\uf068",
'fa-asterisk': u"\uf069",
'fa-exclamation-circle': u"\uf06a",
'fa-gift': u"\uf06b",
'fa-leaf': u"\uf06c",
'fa-fire': u"\uf06d",
'fa-eye': u"\uf06e",
'fa-eye-slash': u"\uf070",
'fa-warning': u"\uf071",
'fa-exclamation-triangle': u"\uf071",
'fa-plane': u"\uf072",
'fa-calendar': u"\uf073",
'fa-random': u"\uf074",
'fa-comment': u"\uf075",
'fa-magnet': u"\uf076",
'fa-chevron-up': u"\uf077",
'fa-chevron-down': u"\uf078",
'fa-retweet': u"\uf079",
'fa-shopping-cart': u"\uf07a",
'fa-folder': u"\uf07b",
'fa-folder-open': u"\uf07c",
'fa-arrows-v': u"\uf07d",
'fa-arrows-h': u"\uf07e",
'fa-bar-chart-o': u"\uf080",
'fa-twitter-square': u"\uf081",
'fa-facebook-square': u"\uf082",
'fa-camera-retro': u"\uf083",
'fa-key': u"\uf084",
'fa-gears': u"\uf085",
'fa-cogs': u"\uf085",
'fa-comments': u"\uf086",
'fa-thumbs-o-up': u"\uf087",
'fa-thumbs-o-down': u"\uf088",
'fa-star-half': u"\uf089",
'fa-heart-o': u"\uf08a",
'fa-sign-out': u"\uf08b",
'fa-linkedin-square': u"\uf08c",
'fa-thumb-tack': u"\uf08d",
'fa-external-link': u"\uf08e",
'fa-sign-in': u"\uf090",
'fa-trophy': u"\uf091",
'fa-github-square': u"\uf092",
'fa-upload': u"\uf093",
'fa-lemon-o': u"\uf094",
'fa-phone': u"\uf095",
'fa-square-o': u"\uf096",
'fa-bookmark-o': u"\uf097",
'fa-phone-square': u"\uf098",
'fa-twitter': u"\uf099",
'fa-facebook': u"\uf09a",
'fa-github': u"\uf09b",
'fa-unlock': u"\uf09c",
'fa-credit-card': u"\uf09d",
'fa-rss': u"\uf09e",
'fa-hdd-o': u"\uf0a0",
'fa-bullhorn': u"\uf0a1",
'fa-bell': u"\uf0f3",
'fa-certificate': u"\uf0a3",
'fa-hand-o-right': u"\uf0a4",
'fa-hand-o-left': u"\uf0a5",
'fa-hand-o-up': u"\uf0a6",
'fa-hand-o-down': u"\uf0a7",
'fa-arrow-circle-left': u"\uf0a8",
'fa-arrow-circle-right': u"\uf0a9",
'fa-arrow-circle-up': u"\uf0aa",
'fa-arrow-circle-down': u"\uf0ab",
'fa-globe': u"\uf0ac",
'fa-wrench': u"\uf0ad",
'fa-tasks': u"\uf0ae",
'fa-filter': u"\uf0b0",
'fa-briefcase': u"\uf0b1",
'fa-arrows-alt': u"\uf0b2",
'fa-group': u"\uf0c0",
'fa-users': u"\uf0c0",
'fa-chain': u"\uf0c1",
'fa-link': u"\uf0c1",
'fa-cloud': u"\uf0c2",
'fa-flask': u"\uf0c3",
'fa-cut': u"\uf0c4",
'fa-scissors': u"\uf0c4",
'fa-copy': u"\uf0c5",
'fa-files-o': u"\uf0c5",
'fa-paperclip': u"\uf0c6",
'fa-save': u"\uf0c7",
'fa-floppy-o': u"\uf0c7",
'fa-square': u"\uf0c8",
'fa-navicon': u"\uf0c9",
'fa-reorder': u"\uf0c9",
'fa-bars': u"\uf0c9",
'fa-list-ul': u"\uf0ca",
'fa-list-ol': u"\uf0cb",
'fa-strikethrough': u"\uf0cc",
'fa-underline': u"\uf0cd",
'fa-table': u"\uf0ce",
'fa-magic': u"\uf0d0",
'fa-truck': u"\uf0d1",
'fa-pinterest': u"\uf0d2",
'fa-pinterest-square': u"\uf0d3",
'fa-google-plus-square': u"\uf0d4",
'fa-google-plus': u"\uf0d5",
'fa-money': u"\uf0d6",
'fa-caret-down': u"\uf0d7",
'fa-caret-up': u"\uf0d8",
'fa-caret-left': u"\uf0d9",
'fa-caret-right': u"\uf0da",
'fa-columns': u"\uf0db",
'fa-unsorted': u"\uf0dc",
'fa-sort': u"\uf0dc",
'fa-sort-down': u"\uf0dd",
'fa-sort-desc': u"\uf0dd",
'fa-sort-up': u"\uf0de",
'fa-sort-asc': u"\uf0de",
'fa-envelope': u"\uf0e0",
'fa-linkedin': u"\uf0e1",
'fa-rotate-left': u"\uf0e2",
'fa-undo': u"\uf0e2",
'fa-legal': u"\uf0e3",
'fa-gavel': u"\uf0e3",
'fa-dashboard': u"\uf0e4",
'fa-tachometer': u"\uf0e4",
'fa-comment-o': u"\uf0e5",
'fa-comments-o': u"\uf0e6",
'fa-flash': u"\uf0e7",
'fa-bolt': u"\uf0e7",
'fa-sitemap': u"\uf0e8",
'fa-umbrella': u"\uf0e9",
'fa-paste': u"\uf0ea",
'fa-clipboard': u"\uf0ea",
'fa-lightbulb-o': u"\uf0eb",
'fa-exchange': u"\uf0ec",
'fa-cloud-download': u"\uf0ed",
'fa-cloud-upload': u"\uf0ee",
'fa-user-md': u"\uf0f0",
'fa-stethoscope': u"\uf0f1",
'fa-suitcase': u"\uf0f2",
'fa-bell-o': u"\uf0a2",
'fa-coffee': u"\uf0f4",
'fa-cutlery': u"\uf0f5",
'fa-file-text-o': u"\uf0f6",
'fa-building-o': u"\uf0f7",
'fa-hospital-o': u"\uf0f8",
'fa-ambulance': u"\uf0f9",
'fa-medkit': u"\uf0fa",
'fa-fighter-jet': u"\uf0fb",
'fa-beer': u"\uf0fc",
'fa-h-square': u"\uf0fd",
'fa-plus-square': u"\uf0fe",
'fa-angle-double-left': u"\uf100",
'fa-angle-double-right': u"\uf101",
'fa-angle-double-up': u"\uf102",
'fa-angle-double-down': u"\uf103",
'fa-angle-left': u"\uf104",
'fa-angle-right': u"\uf105",
'fa-angle-up': u"\uf106",
'fa-angle-down': u"\uf107",
'fa-desktop': u"\uf108",
'fa-laptop': u"\uf109",
'fa-tablet': u"\uf10a",
'fa-mobile-phone': u"\uf10b",
'fa-mobile': u"\uf10b",
'fa-circle-o': u"\uf10c",
'fa-quote-left': u"\uf10d",
'fa-quote-right': u"\uf10e",
'fa-spinner': u"\uf110",
'fa-circle': u"\uf111",
'fa-mail-reply': u"\uf112",
'fa-reply': u"\uf112",
'fa-github-alt': u"\uf113",
'fa-folder-o': u"\uf114",
'fa-folder-open-o': u"\uf115",
'fa-smile-o': u"\uf118",
'fa-frown-o': u"\uf119",
'fa-meh-o': u"\uf11a",
'fa-gamepad': u"\uf11b",
'fa-keyboard-o': u"\uf11c",
'fa-flag-o': u"\uf11d",
'fa-flag-checkered': u"\uf11e",
'fa-terminal': u"\uf120",
'fa-code': u"\uf121",
'fa-mail-reply-all': u"\uf122",
'fa-reply-all': u"\uf122",
'fa-star-half-empty': u"\uf123",
'fa-star-half-full': u"\uf123",
'fa-star-half-o': u"\uf123",
'fa-location-arrow': u"\uf124",
'fa-crop': u"\uf125",
'fa-code-fork': u"\uf126",
'fa-unlink': u"\uf127",
'fa-chain-broken': u"\uf127",
'fa-question': u"\uf128",
'fa-info': u"\uf129",
'fa-exclamation': u"\uf12a",
'fa-superscript': u"\uf12b",
'fa-subscript': u"\uf12c",
'fa-eraser': u"\uf12d",
'fa-puzzle-piece': u"\uf12e",
'fa-microphone': u"\uf130",
'fa-microphone-slash': u"\uf131",
'fa-shield': u"\uf132",
'fa-calendar-o': u"\uf133",
'fa-fire-extinguisher': u"\uf134",
'fa-rocket': u"\uf135",
'fa-maxcdn': u"\uf136",
'fa-chevron-circle-left': u"\uf137",
'fa-chevron-circle-right': u"\uf138",
'fa-chevron-circle-up': u"\uf139",
'fa-chevron-circle-down': u"\uf13a",
'fa-html5': u"\uf13b",
'fa-css3': u"\uf13c",
'fa-anchor': u"\uf13d",
'fa-unlock-alt': u"\uf13e",
'fa-bullseye': u"\uf140",
'fa-ellipsis-h': u"\uf141",
'fa-ellipsis-v': u"\uf142",
'fa-rss-square': u"\uf143",
'fa-play-circle': u"\uf144",
'fa-ticket': u"\uf145",
'fa-minus-square': u"\uf146",
'fa-minus-square-o': u"\uf147",
'fa-level-up': u"\uf148",
'fa-level-down': u"\uf149",
'fa-check-square': u"\uf14a",
'fa-pencil-square': u"\uf14b",
'fa-external-link-square': u"\uf14c",
'fa-share-square': u"\uf14d",
'fa-compass': u"\uf14e",
'fa-toggle-down': u"\uf150",
'fa-caret-square-o-down': u"\uf150",
'fa-toggle-up': u"\uf151",
'fa-caret-square-o-up': u"\uf151",
'fa-toggle-right': u"\uf152",
'fa-caret-square-o-right': u"\uf152",
'fa-euro': u"\uf153",
'fa-eur': u"\uf153",
'fa-gbp': u"\uf154",
'fa-dollar': u"\uf155",
'fa-usd': u"\uf155",
'fa-rupee': u"\uf156",
'fa-inr': u"\uf156",
'fa-cny': u"\uf157",
'fa-rmb': u"\uf157",
'fa-yen': u"\uf157",
'fa-jpy': u"\uf157",
'fa-ruble': u"\uf158",
'fa-rouble': u"\uf158",
'fa-rub': u"\uf158",
'fa-won': u"\uf159",
'fa-krw': u"\uf159",
'fa-bitcoin': u"\uf15a",
'fa-btc': u"\uf15a",
'fa-file': u"\uf15b",
'fa-file-text': u"\uf15c",
'fa-sort-alpha-asc': u"\uf15d",
'fa-sort-alpha-desc': u"\uf15e",
'fa-sort-amount-asc': u"\uf160",
'fa-sort-amount-desc': u"\uf161",
'fa-sort-numeric-asc': u"\uf162",
'fa-sort-numeric-desc': u"\uf163",
'fa-thumbs-up': u"\uf164",
'fa-thumbs-down': u"\uf165",
'fa-youtube-square': u"\uf166",
'fa-youtube': u"\uf167",
'fa-xing': u"\uf168",
'fa-xing-square': u"\uf169",
'fa-youtube-play': u"\uf16a",
'fa-dropbox': u"\uf16b",
'fa-stack-overflow': u"\uf16c",
'fa-instagram': u"\uf16d",
'fa-flickr': u"\uf16e",
'fa-adn': u"\uf170",
'fa-bitbucket': u"\uf171",
'fa-bitbucket-square': u"\uf172",
'fa-tumblr': u"\uf173",
'fa-tumblr-square': u"\uf174",
'fa-long-arrow-down': u"\uf175",
'fa-long-arrow-up': u"\uf176",
'fa-long-arrow-left': u"\uf177",
'fa-long-arrow-right': u"\uf178",
'fa-apple': u"\uf179",
'fa-windows': u"\uf17a",
'fa-android': u"\uf17b",
'fa-linux': u"\uf17c",
'fa-dribbble': u"\uf17d",
'fa-skype': u"\uf17e",
'fa-foursquare': u"\uf180",
'fa-trello': u"\uf181",
'fa-female': u"\uf182",
'fa-male': u"\uf183",
'fa-gittip': u"\uf184",
'fa-sun-o': u"\uf185",
'fa-moon-o': u"\uf186",
'fa-archive': u"\uf187",
'fa-bug': u"\uf188",
'fa-vk': u"\uf189",
'fa-weibo': u"\uf18a",
'fa-renren': u"\uf18b",
'fa-pagelines': u"\uf18c",
'fa-stack-exchange': u"\uf18d",
'fa-arrow-circle-o-right': u"\uf18e",
'fa-arrow-circle-o-left': u"\uf190",
'fa-toggle-left': u"\uf191",
'fa-caret-square-o-left': u"\uf191",
'fa-dot-circle-o': u"\uf192",
'fa-wheelchair': u"\uf193",
'fa-vimeo-square': u"\uf194",
'fa-turkish-lira': u"\uf195",
'fa-try': u"\uf195",
'fa-plus-square-o': u"\uf196",
'fa-space-shuttle': u"\uf197",
'fa-slack': u"\uf198",
'fa-envelope-square': u"\uf199",
'fa-wordpress': u"\uf19a",
'fa-openid': u"\uf19b",
'fa-institution': u"\uf19c",
'fa-bank': u"\uf19c",
'fa-university': u"\uf19c",
'fa-mortar-board': u"\uf19d",
'fa-graduation-cap': u"\uf19d",
'fa-yahoo': u"\uf19e",
'fa-google': u"\uf1a0",
'fa-reddit': u"\uf1a1",
'fa-reddit-square': u"\uf1a2",
'fa-stumbleupon-circle': u"\uf1a3",
'fa-stumbleupon': u"\uf1a4",
'fa-delicious': u"\uf1a5",
'fa-digg': u"\uf1a6",
'fa-pied-piper-square': u"\uf1a7",
'fa-pied-piper': u"\uf1a7",
'fa-pied-piper-alt': u"\uf1a8",
'fa-drupal': u"\uf1a9",
'fa-joomla': u"\uf1aa",
'fa-language': u"\uf1ab",
'fa-fax': u"\uf1ac",
'fa-building': u"\uf1ad",
'fa-child': u"\uf1ae",
'fa-paw': u"\uf1b0",
'fa-spoon': u"\uf1b1",
'fa-cube': u"\uf1b2",
'fa-cubes': u"\uf1b3",
'fa-behance': u"\uf1b4",
'fa-behance-square': u"\uf1b5",
'fa-steam': u"\uf1b6",
'fa-steam-square': u"\uf1b7",
'fa-recycle': u"\uf1b8",
'fa-automobile': u"\uf1b9",
'fa-car': u"\uf1b9",
'fa-cab': u"\uf1ba",
'fa-taxi': u"\uf1ba",
'fa-tree': u"\uf1bb",
'fa-spotify': u"\uf1bc",
'fa-deviantart': u"\uf1bd",
'fa-soundcloud': u"\uf1be",
'fa-database': u"\uf1c0",
'fa-file-pdf-o': u"\uf1c1",
'fa-file-word-o': u"\uf1c2",
'fa-file-excel-o': u"\uf1c3",
'fa-file-powerpoint-o': u"\uf1c4",
'fa-file-photo-o': u"\uf1c5",
'fa-file-picture-o': u"\uf1c5",
'fa-file-image-o': u"\uf1c5",
'fa-file-zip-o': u"\uf1c6",
'fa-file-archive-o': u"\uf1c6",
'fa-file-sound-o': u"\uf1c7",
'fa-file-audio-o': u"\uf1c7",
'fa-file-movie-o': u"\uf1c8",
'fa-file-video-o': u"\uf1c8",
'fa-file-code-o': u"\uf1c9",
'fa-vine': u"\uf1ca",
'fa-codepen': u"\uf1cb",
'fa-jsfiddle': u"\uf1cc",
'fa-life-bouy': u"\uf1cd",
'fa-life-saver': u"\uf1cd",
'fa-support': u"\uf1cd",
'fa-life-ring': u"\uf1cd",
'fa-circle-o-notch': u"\uf1ce",
'fa-ra': u"\uf1d0",
'fa-rebel': u"\uf1d0",
'fa-ge': u"\uf1d1",
'fa-empire': u"\uf1d1",
'fa-git-square': u"\uf1d2",
'fa-git': u"\uf1d3",
'fa-hacker-news': u"\uf1d4",
'fa-tencent-weibo': u"\uf1d5",
'fa-qq': u"\uf1d6",
'fa-wechat': u"\uf1d7",
'fa-weixin': u"\uf1d7",
'fa-send': u"\uf1d8",
'fa-paper-plane': u"\uf1d8",
'fa-send-o': u"\uf1d9",
'fa-paper-plane-o': u"\uf1d9",
'fa-history': u"\uf1da",
'fa-circle-thin': u"\uf1db",
'fa-header': u"\uf1dc",
'fa-paragraph': u"\uf1dd",
'fa-sliders': u"\uf1de",
'fa-share-alt': u"\uf1e0",
'fa-share-alt-square': u"\uf1e1",
'fa-bomb': u"\uf1e2",
} | Kovak/KivyNBT | flat_kivy/fa_icon_definitions.py | Python | mit | 15,461 |
#!/usr/bin/env python3
"""*.h5 の値の最小・最大などを確認するスクリプト。"""
import argparse
import pathlib
import sys
import h5py
import numpy as np
try:
import pytoolkit as tk
except ImportError:
sys.path.insert(0, str(pathlib.Path(__file__).resolve().parent.parent.parent))
import pytoolkit as tk
logger = tk.log.get(__name__)
def main():
tk.utils.better_exceptions()
tk.log.init(None)
parser = argparse.ArgumentParser(description="*.h5 の値の最小・最大などを確認するスクリプト。")
parser.add_argument("model_path", type=pathlib.Path, help="対象ファイルのパス(*.h5)")
args = parser.parse_args()
logger.info(f"{args.model_path} Loading...")
absmax_list = []
with h5py.File(args.model_path, mode="r") as f:
model_weights = f["model_weights"]
layer_names = model_weights.attrs["layer_names"]
for layer_name in layer_names:
g = model_weights[layer_name]
weight_names = g.attrs["weight_names"]
for weight_name in weight_names:
w = np.asarray(g[weight_name])
key = f"/model_weights/{layer_name}/{weight_name}"
if w.size == 1:
logger.info(f"{key}\t value={np.ravel(w)[0]:.2f}")
else:
logger.info(
f"{key}\t min={w.min():.2f} max={w.max():.2f} mean={w.mean():.2f} std={w.std():.2f}"
)
absmax_list.append((key, np.abs(w).max()))
logger.info("abs Top-10:")
for key, absvalue in list(sorted(absmax_list, key=lambda x: -x[1]))[:10]:
logger.info(f"{absvalue:6.1f}: {key}")
if __name__ == "__main__":
main()
| ak110/pytoolkit | pytoolkit/bin/h5ls.py | Python | mit | 1,745 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from .. import activations, initializations
from ..utils.theano_utils import shared_zeros
from ..layers.core import Layer
class Convolution1D(Layer):
def __init__(self, nb_filter, stack_size, filter_length,
init='uniform', activation='linear', weights=None,
border_mode='valid', subsample_length=1,
W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None):
nb_row = 1
nb_col = filter_length
self.nb_filter = nb_filter
self.stack_size = stack_size
self.filter_length = filter_length
self.subsample_length = subsample_length
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = (1, subsample_length)
self.border_mode = border_mode
self.input = T.tensor4()
self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
if W_regularizer:
W_regularizer.set_param(self.W)
self.regularizers.append(W_regularizer)
if b_regularizer:
b_regularizer.set_param(self.b)
self.regularizers.append(b_regularizer)
if activity_regularizer:
activity_regularizer.set_layer(self)
self.regularizers.append(activity_regularizer)
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
conv_out = theano.tensor.nnet.conv.conv2d(X, self.W,
border_mode=self.border_mode, subsample=self.subsample)
output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
return output
def get_config(self):
return {"name":self.__class__.__name__,
"nb_filter":self.nb_filter,
"stack_size":self.stack_size,
"filter_length":self.filter_length,
"init":self.init.__name__,
"activation":self.activation.__name__,
"border_mode":self.border_mode,
"subsample_length":self.subsample_length}
class MaxPooling1D(Layer):
def __init__(self, pool_length=2, ignore_border=True):
self.pool_length = pool_length
self.poolsize = (1, pool_length)
self.ignore_border = ignore_border
self.input = T.tensor4()
self.params = []
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, self.poolsize, ignore_border=self.ignore_border)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"pool_length":self.pool_length,
"ignore_border":self.ignore_border}
class Convolution2D(Layer):
def __init__(self, nb_filter, stack_size, nb_row, nb_col,
init='glorot_uniform', activation='linear', weights=None,
border_mode='valid', subsample=(1, 1),
W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None):
super(Convolution2D,self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = subsample
self.border_mode = border_mode
self.nb_filter = nb_filter
self.stack_size = stack_size
self.nb_row = nb_row
self.nb_col = nb_col
self.input = T.tensor4()
self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
if W_regularizer:
W_regularizer.set_param(self.W)
self.regularizers.append(W_regularizer)
if b_regularizer:
b_regularizer.set_param(self.b)
self.regularizers.append(b_regularizer)
if activity_regularizer:
activity_regularizer.set_layer(self)
self.regularizers.append(activity_regularizer)
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
conv_out = theano.tensor.nnet.conv.conv2d(X, self.W,
border_mode=self.border_mode, subsample=self.subsample)
output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
return output
def get_config(self):
return {"name":self.__class__.__name__,
"nb_filter":self.nb_filter,
"stack_size":self.stack_size,
"nb_row":self.nb_row,
"nb_col":self.nb_col,
"init":self.init.__name__,
"activation":self.activation.__name__,
"border_mode":self.border_mode,
"subsample":self.subsample}
class MaxPooling2D(Layer):
def __init__(self, poolsize=(2, 2), ignore_border=True):
super(MaxPooling2D,self).__init__()
self.input = T.tensor4()
self.poolsize = poolsize
self.ignore_border = ignore_border
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, self.poolsize, ignore_border=self.ignore_border)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"poolsize":self.poolsize,
"ignore_border":self.ignore_border}
# class ZeroPadding2D(Layer): TODO
# class Convolution3D: TODO
# class MaxPooling3D: TODO
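# Illustrative sketch (not part of the original module): how Convolution2D and
# MaxPooling2D defined above might be stacked. The Sequential/Flatten imports
# and all sizes below are assumptions for illustration only, kept as comments
# so the module itself is unchanged.
#
#   from keras.models import Sequential
#   from keras.layers.core import Flatten
#
#   model = Sequential()
#   # nb_filter=32, stack_size=1 (single-channel input), 3x3 kernels
#   model.add(Convolution2D(32, 1, 3, 3, activation='relu'))
#   model.add(MaxPooling2D(poolsize=(2, 2)))
#   model.add(Flatten())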
| stonebig/keras | keras/layers/convolutional.py | Python | mit | 5,924 |
import os
import sys
import time
import subprocess
import yaml
import pathlib
class ClusterLauncher:
def __init__(self, config_yaml):
self.Config = config_yaml
def Launch(self):
#read config
with open(self.Config, 'r') as yf:
config = yaml.safe_load(yf)
#launch head node
head = ClusterLauncher.LaunchUniCAVEWindow(config["build-path"], config["head-node"])
#wait a bit before launching child nodes
if config["head-wait"] is not None:
time.sleep(config["head-wait"])
#launch child nodes
children = []
for child_node in config["child-nodes"]:
children.append(ClusterLauncher.LaunchUniCAVEWindow(config["build-path"], child_node))
#wait a bit between launching each child
if config["child-wait"] is not None:
time.sleep(config["child-wait"])
#poll head node process
done = False
while not done:
if head.poll() is not None:
done = True
time.sleep(config["sleep-time"])
#when done, close child processes and exit
for child in children:
child.kill()
@staticmethod
def LaunchUniCAVEWindow(path, machine_name=None):
args = [path, "-popupWindow"]
if machine_name is not None:
args = args + ["overrideMachineName", machine_name]
return subprocess.Popen(args)
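# Sketch of the YAML layout that Launch() above expects; the keys are taken
# from the reads in Launch(), but the values are placeholders, not the
# project's actual configuration:
#
#   build-path: C:/UniCAVE/Build/UniCAVE.exe
#   head-node: HEAD-PC
#   head-wait: 5
#   child-wait: 1
#   sleep-time: 2
#   child-nodes:
#     - CHILD-PC-01
#     - CHILD-PC-02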
if __name__ == "__main__":
ClusterLauncher(os.path.join(pathlib.Path(__file__).parent.absolute(), "config/input_test.yaml")).Launch() | livingenvironmentslab/UniCAVE | Python-Cluster-Launcher/ClusterLauncher.py | Python | mit | 1,680 |
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.conf import settings
from django.forms.utils import ValidationError
from os import chmod
import hashlib
from io import BytesIO
try:
import pyclamd
except Exception:
    # pyclamd is optional; fall back to None so the feature check below
    # does not raise NameError when the import fails.
    pyclamd = None
class yatsFileField(forms.FileField):
default_error_messages = {
'virus_found': _(u"file is infected by virus: %s"),
'virus_engine_error': _(u'unable to initialize scan engine on host %s')
}
def clean(self, data, initial=None):
f = super(yatsFileField, self).clean(initial or data)
if f is None:
return None
elif not data and initial:
return initial
if settings.FILE_UPLOAD_VIRUS_SCAN and pyclamd:
# virus scan
try:
if not hasattr(pyclamd, 'scan_stream'):
cd = pyclamd.ClamdUnixSocket()
else:
pyclamd.init_network_socket('localhost', 3310)
cd = pyclamd
# We need to get a file object for clamav. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
chmod(data.temporary_file_path(), 0o664)
result = cd.scan_file(data.temporary_file_path())
else:
if hasattr(data, 'read'):
result = cd.scan_stream(data.read())
else:
result = cd.scan_stream(data['content'])
except:
from socket import gethostname
raise ValidationError(self.error_messages['virus_engine_error'] % gethostname())
if result:
                msg = ' '.join(result[list(result.keys())[0]]).replace('FOUND ', '')
raise ValidationError(self.error_messages['virus_found'] % msg)
hasher = hashlib.md5()
# We need to get a file object for clamav. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
with open(data.temporary_file_path(), 'rb') as afile:
buf = afile.read()
hasher.update(buf)
else:
if hasattr(data, 'read'):
data.seek(0)
buf = data.read()
hasher.update(buf)
else:
hasher.update(data['content'].read())
f.hash = hasher.hexdigest()
return f
| mediafactory/yats | modules/yats/fields.py | Python | mit | 2,574 |
import click
import os
import penguin.pdf as pdf
import penguin.utils as utils
def check_src(src):
if not all((map(utils.is_valid_source, src))):
raise click.BadParameter("src arguments must be either a valid directory"
" or pdf file.")
@click.group()
def penguin():
pass
@penguin.command()
@click.argument('src', nargs=-1)
@click.argument('dst')
@click.option('--bookmark', 'bookmark', flag_value='include-bookmarks',
default=True)
@click.option('--remove-blank-pages', 'rmblanks', flag_value='remove-blanks-pages',
default=False)
def combine(src, dst, bookmark, rmblanks):
"""Combine Pdf files from the source provided into the destination file.
:param src: The source Pdf file(s). src can either be a list of individual
files or directories containing Pdf files.
:param dst: The output file destination.
:param bookmark: True if the combined Pdf should include bookmarks.
:param rmblanks: True if blank pages should be removed from the combined Pdf.
"""
check_src(src)
combined_pdf = pdf.combine(src, bookmark, rmblanks)
with open(dst, 'wb') as f:
combined_pdf.write(f)
@penguin.command()
@click.argument('src',)
@click.argument('pages', nargs=-1)
@click.argument('dst')
def split(src, pages, dst):
"""Split the specified pages from src into the the dst.
:param src: The source Pdf file (directory).
:param pages: The page number(s) to extract from each file.
:param dst: The output file destination.
"""
    # src is a single path here (unlike combine, where it is a tuple of
    # paths), so wrap it before validating.
    check_src((src,))
combined_pdf = pdf.split(src, pages)
with open(dst, 'wb') as f:
combined_pdf.write(f)
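# Example invocations (a sketch; assumes a `penguin` console script is
# installed -- otherwise run this file directly with `python penguin_cli.py`):
#
#   penguin combine reports/ appendix.pdf merged.pdf --remove-blank-pages
#   penguin split merged.pdf 1 3 5 excerpt.pdf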
if __name__ == '__main__':
penguin()
| zrluety/penguin | penguin/scripts/penguin_cli.py | Python | mit | 1,738 |
import math
def even_numbers_only(thelist):
'''
Returns a list of even numbers in thelist
'''
return [x for x in thelist if x%2 == 0]
def is_perfect_square(x):
'''
Returns True if x is a perfect square, False otherwise
'''
thesqrt = int(math.sqrt(x))
return thesqrt * thesqrt == x
| joequery/joequery.me | joequery/blog/posts/code/python-builtin-functions/simple_functions.py | Python | mit | 319 |
from __future__ import print_function
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
import time
from datetime import timedelta
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Training Parameters
learning_rate = 0.005
training_steps = 15000
batch_size = 128
display_step = 200
# Network Parameters
num_input = 28 # MNIST data input (img shape: 28*28)
timesteps = 28 # timesteps
num_hidden = 128 # hidden layer num of features
num_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, timesteps, n_input)
# Required shape: 'timesteps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'timesteps' tensors of shape (batch_size,
# n_input)
x = tf.unstack(x, timesteps, 1)
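    # After unstack, `x` is a Python list of `timesteps` tensors, each of
    # shape (batch_size, num_input); static_rnn consumes one tensor per step.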
# Define a lstm cell with tensorflow
lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
loss_group = []
epoch_group = []
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
start_time = time.time()
for step in range(1, training_steps + 1):
tf.set_random_seed(23)
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Reshape data to get 28 seq of 28 elements
batch_x = batch_x.reshape((batch_size, timesteps, num_input))
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
Y: batch_y})
loss_group.append(loss)
epoch_group.append(step)
print("Step " + str(step) + ", Minibatch Loss= " +
"{:.4f}".format(loss) + ", Training Accuracy= " +
"{:.3f}".format(acc))
print("Optimization Finished!")
print(loss_group)
print(epoch_group)
plt.plot(epoch_group, loss_group)
plt.show()
end_time = time.time()
time_dif = end_time - start_time
print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
# Calculate accuracy for 128 mnist test images
test_len = 128
test_data = mnist.test.images[:test_len].reshape(
(-1, timesteps, num_input))
test_label = mnist.test.labels[:test_len]
print("Testing Accuracy:",
sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
| torn2537/AnimationJava | LSTM2.py | Python | mit | 3,840 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# References:
# [1] Jean-Luc Starck, Fionn Murtagh & Jalal M. Fadili
# Sparse Image and Signal Processing: Wavelets, Curvelets, Morphological Diversity
# Section 3.5, 6.6
#
# Credits:
# [1] https://github.com/abrazhe/image-funcut/blob/master/imfun/atrous.py
#
# Aaron LI
# Created: 2016-03-17
# Updated: 2016-04-22
#
# ChangeLog:
# 2016-04-22:
# * Add argument "end-scale" to specifiy the end denoising scale
# * Check outfile existence first
# * Add argument "start-scale" to specifiy the start denoising scale
# * Fix a bug about "p_cutoff" when "comp" contains ALL False's
# * Show more verbose information/details
# 2016-04-20:
# * Add argparse and main() for scripting
#
"""
Starlet wavelet transform, i.e., isotropic undecimated wavelet transform
(IUWT), or à trous wavelet transform.
And the multi-scale variance stabilizing transform (MS-VST), which can be
used to effectively remove Poisson noise.
"""
__version__ = "0.2.5"
__date__ = "2016-04-22"
import sys
import os
import argparse
from datetime import datetime
import numpy as np
import scipy as sp
import scipy.stats  # make sp.stats available for the FDR test below
from scipy import signal
from astropy.io import fits
class B3Spline: # {{{
"""
B3-spline wavelet.
"""
# scaling function (phi)
dec_lo = np.array([1.0, 4.0, 6.0, 4.0, 1.0]) / 16
dec_hi = np.array([-1.0, -4.0, 10.0, -4.0, -1.0]) / 16
rec_lo = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
rec_hi = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
# B3Spline }}}
class IUWT: # {{{
"""
Isotropic undecimated wavelet transform.
"""
## Decomposition filters list:
# a_{scale} = convole(a_0, filters[scale])
# Note: the zero-th scale filter (i.e., delta function) is the first
# element, thus the array index is the same as the decomposition scale.
filters = []
phi = None # wavelet scaling function (2D)
level = 0 # number of transform level
decomposition = None # decomposed coefficients/images
reconstruction = None # reconstructed image
# convolution boundary condition
boundary = "symm"
def __init__(self, phi=B3Spline.dec_lo, level=None, boundary="symm",
data=None):
self.set_wavelet(phi=phi)
self.level = level
self.boundary = boundary
self.data = np.array(data)
def reset(self):
"""
Reset the object attributes.
"""
self.data = None
self.phi = None
self.decomposition = None
self.reconstruction = None
self.level = 0
self.filters = []
self.boundary = "symm"
def load_data(self, data):
self.reset()
self.data = np.array(data)
def set_wavelet(self, phi):
self.reset()
phi = np.array(phi)
if phi.ndim == 1:
phi_ = phi.reshape(1, -1)
self.phi = np.dot(phi_.T, phi_)
elif phi.ndim == 2:
self.phi = phi
else:
raise ValueError("Invalid phi dimension")
def calc_filters(self):
"""
Calculate the convolution filters of each scale.
Note: the zero-th scale filter (i.e., delta function) is the first
element, thus the array index is the same as the decomposition scale.
"""
self.filters = []
# scale 0: delta function
h = np.array([[1]]) # NOTE: 2D
self.filters.append(h)
# scale 1
h = self.phi[::-1, ::-1]
self.filters.append(h)
for scale in range(2, self.level+1):
h_up = self.zupsample(self.phi, order=scale-1)
h2 = signal.convolve2d(h_up[::-1, ::-1], h, mode="same",
boundary=self.boundary)
self.filters.append(h2)
def transform(self, data, scale, boundary="symm"):
"""
Perform only one scale wavelet transform for the given data.
return:
[ approx, detail ]
"""
self.decomposition = []
approx = signal.convolve2d(data, self.filters[scale],
mode="same", boundary=self.boundary)
detail = data - approx
return [approx, detail]
def decompose(self, level, boundary="symm"):
"""
Perform IUWT decomposition in the plain loop way.
The filters of each scale/level are calculated first, then the
approximations of each scale/level are calculated by convolving the
raw/finest image with these filters.
return:
[ W_1, W_2, ..., W_n, A_n ]
n = level
W: wavelet details
A: approximation
"""
self.boundary = boundary
if self.level != level or self.filters == []:
self.level = level
self.calc_filters()
self.decomposition = []
approx = self.data
for scale in range(1, level+1):
# approximation:
approx2 = signal.convolve2d(self.data, self.filters[scale],
mode="same", boundary=self.boundary)
# wavelet details:
w = approx - approx2
self.decomposition.append(w)
if scale == level:
self.decomposition.append(approx2)
approx = approx2
return self.decomposition
def decompose_recursive(self, level, boundary="symm"):
"""
Perform the IUWT decomposition in the recursive way.
return:
[ W_1, W_2, ..., W_n, A_n ]
n = level
W: wavelet details
A: approximation
"""
self.level = level
self.boundary = boundary
self.decomposition = self.__decompose(self.data, self.phi, level=level)
return self.decomposition
def __decompose(self, data, phi, level):
"""
2D IUWT decomposition (or stationary wavelet transform).
This is a convolution version, where kernel is zero-upsampled
explicitly. Not fast.
Parameters:
- level : level of decomposition
- phi : low-pass filter kernel
- boundary : boundary conditions (passed to scipy.signal.convolve2d,
'symm' by default)
Returns:
list of wavelet details + last approximation. Each element in
the list is an image of the same size as the input image.
"""
if level <= 0:
return data
        shapecheck = list(map(lambda a, b: a > b, data.shape, phi.shape))
        assert np.all(shapecheck)
# approximation:
approx = signal.convolve2d(data, phi[::-1, ::-1], mode="same",
boundary=self.boundary)
# wavelet details:
w = data - approx
phi_up = self.zupsample(phi, order=1)
        shapecheck = list(map(lambda a, b: a > b, data.shape, phi_up.shape))
if level == 1:
return [w, approx]
elif not np.all(shapecheck):
print("Maximum allowed decomposition level reached",
file=sys.stderr)
return [w, approx]
else:
return [w] + self.__decompose(approx, phi_up, level-1)
@staticmethod
def zupsample(data, order=1):
"""
Upsample data array by interleaving it with zero's.
h{up_order: n}[l] = (1) h[l], if l % 2^n == 0;
(2) 0, otherwise
"""
shape = data.shape
new_shape = [ (2**order * (n-1) + 1) for n in shape ]
output = np.zeros(new_shape, dtype=data.dtype)
output[[ slice(None, None, 2**order) for d in shape ]] = data
return output
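    # Note on zupsample above (illustration, not original code): with order=1
    # a 1-D kernel [1, 4, 6, 4, 1] becomes [1, 0, 4, 0, 6, 0, 4, 0, 1], i.e.
    # the same samples interleaved with zeros.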
def reconstruct(self, decomposition=None):
if decomposition is not None:
reconstruction = np.sum(decomposition, axis=0)
return reconstruction
else:
self.reconstruction = np.sum(self.decomposition, axis=0)
def get_detail(self, scale):
"""
Get the wavelet detail coefficients of given scale.
Note: 1 <= scale <= level
"""
if scale < 1 or scale > self.level:
raise ValueError("Invalid scale")
return self.decomposition[scale-1]
def get_approx(self):
"""
Get the approximation coefficients of the largest scale.
"""
return self.decomposition[-1]
# IUWT }}}
class IUWT_VST(IUWT): # {{{
"""
    IUWT with multi-scale variance stabilizing transform (MS-VST).
    Reference:
[1] Bo Zhang, Jalal M. Fadili & Jean-Luc Starck,
IEEE Trans. Image Processing, 17, 17, 2008
"""
# VST coefficients and the corresponding asymptotic standard deviation
# of each scale.
vst_coef = []
def reset(self):
super(self.__class__, self).reset()
vst_coef = []
def __decompose(self):
raise AttributeError("No '__decompose' attribute")
@staticmethod
def soft_threshold(data, threshold):
if isinstance(data, np.ndarray):
data_th = data.copy()
data_th[np.abs(data) <= threshold] = 0.0
data_th[data > threshold] -= threshold
data_th[data < -threshold] += threshold
else:
data_th = data
if np.abs(data) <= threshold:
data_th = 0.0
elif data > threshold:
data_th -= threshold
else:
data_th += threshold
return data_th
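    # Note on soft_threshold above (illustration, not original code): with
    # threshold=1.0, 3.5 -> 2.5, -3.5 -> -2.5 and 0.4 -> 0.0; values are
    # shrunk toward zero rather than merely clipped.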
def tau(self, k, scale):
"""
Helper function used in VST coefficients calculation.
"""
return np.sum(np.power(self.filters[scale], k))
def filters_product(self, scale1, scale2):
"""
Calculate the scalar product of the filters of two scales,
considering only the overlapped part.
Helper function used in VST coefficients calculation.
"""
if scale1 > scale2:
filter_big = self.filters[scale1]
filter_small = self.filters[scale2]
else:
filter_big = self.filters[scale2]
filter_small = self.filters[scale1]
# crop the big filter to match the size of the small filter
size_big = filter_big.shape
size_small = filter_small.shape
size_diff2 = list(map(lambda a,b: (a-b)//2, size_big, size_small))
filter_big_crop = filter_big[
size_diff2[0]:(size_big[0]-size_diff2[0]),
size_diff2[1]:(size_big[1]-size_diff2[1])]
assert(np.all(list(map(lambda a,b: a==b,
size_small, filter_big_crop.shape))))
product = np.sum(filter_small * filter_big_crop)
return product
def calc_vst_coef(self):
"""
Calculate the VST coefficients and the corresponding
asymptotic standard deviation of each scale, according to the
calculated filters of each scale/level.
"""
self.vst_coef = []
for scale in range(self.level+1):
b = 2 * np.sqrt(np.abs(self.tau(1, scale)) / self.tau(2, scale))
c = 7.0*self.tau(2, scale) / (8.0*self.tau(1, scale)) - \
self.tau(3, scale) / (2.0*self.tau(2, scale))
if scale == 0:
std = -1.0
else:
std = np.sqrt((self.tau(2, scale-1) / \
(4 * self.tau(1, scale-1)**2)) + \
(self.tau(2, scale) / (4 * self.tau(1, scale)**2)) - \
(self.filters_product(scale-1, scale) / \
(2 * self.tau(1, scale-1) * self.tau(1, scale))))
self.vst_coef.append({ "b": b, "c": c, "std": std })
def vst(self, data, scale, coupled=True):
"""
        Perform the variance stabilizing transform.
XXX: parameter `coupled' why??
Credit: MSVST-V1.0/src/libmsvst/B3VSTAtrous.h
"""
self.vst_coupled = coupled
if self.vst_coef == []:
self.calc_vst_coef()
if coupled:
b = 1.0
else:
b = self.vst_coef[scale]["b"]
data_vst = b * np.sqrt(np.abs(data + self.vst_coef[scale]["c"]))
return data_vst
def ivst(self, data, scale, cbias=True):
"""
        Inverse variance stabilizing transform.
NOTE: assuming that `a_{j} + c^{j}' are all positive.
XXX: parameter `cbias' why??
`bias correction' is recommended while reconstruct the data
after estimation
Credit: MSVST-V1.0/src/libmsvst/B3VSTAtrous.h
"""
self.vst_cbias = cbias
if cbias:
cb = 1.0 / (self.vst_coef[scale]["b"] ** 2)
else:
cb = 0.0
data_ivst = data ** 2 + cb - self.vst_coef[scale]["c"]
return data_ivst
def is_significant(self, scale, fdr=0.1, independent=False, verbose=False):
"""
Multiple hypothesis testing with false discovery rate (FDR) control.
`independent': whether the test statistics of all the null
hypotheses are independent.
If `independent=True': FDR <= (m0/m) * q
otherwise: FDR <= (m0/m) * q * (1 + 1/2 + 1/3 + ... + 1/m)
References:
[1] False discovery rate - Wikipedia
https://en.wikipedia.org/wiki/False_discovery_rate
"""
coef = self.get_detail(scale)
std = self.vst_coef[scale]["std"]
pvalues = 2.0 * (1.0 - sp.stats.norm.cdf(np.abs(coef) / std))
p_sorted = pvalues.flatten()
p_sorted.sort()
N = len(p_sorted)
if independent:
cn = 1.0
else:
cn = np.sum(1.0 / np.arange(1, N+1))
p_comp = fdr * np.arange(N) / (N * cn)
comp = (p_sorted < p_comp)
if np.sum(comp) == 0:
# `comp' contains ALL False
p_cutoff = 0.0
else:
# cutoff p-value after FDR control/correction
p_cutoff = np.max(p_sorted[comp])
sig = (pvalues <= p_cutoff)
if verbose:
print("std/sigma: %g, p_cutoff: %g" % (std, p_cutoff),
flush=True, file=sys.stderr)
return (sig, p_cutoff)
def denoise(self, fdr=0.1, fdr_independent=False, start_scale=1,
end_scale=None, verbose=False):
"""
Denoise the wavelet coefficients by controlling FDR.
"""
self.fdr = fdr
self.fdr_indepent = fdr_independent
self.denoised = []
# supports of significant coefficients of each scale
self.sig_supports = [None] # make index match the scale
self.p_cutoff = [None]
if verbose:
print("MSVST denosing ...", flush=True, file=sys.stderr)
for scale in range(1, self.level+1):
coef = self.get_detail(scale)
if verbose:
print("\tScale %d: " % scale, end="",
flush=True, file=sys.stderr)
if (scale < start_scale) or \
((end_scale is not None) and scale > end_scale):
if verbose:
print("skipped", flush=True, file=sys.stderr)
sig, p_cutoff = None, None
else:
sig, p_cutoff = self.is_significant(scale, fdr=fdr,
independent=fdr_independent, verbose=verbose)
coef[np.logical_not(sig)] = 0.0
#
self.denoised.append(coef)
self.sig_supports.append(sig)
self.p_cutoff.append(p_cutoff)
# append the last approximation
self.denoised.append(self.get_approx())
def decompose(self, level=5, boundary="symm", verbose=False):
"""
2D IUWT decomposition with VST.
"""
self.boundary = boundary
if self.level != level or self.filters == []:
self.level = level
self.calc_filters()
self.calc_vst_coef()
self.decomposition = []
approx = self.data
if verbose:
print("IUWT decomposing (%d levels): " % level,
end="", flush=True, file=sys.stderr)
for scale in range(1, level+1):
if verbose:
print("%d..." % scale, end="", flush=True, file=sys.stderr)
# approximation:
approx2 = signal.convolve2d(self.data, self.filters[scale],
mode="same", boundary=self.boundary)
# wavelet details:
w = self.vst(approx, scale=scale-1) - self.vst(approx2, scale=scale)
self.decomposition.append(w)
if scale == level:
self.decomposition.append(approx2)
approx = approx2
if verbose:
print("DONE!", flush=True, file=sys.stderr)
return self.decomposition
def reconstruct_ivst(self, denoised=True, positive_project=True):
"""
Reconstruct the original image from the *un-denoised* decomposition
by applying the inverse VST.
This reconstruction result is also used as the `initial condition'
for the below `iterative reconstruction' algorithm.
arguments:
        * denoised: whether to use the denoised data or the direct decomposition
* positive_project: whether replace negative values with zeros
"""
if denoised:
decomposition = self.denoised
else:
decomposition = self.decomposition
self.positive_project = positive_project
details = np.sum(decomposition[:-1], axis=0)
approx = self.vst(decomposition[-1], scale=self.level)
reconstruction = self.ivst(approx+details, scale=0)
if positive_project:
reconstruction[reconstruction < 0.0] = 0.0
self.reconstruction = reconstruction
return reconstruction
def reconstruct(self, denoised=True, niter=10, verbose=False):
"""
Reconstruct the original image using iterative method with
L1 regularization, because the denoising violates the exact inverse
procedure.
arguments:
        * denoised: whether to use the denoised coefficients
* niter: number of iterations
"""
if denoised:
decomposition = self.denoised
else:
decomposition = self.decomposition
# L1 regularization
lbd = 1.0
delta = lbd / (niter - 1)
# initial solution
solution = self.reconstruct_ivst(denoised=denoised,
positive_project=True)
#
iuwt = IUWT(level=self.level)
iuwt.calc_filters()
# iterative reconstruction
if verbose:
print("Iteratively reconstructing (%d times): " % niter,
end="", flush=True, file=sys.stderr)
for i in range(niter):
if verbose:
print("%d..." % i, end="", flush=True, file=sys.stderr)
tempd = self.data.copy()
solution_decomp = []
for scale in range(1, self.level+1):
approx, detail = iuwt.transform(tempd, scale)
approx_sol, detail_sol = iuwt.transform(solution, scale)
# Update coefficients according to the significant supports,
                # which are acquired during the denoising procedure with FDR.
sig = self.sig_supports[scale]
detail_sol[sig] = detail[sig]
detail_sol = self.soft_threshold(detail_sol, threshold=lbd)
#
solution_decomp.append(detail_sol)
tempd = approx.copy()
solution = approx_sol.copy()
# last approximation (the two are the same)
solution_decomp.append(approx)
# reconstruct
solution = iuwt.reconstruct(decomposition=solution_decomp)
# discard all negative values
solution[solution < 0] = 0.0
#
lbd -= delta
if verbose:
print("DONE!", flush=True, file=sys.stderr)
#
self.reconstruction = solution
return self.reconstruction
# IUWT_VST }}}
def main():
# commandline arguments parser
parser = argparse.ArgumentParser(
description="Poisson Noise Removal with Multi-scale Variance " + \
"Stabling Transform and Wavelet Transform",
epilog="Version: %s (%s)" % (__version__, __date__))
parser.add_argument("-l", "--level", dest="level",
type=int, default=5,
help="level of the IUWT decomposition")
parser.add_argument("-r", "--fdr", dest="fdr",
type=float, default=0.1,
help="false discovery rate")
parser.add_argument("-I", "--fdr-independent", dest="fdr_independent",
action="store_true", default=False,
help="whether the FDR null hypotheses are independent")
parser.add_argument("-s", "--start-scale", dest="start_scale",
type=int, default=1,
help="which scale to start the denoising (inclusive)")
parser.add_argument("-e", "--end-scale", dest="end_scale",
type=int, default=0,
help="which scale to end the denoising (inclusive)")
parser.add_argument("-n", "--niter", dest="niter",
type=int, default=10,
help="number of iterations for reconstruction")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", default=False,
help="show verbose progress")
parser.add_argument("-C", "--clobber", dest="clobber",
action="store_true", default=False,
help="overwrite output file if exists")
parser.add_argument("infile", help="input image with Poisson noises")
parser.add_argument("outfile", help="output denoised image")
args = parser.parse_args()
if args.end_scale == 0:
args.end_scale = args.level
if args.verbose:
print("infile: '%s'" % args.infile, file=sys.stderr)
print("outfile: '%s'" % args.outfile, file=sys.stderr)
print("level: %d" % args.level, file=sys.stderr)
print("fdr: %.2f" % args.fdr, file=sys.stderr)
print("fdr_independent: %s" % args.fdr_independent, file=sys.stderr)
print("start_scale: %d" % args.start_scale, file=sys.stderr)
print("end_scale: %d" % args.end_scale, file=sys.stderr)
print("niter: %d\n" % args.niter, flush=True, file=sys.stderr)
if not args.clobber and os.path.exists(args.outfile):
raise OSError("outfile '%s' already exists" % args.outfile)
imgfits = fits.open(args.infile)
img = imgfits[0].data
# Remove Poisson noises
msvst = IUWT_VST(data=img)
msvst.decompose(level=args.level, verbose=args.verbose)
msvst.denoise(fdr=args.fdr, fdr_independent=args.fdr_independent,
start_scale=args.start_scale, end_scale=args.end_scale,
verbose=args.verbose)
msvst.reconstruct(denoised=True, niter=args.niter, verbose=args.verbose)
img_denoised = msvst.reconstruction
# Output
imgfits[0].data = img_denoised
imgfits[0].header.add_history("%s: Removed Poisson Noises @ %s" % (
os.path.basename(sys.argv[0]), datetime.utcnow().isoformat()))
imgfits[0].header.add_history(" TOOL: %s (v%s, %s)" % (
os.path.basename(sys.argv[0]), __version__, __date__))
imgfits[0].header.add_history(" PARAM: %s" % " ".join(sys.argv[1:]))
imgfits.writeto(args.outfile, checksum=True, clobber=args.clobber)
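# Example invocation (a sketch; the FITS file names are placeholders):
#
#   python msvst_starlet.py --level 5 --fdr 0.1 --niter 10 -v raw.fits denoised.fits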
if __name__ == "__main__":
main()
| liweitianux/atoolbox | python/msvst_starlet.py | Python | mit | 23,422 |
def subtrees_equal(expected_schema_node, actual_node):
if expected_schema_node[0] != actual_node.get_name():
return False
if expected_schema_node[1] != actual_node.get_state():
return False
expected_children = expected_schema_node[2]
actual_children = actual_node.get_children()
actual_children_names = [child.get_name() for child in actual_children]
actual_children_names.sort()
if len(expected_children) != len(actual_children_names):
return False
for (expected_child, actual_child_name) in \
zip(expected_children, actual_children_names):
        if not subtrees_equal(
                expected_child, actual_node.get_child(actual_child_name)):
            return False
return True | mkobos/tree_crawler | concurrent_tree_crawler/test/subtrees_comparer.py | Python | mit | 650 |
# -*- coding: utf-8 -*-
import sys
import string
from datetime import datetime,timedelta
import calendar
import csv
import re
# Open the files (fp: the log file passed as an argument, wfp: the csv file to write out)
fp = open(sys.argv[1],'r')
# If the log rotates at some point during the day, it may be necessary to read both /var/log/kern.log and /var/log/kern.log.1
wfp = open('/path/to/program/csv_data/formatted.csv', 'a')
writer = csv.writer(wfp, lineterminator='\n')
# Compute yesterday's date
yesterday = datetime.now() + timedelta(days=-1)
print "下記の日時のログ整形データをformatted.csvに書き出します"
print yesterday.strftime('%Y %b %d %H:%M:%S')
# Create the id counter and the list variable used for the output row
i = 0
w = [0] * 7
# Build the csv header (disabled)
#w[0] = "id"
#w[1] = "weekday"
#w[2] = "hour"
#w[3] = "smacaddr"
#w[4] = "dipaddr"
#w[5] = "proto"
#w[6] = "spt"
# Write one header row to the file (disabled)
#writer.writerow(w)
# Loop until EOF of the log file
for line in fp.readlines():
    # Select forwarding packets that leave the internal network
if line.find("FORWARD_F IN=eth1") >= 0:
        # Remove '[' because a space after the '[' of the kernel: number breaks the later split
line = line.replace('[','')
line = line.replace(' DF ',' ')
        # Split on spaces, keeping only non-empty tokens
l = filter(lambda x: len(x)>0, re.split(r" ", line))
        # Output log entries that match yesterday's date
if l[0] == yesterday.strftime('%b') and int(l[1], 10) == int(yesterday.strftime('%d'), 10):
# print l
# id
w[0] = i
            # Yesterday's weekday (Mon:0, Tue:1, Wed:2, Thu:3, Fri:4, Sat:5, Sun:6)
w[1] = yesterday.weekday()
            # Time (hour only)
w[2] = int(l[2][:2], 10)
            # Source MAC address
w[3] = l[9][4:]
            # Destination IP address
w[4] = l[11][4:]
            # Protocol
w[5] = l[17][6:]
            # Destination port number
            # If the protocol is ICMP, set the destination port number to 0
if l[17][6:] == "ICMP":
l[19] = 0
w[6] = l[19]
else:
w[6] = l[19][4:]
i += 1
            # Write one row to the file
writer.writerow(w)
# Close the files
fp.close()
wfp.close()
| High-Hill/bachelor_dap_gw | program/log_formatting.py | Python | mit | 2,502 |
'''
Precondition:
    the users tests must pass successfully first.
'''
from datetime import datetime, timedelta
import time
import pytest
import requests
from kii import AccountType, exceptions as exc, results as rs
from kii.data import BucketType, clauses as cl
from tests.conf import (
get_env,
get_api_with_test_user,
cleanup,
)
GROUP_NAME = 'test_group'
BUCKET_ID = 'test_bucket'
class TestApplicationScopeData:
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
cleanup()
self.api = get_api_with_test_user()
self.scope = self.api.data.application
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
try:
self.scope.delete_a_bucket(BUCKET_ID)
except exc.KiiBucketNotFoundError:
pass
cleanup()
def test_retrieve_bucket(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
assert obj
bucket = self.scope.retrieve_a_bucket(BUCKET_ID)
assert isinstance(bucket, rs.BucketResult)
assert bucket.bucket_type is BucketType.READ_WRITE
assert bucket.size > 0
def test_delete_bucket(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
assert obj
self.scope.delete_a_bucket(BUCKET_ID)
with pytest.raises(exc.KiiBucketNotFoundError):
self.scope.delete_a_bucket(BUCKET_ID)
def test_create_an_object(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
assert isinstance(obj, rs.CreateResult)
assert obj.object_id
assert obj.created_at
assert isinstance(obj.created_at, datetime)
assert obj.data_type
assert obj.data_type == 'application/json'
def test_retrieve_an_object(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
result = self.scope(BUCKET_ID).retrieve_an_object(obj.object_id)
assert isinstance(result, rs.ObjectResult)
assert result._id
assert isinstance(result._id, str)
assert result._created
assert result._modified
def test_fully_update_an_object(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
updated = bucket.fully_update_an_object(obj.object_id, {
'str key': 'updated string',
'dict key': {
'nest': {
'nest2': 'nest and nest',
},
},
'list key': [4, 5, 6],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
updated = bucket.retrieve_an_object(obj.object_id)
assert 'int key' not in updated
assert updated['str key'] == 'updated string'
assert updated['dict key'] == {
'nest': {
'nest2': 'nest and nest',
}
}
assert updated['list key'] == [4, 5, 6]
assert created._created == updated._created
assert created._modified != updated._modified
assert created._version != updated._version
def test_create_a_new_object_with_an_id(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created2 = bucket.create_a_new_object_with_an_id('new-object-id', {
'str key': 'created2 string',
'dict key': {
'nest': {
'nest2': 'nest and nest',
},
},
'list key': [4, 5, 6],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 2
created2 = bucket.retrieve_an_object('new-object-id')
assert 'int key' not in created2
assert created2['str key'] == 'created2 string'
assert created2['dict key'] == {
'nest': {
'nest2': 'nest and nest',
}
}
assert created2['list key'] == [4, 5, 6]
assert created._created != created2._created
assert created._modified != created2._modified
assert created._version == 1
assert created2._version == 1
def test_partially_update_an_object(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
updated = bucket.partially_update_an_object(obj.object_id, {
'str key': 'updated string',
'dict key': {
'nest': {
'nest2': 'nest and nest',
},
},
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
updated = bucket.retrieve_an_object(obj.object_id)
assert 'int key' in updated
assert updated['int key'] == 1
assert updated['str key'] == 'updated string'
assert updated['dict key'] == {
'nest': {
'nest2': 'nest and nest',
}
}
assert 'list key' in updated
assert updated['list key'] == [1, 2, 3]
assert created._created == updated._created
assert created._modified != updated._modified
assert created._version == 1
assert updated._version == 2
def test_delete_an_object(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
bucket.delete_an_object(obj.object_id)
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 0
with pytest.raises(exc.KiiObjectNotFoundError):
obj = bucket.retrieve_an_object(obj.object_id)
def test_query_for_objects(self):
bucket = self.scope(BUCKET_ID)
OBJ_COUNT = 10
for i in range(OBJ_COUNT):
even = i % 2 == 0
bucket.create_an_object({
'index': i,
'desc': 'An object number is {0}.'.format(i + 1),
'name': 'test user',
'even': even,
})
# all
results = bucket.query_for_objects()
assert len(results) == OBJ_COUNT
# equal
results = bucket.query_for_objects(cl.Clause.eq('index', 3))
assert len(results) == 1
assert results[0]['index'] == 3
assert results[0]['desc'] == 'An object number is 4.'
# not
results = bucket.query_for_objects(cl.Clause.not_(cl.Clause.eq('index', 2)))
assert len(results) == OBJ_COUNT - 1
for r in results:
assert r['index'] != 2
# prefix
results = bucket.query_for_objects(cl.Clause.prefix('name', 'tes'))
assert len(results) == OBJ_COUNT
# range
results = bucket.query_for_objects(cl.RangeClause('index').le(2))
assert len(results) == 3
results = bucket.query_for_objects(cl.RangeClause('index').lt(2))
assert len(results) == 2
results = bucket.query_for_objects(cl.RangeClause('index').ge(2))
assert len(results) == OBJ_COUNT - 2
results = bucket.query_for_objects(cl.RangeClause('index').gt(2))
assert len(results) == OBJ_COUNT - 3
# in
results = bucket.query_for_objects(cl.Clause.in_('index', [1, 3, 4]))
assert len(results) == 3
for r in results:
assert r['index'] in [1, 3, 4]
# has
results = bucket.query_for_objects(cl.HasFieldClause('index', 'INTEGER'))
assert len(results) == OBJ_COUNT
results = bucket.query_for_objects(cl.HasFieldClause('index', 'STRING'))
assert len(results) == 0
results = bucket.query_for_objects(
cl.HasFieldClause('index', cl.HasFieldClause.Types.integer))
assert len(results) == OBJ_COUNT
results = bucket.query_for_objects(
cl.HasFieldClause('index', cl.HasFieldClause.Types.string))
assert len(results) == 0
# and
results = bucket.query_for_objects(
cl.AndClause(
cl.Clause.eq('even', True),
cl.RangeClause('index').le(6)
)
)
assert len(results) == 6 // 2 + 1
# or
results = bucket.query_for_objects(
cl.OrClause(
cl.Clause.eq('even', True),
cl.RangeClause('index').le(6)
)
)
assert len(results) == 6 + (OBJ_COUNT - 6) // 2
# order_by, descending
results = bucket.query_for_objects(order_by='index', descending=True)
for i, r in enumerate(results):
assert r['index'] == OBJ_COUNT - i - 1
results = bucket.query_for_objects(order_by='index', descending=False)
for i, r in enumerate(results):
assert r['index'] == i
# limit
results = bucket.query_for_objects(limit=2)
assert len(results) == 2
results = bucket.query_for_objects(limit=4)
assert len(results) == 4
results = bucket.query_for_objects(limit=OBJ_COUNT + 20)
assert len(results) == OBJ_COUNT
def test_query_for_objects_pagination_key(self):
bucket = self.scope(BUCKET_ID)
OBJ_COUNT = 20
for i in range(OBJ_COUNT):
even = i % 2 == 0
bucket.create_an_object({
'index': i,
'desc': 'An object number is {0}.'.format(i + 1),
'name': 'test user',
'even': even,
})
# pagination_key
results = bucket.query_for_objects(limit=3)
assert len(results) == 3
results = bucket.query_for_objects(limit=3,
pagination_key=results.next_pagination_key)
assert len(results) == 3
results = bucket.query_for_objects(pagination_key=results.next_pagination_key)
assert len(results) == OBJ_COUNT - 6
assert results.next_pagination_key is None
def test_query_for_objects_huge(self):
bucket = self.scope(BUCKET_ID)
OBJ_COUNT = 410
for i in range(OBJ_COUNT):
even = i % 2 == 0
bucket.create_an_object({
'index': i,
'desc': 'An object number is {0}.'.format(i + 1),
'name': 'test user',
'even': even,
})
# pagination_key
results = bucket.query_for_objects()
assert len(results) == OBJ_COUNT
| ta2xeo/python3-kii | tests/test_data/application/test_application_scope_data.py | Python | mit | 13,511 |
import inspect
from functools import total_ordering
def yield_once(iterator):
"""
Decorator to make an iterator yield each result only once.
:param iterator: Any iterator
:return: An iterator that yields every result only once at most.
"""
def yield_once_generator(*args, **kwargs):
yielded = []
for item in iterator(*args, **kwargs):
if item in yielded:
pass
else:
yielded.append(item)
yield item
return yield_once_generator
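# Minimal illustration of ``yield_once`` (the generator below is made up, not
# part of the original module):
#
#   @yield_once
#   def numbers():
#       for n in (1, 2, 2, 3, 1):
#           yield n
#
#   list(numbers())  # -> [1, 2, 3]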
def _to_list(var):
"""
Make variable to list.
:param var: variable of any type
:return: list
"""
if isinstance(var, list):
return var
elif var is None:
return []
elif isinstance(var, str) or isinstance(var, dict):
# We dont want to make a list out of those via the default constructor
return [var]
else:
try:
return list(var)
except TypeError:
return [var]
def arguments_to_lists(function):
"""
Decorator for a function that converts all arguments to lists.
:param function: target function
:return: target function with only lists as parameters
"""
def l_function(*args, **kwargs):
l_args = [_to_list(arg) for arg in args]
l_kwargs = {}
for key, value in kwargs.items():
l_kwargs[key] = _to_list(value)
return function(*l_args, **l_kwargs)
return l_function
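# Minimal illustration of ``arguments_to_lists`` (the function below is made
# up for the example):
#
#   @arguments_to_lists
#   def pair(a, b=None):
#       return a, b
#
#   pair("x", b=5)  # -> (['x'], [5])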
def _get_member(obj, member):
# If not found, pass AttributeError to invoking function.
attribute = getattr(obj, member)
if callable(attribute) and hasattr(attribute, "__self__"):
# If the value is a bound method, invoke it like a getter and return
# its value.
try:
return attribute()
except TypeError:
# Don't use repr() to display the member more accurately, because
# invoking repr() on a bound method prints in this format:
# <bound method CLASS.METHOD of **repr(instance)**>
# This invokes repr() recursively.
raise TypeError("Given bound method '" + member + "' must be "
"callable like a getter, taking no arguments.")
else:
# Otherwise it's a member variable or property (or any other attribute
# that holds a value).
return attribute
def _construct_repr_string(obj, members):
# The passed entries have format (member-name, repr-function).
values = ", ".join(member + "=" + func(_get_member(obj, member))
for member, func in members)
return ("<" + type(obj).__name__ + " object(" + values + ") at "
+ hex(id(obj)) + ">")
def get_public_members(obj):
"""
    Retrieves a dictionary of member-like objects (members or properties) that
    are publicly exposed.
    :param obj: The object to probe.
    :return: A dict mapping the public member names to their values.
"""
return {attr: getattr(obj, attr) for attr in dir(obj)
if not attr.startswith("_")
and not hasattr(getattr(obj, attr), '__call__')}
def generate_repr(*members):
"""
Decorator that binds an auto-generated ``__repr__()`` function to a class.
The generated ``__repr__()`` function prints in following format:
<ClassName object(field1=1, field2='A string', field3=[1, 2, 3]) at 0xAAAA>
Note that this decorator modifies the given class in place!
:param members: An iterable of member names to include into the
representation-string. Providing no members yields
to inclusion of all member variables and properties
in alphabetical order (except if they start with an
underscore).
To control the representation of each member, you
can also pass a tuple where the first element
contains the member to print and the second one the
representation function (which defaults to the
built-in ``repr()``). Using None as representation
function is the same as using ``repr()``.
Supported members are fields/variables, properties
and getter-like functions (functions that accept no
arguments).
:raises ValueError: Raised when the passed
(member, repr-function)-tuples have not a length of
2.
:raises AttributeError: Raised when a given member/attribute was not found
in class.
:raises TypeError: Raised when a provided member is a bound method
that is not a getter-like function (means it must
accept no parameters).
:return: The class armed with an auto-generated __repr__
function.
"""
def decorator(cls):
cls.__repr__ = __repr__
return cls
if members:
# Prepare members list.
members_to_print = list(members)
for i, member in enumerate(members_to_print):
if isinstance(member, tuple):
# Check tuple dimensions.
length = len(member)
if length == 2:
members_to_print[i] = (member[0],
member[1] if member[1] else repr)
else:
raise ValueError("Passed tuple " + repr(member) +
" needs to be 2-dimensional, but has " +
str(length) + " dimensions.")
else:
members_to_print[i] = (member, repr)
def __repr__(self):
return _construct_repr_string(self, members_to_print)
else:
def __repr__(self):
# Need to fetch member variables every time since they are unknown
            # until class instantiation.
members_to_print = get_public_members(self)
member_repr_list = ((member, repr) for member in
sorted(members_to_print, key=str.lower))
return _construct_repr_string(self, member_repr_list)
return decorator
def generate_eq(*members):
"""
Decorator that generates equality and inequality operators for the
decorated class. The given members as well as the type of self and other
will be taken into account.
Note that this decorator modifies the given class in place!
:param members: A list of members to compare for equality.
"""
def decorator(cls):
def eq(self, other):
if type(other) is not type(self):
return False
return all(getattr(self, member) == getattr(other, member)
for member in members)
def ne(self, other):
return not eq(self, other)
cls.__eq__ = eq
cls.__ne__ = ne
return cls
return decorator
def generate_ordering(*members):
"""
Decorator that generates ordering operators for the decorated class based
on the given member names. All ordering except equality functions will
raise a TypeError when a comparison with an unrelated class is attempted.
(Comparisons with child classes will thus work fine with the capabilities
    of the base class as Python will choose the base class's comparison
operator in that case.)
Note that this decorator modifies the given class in place!
:param members: A list of members to compare, ordered from high priority to
low. I.e. if the first member is equal the second will be
taken for comparison and so on. If a member is None it is
considered smaller than any other value except None.
"""
def decorator(cls):
def lt(self, other):
if not isinstance(other, cls):
raise TypeError("Comparison with unrelated classes is "
"unsupported.")
for member in members:
if getattr(self, member) == getattr(other, member):
continue
if (
getattr(self, member) is None or
getattr(other, member) is None):
return getattr(self, member) is None
return getattr(self, member) < getattr(other, member)
return False
cls.__lt__ = lt
return total_ordering(generate_eq(*members)(cls))
return decorator
def assert_right_type(value, types, argname):
if isinstance(types, type) or types is None:
types = (types,)
for typ in types:
if value == typ or (isinstance(typ, type) and isinstance(value, typ)):
return
raise TypeError("{} must be an instance of one of {} (provided value: "
"{})".format(argname, types, repr(value)))
def enforce_signature(function):
"""
    Enforces the signature of the function by raising TypeErrors if invalid
arguments are provided. The return value is not checked.
You can annotate any parameter of your function with the desired type or a
tuple of allowed types. If you annotate the function with a value, this
value only will be allowed (useful especially for None). Example:
>>> @enforce_signature
... def test(arg: bool, another: (int, None)):
... pass
...
>>> test(True, 5)
>>> test(True, None)
    A string value for either parameter, for example, would then trigger a TypeError.
:param function: The function to check.
"""
argspec = inspect.getfullargspec(function)
annotations = argspec.annotations
argnames = argspec.args
unnamed_annotations = {}
for i, arg in enumerate(argnames):
if arg in annotations:
unnamed_annotations[i] = (annotations[arg], arg)
def decorated(*args, **kwargs):
for i, annotation in unnamed_annotations.items():
if i < len(args):
assert_right_type(args[i], annotation[0], annotation[1])
for argname, argval in kwargs.items():
if argname in annotations:
assert_right_type(argval, annotations[argname], argname)
return function(*args, **kwargs)
return decorated
class classproperty(property):
"""
Decorator to set a class function to a class property.
Given a class like:
>>> class test:
... @classproperty
... def func(self):
... return 1
We can now access the class property using the class name:
>>> test.func
1
And we can still have the same behaviour with an instance:
>>> test().func
1
"""
def __get__(self, obj, type_):
return self.fget.__get__(None, type_)(type_)
| Adrianzatreanu/coala-decorators | coala_decorators/decorators.py | Python | mit | 11,053 |
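A minimal usage sketch for the decorators defined above. The `Point` class is invented for illustration, and the import path is inferred from the repository layout in the separator line; neither is part of the original module.

from coala_decorators.decorators import generate_eq, generate_repr

@generate_repr("x", "y")
@generate_eq("x", "y")
class Point:
    # Hypothetical example class, not part of coala-decorators.
    def __init__(self, x, y):
        self.x = x
        self.y = y

p, q = Point(1, 2), Point(1, 2)
assert p == q          # __eq__/__ne__ were generated from the listed members
print(repr(p))         # e.g. <Point object(x=1, y=2) at 0x7f...>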
def count_inversion(sequence):
flag, answer, sequence = True, 0, list(sequence)
while flag:
flag = False
for i in xrange(1, len(sequence)):
if sequence[i-1] > sequence[i]:
sequence[i], sequence[i-1] = sequence[i-1], sequence[i]
answer += 1
flag = True
return answer
def test_function():
assert count_inversion((1, 2, 5, 3, 4, 7, 6)) == 3, "Example"
assert count_inversion((0, 1, 2, 3)) == 0, "Sorted"
assert count_inversion((99, -99)) == 1, "Two numbers"
assert count_inversion((5, 3, 2, 1, 0)) == 10, "Reversed"
| denisbalyko/checkio-solution | count-inversions.py | Python | mit | 619 |
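The solution above counts inversions with a bubble-sort style pass in O(n^2). As an illustrative alternative (not part of the original submission), the same count can be obtained in O(n log n) with a merge-sort split/merge:

def count_inversion_fast(sequence):
    # Hypothetical alternative: merge sort that also counts cross inversions.
    def sort_count(seq):
        if len(seq) < 2:
            return seq, 0
        mid = len(seq) // 2
        left, left_inv = sort_count(seq[:mid])
        right, right_inv = sort_count(seq[mid:])
        merged, i, j, inv = [], 0, 0, left_inv + right_inv
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                inv += len(left) - i   # every remaining left element is larger
                merged.append(right[j])
                j += 1
        merged += left[i:] + right[j:]
        return merged, inv
    return sort_count(list(sequence))[1]

assert count_inversion_fast((1, 2, 5, 3, 4, 7, 6)) == 3
assert count_inversion_fast((5, 3, 2, 1, 0)) == 10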
from datetime import datetime
from .base import BaseModel
class Comment(BaseModel):
def __init__(self, **kwargs):
# Add created and updated attrs by default.
self.created = self.updated = datetime.now()
super().__init__(**kwargs)
def update(self):
""" Extends update method to update some fields before saving. """
self.updated = datetime.now()
super().update()
| oldani/nanodegree-blog | app/models/comment.py | Python | mit | 422 |
# -*- coding: utf-8 -*-
import json
import httplib2
import sys
import codecs
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
foursquare_client_id = 'SMQNYZFVCIOYIRAIXND2D5SYBLQUOPDB4HZTV13TT22AGACD'
foursquare_client_secret = 'IHBS4VBHYWJL53NLIY2HSVI5A1144GJ3MDTYYY1KLKTMC4BV'
google_api_key = 'AIzaSyBz7r2Kz6x7wO1zV9_O5Rcxmt8NahJ6kos'
def getGeocodeLocation(inputString):
#Replace Spaces with '+' in URL
locationString = inputString.replace(" ", "+")
url = ('https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s'% (locationString, google_api_key))
h = httplib2.Http()
result = json.loads(h.request(url,'GET')[1])
#print response
latitude = result['results'][0]['geometry']['location']['lat']
longitude = result['results'][0]['geometry']['location']['lng']
return (latitude,longitude)
#This function takes in a string representation of a location and cuisine type, geocodes the location, and then passes the latitude and longitude coordinates to the Foursquare API
def findARestaurant(mealType, location):
latitude, longitude = getGeocodeLocation(location)
url = ('https://api.foursquare.com/v2/venues/search?client_id=%s&client_secret=%s&v=20130815&ll=%s,%s&query=%s' % (foursquare_client_id, foursquare_client_secret,latitude,longitude,mealType))
h = httplib2.Http()
result = json.loads(h.request(url,'GET')[1])
if result['response']['venues']:
#Grab the first restaurant
restaurant = result['response']['venues'][0]
venue_id = restaurant['id']
restaurant_name = restaurant['name']
restaurant_address = restaurant['location']['formattedAddress']
#Format the Restaurant Address into one string
address = ""
for i in restaurant_address:
address += i + " "
restaurant_address = address
        #Get a 300x300 picture of the restaurant using the venue_id (you can change this by altering the 300x300 value in the URL or replacing it with 'original' to get the original picture)
url = ('https://api.foursquare.com/v2/venues/%s/photos?client_id=%s&v=20150603&client_secret=%s' % ((venue_id,foursquare_client_id,foursquare_client_secret)))
result = json.loads(h.request(url,'GET')[1])
#Grab the first image
#if no image available, insert default image url
if result['response']['photos']['items']:
firstpic = result['response']['photos']['items'][0]
prefix = firstpic['prefix']
suffix = firstpic['suffix']
imageURL = prefix + "300x300" + suffix
else:
imageURL = "http://pixabay.com/get/8926af5eb597ca51ca4c/1433440765/cheeseburger-34314_1280.png?direct"
restaurantInfo = {'name':restaurant_name, 'address':restaurant_address, 'image':imageURL}
#print "Restaurant Name: %s " % restaurantInfo['name']
#print "Restaurant Address: %s " % restaurantInfo['address']
#print "Image: %s \n " % restaurantInfo['image']
return restaurantInfo
else:
#print "No Restaurants Found for %s" % location
return "No Restaurants Found"
if __name__ == '__main__':
findARestaurant("Pizza", "Tokyo, Japan")
findARestaurant("Tacos", "Jakarta, Indonesia")
findARestaurant("Tapas", "Maputo, Mozambique")
findARestaurant("Falafel", "Cairo, Egypt")
findARestaurant("Spaghetti", "New Delhi, India")
findARestaurant("Cappuccino", "Geneva, Switzerland")
findARestaurant("Sushi", "Los Angeles, California")
findARestaurant("Steak", "La Paz, Bolivia")
    findARestaurant("Gyros", "Sydney, Australia") | tuanvu216/udacity-course | designing-restful-apis/Lesson_3/06_Adding Features to your Mashup/Starter Code/findARestaurant.py | Python | mit | 3,690 |
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class AllTimeList(ListResource):
def __init__(self, version, account_sid):
"""
Initialize the AllTimeList
:param Version version: Version that contains the resource
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
:rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
"""
super(AllTimeList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
}
self._uri = '/Accounts/{account_sid}/Usage/Records/AllTime.json'.format(**self._solution)
def stream(self, category=values.unset, start_date=values.unset,
end_date=values.unset, limit=None, page_size=None):
"""
Streams AllTimeInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param AllTimeInstance.Category category: The category
:param date start_date: The start_date
:param date end_date: The end_date
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
category=category,
start_date=start_date,
end_date=end_date,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, category=values.unset, start_date=values.unset,
end_date=values.unset, limit=None, page_size=None):
"""
Lists AllTimeInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param AllTimeInstance.Category category: The category
:param date start_date: The start_date
:param date end_date: The end_date
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance]
"""
return list(self.stream(
category=category,
start_date=start_date,
end_date=end_date,
limit=limit,
page_size=page_size,
))
def page(self, category=values.unset, start_date=values.unset,
end_date=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of AllTimeInstance records from the API.
Request is executed immediately
:param AllTimeInstance.Category category: The category
:param date start_date: The start_date
:param date end_date: The end_date
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of AllTimeInstance
:rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimePage
"""
params = values.of({
'Category': category,
'StartDate': serialize.iso8601_date(start_date),
'EndDate': serialize.iso8601_date(end_date),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return AllTimePage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.AllTimeList>'
class AllTimePage(Page):
def __init__(self, version, response, solution):
"""
Initialize the AllTimePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimePage
:rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimePage
"""
super(AllTimePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of AllTimeInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
:rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
"""
return AllTimeInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.AllTimePage>'
class AllTimeInstance(InstanceResource):
class Category(object):
AUTHY_AUTHENTICATIONS = "authy-authentications"
AUTHY_CALLS_OUTBOUND = "authy-calls-outbound"
AUTHY_MONTHLY_FEES = "authy-monthly-fees"
AUTHY_PHONE_INTELLIGENCE = "authy-phone-intelligence"
AUTHY_PHONE_VERIFICATIONS = "authy-phone-verifications"
AUTHY_SMS_OUTBOUND = "authy-sms-outbound"
CALL_PROGESS_EVENTS = "call-progess-events"
CALLERIDLOOKUPS = "calleridlookups"
CALLS = "calls"
CALLS_CLIENT = "calls-client"
CALLS_GLOBALCONFERENCE = "calls-globalconference"
CALLS_INBOUND = "calls-inbound"
CALLS_INBOUND_LOCAL = "calls-inbound-local"
CALLS_INBOUND_MOBILE = "calls-inbound-mobile"
CALLS_INBOUND_TOLLFREE = "calls-inbound-tollfree"
CALLS_OUTBOUND = "calls-outbound"
CALLS_RECORDINGS = "calls-recordings"
CALLS_SIP = "calls-sip"
CALLS_SIP_INBOUND = "calls-sip-inbound"
CALLS_SIP_OUTBOUND = "calls-sip-outbound"
CARRIER_LOOKUPS = "carrier-lookups"
CONVERSATIONS = "conversations"
CONVERSATIONS_API_REQUESTS = "conversations-api-requests"
CONVERSATIONS_CONVERSATION_EVENTS = "conversations-conversation-events"
CONVERSATIONS_ENDPOINT_CONNECTIVITY = "conversations-endpoint-connectivity"
CONVERSATIONS_EVENTS = "conversations-events"
CONVERSATIONS_PARTICIPANT_EVENTS = "conversations-participant-events"
CONVERSATIONS_PARTICIPANTS = "conversations-participants"
IP_MESSAGING = "ip-messaging"
IP_MESSAGING_COMMANDS = "ip-messaging-commands"
IP_MESSAGING_DATA_STORAGE = "ip-messaging-data-storage"
IP_MESSAGING_DATA_TRANSFER = "ip-messaging-data-transfer"
IP_MESSAGING_ENDPOINT_CONNECTIVITY = "ip-messaging-endpoint-connectivity"
LOOKUPS = "lookups"
MEDIASTORAGE = "mediastorage"
MMS = "mms"
MMS_INBOUND = "mms-inbound"
MMS_INBOUND_LONGCODE = "mms-inbound-longcode"
MMS_INBOUND_SHORTCODE = "mms-inbound-shortcode"
MMS_OUTBOUND = "mms-outbound"
MMS_OUTBOUND_LONGCODE = "mms-outbound-longcode"
MMS_OUTBOUND_SHORTCODE = "mms-outbound-shortcode"
MONITOR_READS = "monitor-reads"
MONITOR_STORAGE = "monitor-storage"
MONITOR_WRITES = "monitor-writes"
NUMBER_FORMAT_LOOKUPS = "number-format-lookups"
PHONENUMBERS = "phonenumbers"
PHONENUMBERS_CPS = "phonenumbers-cps"
PHONENUMBERS_EMERGENCY = "phonenumbers-emergency"
PHONENUMBERS_LOCAL = "phonenumbers-local"
PHONENUMBERS_MOBILE = "phonenumbers-mobile"
PHONENUMBERS_SETUPS = "phonenumbers-setups"
PHONENUMBERS_TOLLFREE = "phonenumbers-tollfree"
PREMIUMSUPPORT = "premiumsupport"
RECORDINGS = "recordings"
RECORDINGSTORAGE = "recordingstorage"
SHORTCODES = "shortcodes"
SHORTCODES_CUSTOMEROWNED = "shortcodes-customerowned"
SHORTCODES_MMS_ENABLEMENT = "shortcodes-mms-enablement"
SHORTCODES_MPS = "shortcodes-mps"
SHORTCODES_RANDOM = "shortcodes-random"
SHORTCODES_UK = "shortcodes-uk"
SHORTCODES_VANITY = "shortcodes-vanity"
SMS = "sms"
SMS_INBOUND = "sms-inbound"
SMS_INBOUND_LONGCODE = "sms-inbound-longcode"
SMS_INBOUND_SHORTCODE = "sms-inbound-shortcode"
SMS_OUTBOUND = "sms-outbound"
SMS_OUTBOUND_LONGCODE = "sms-outbound-longcode"
SMS_OUTBOUND_SHORTCODE = "sms-outbound-shortcode"
TASKROUTER_TASKS = "taskrouter-tasks"
TOTALPRICE = "totalprice"
TRANSCRIPTIONS = "transcriptions"
TRUNKING_CPS = "trunking-cps"
TRUNKING_EMERGENCY_CALLS = "trunking-emergency-calls"
TRUNKING_ORIGINATION = "trunking-origination"
TRUNKING_ORIGINATION_LOCAL = "trunking-origination-local"
TRUNKING_ORIGINATION_MOBILE = "trunking-origination-mobile"
TRUNKING_ORIGINATION_TOLLFREE = "trunking-origination-tollfree"
TRUNKING_RECORDINGS = "trunking-recordings"
TRUNKING_SECURE = "trunking-secure"
TRUNKING_TERMINATION = "trunking-termination"
TURNMEGABYTES = "turnmegabytes"
TURNMEGABYTES_AUSTRALIA = "turnmegabytes-australia"
TURNMEGABYTES_BRASIL = "turnmegabytes-brasil"
TURNMEGABYTES_IRELAND = "turnmegabytes-ireland"
TURNMEGABYTES_JAPAN = "turnmegabytes-japan"
TURNMEGABYTES_SINGAPORE = "turnmegabytes-singapore"
TURNMEGABYTES_USEAST = "turnmegabytes-useast"
TURNMEGABYTES_USWEST = "turnmegabytes-uswest"
def __init__(self, version, payload, account_sid):
"""
Initialize the AllTimeInstance
:returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
:rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
"""
super(AllTimeInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'api_version': payload['api_version'],
'category': payload['category'],
'count': payload['count'],
'count_unit': payload['count_unit'],
'description': payload['description'],
'end_date': deserialize.iso8601_date(payload['end_date']),
'price': deserialize.decimal(payload['price']),
'price_unit': payload['price_unit'],
'start_date': deserialize.iso8601_date(payload['start_date']),
'subresource_uris': payload['subresource_uris'],
'uri': payload['uri'],
'usage': payload['usage'],
'usage_unit': payload['usage_unit'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
}
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The api_version
:rtype: unicode
"""
return self._properties['api_version']
@property
def category(self):
"""
:returns: The category
:rtype: AllTimeInstance.Category
"""
return self._properties['category']
@property
def count(self):
"""
:returns: The count
:rtype: unicode
"""
return self._properties['count']
@property
def count_unit(self):
"""
:returns: The count_unit
:rtype: unicode
"""
return self._properties['count_unit']
@property
def description(self):
"""
:returns: The description
:rtype: unicode
"""
return self._properties['description']
@property
def end_date(self):
"""
:returns: The end_date
:rtype: date
"""
return self._properties['end_date']
@property
def price(self):
"""
:returns: The price
:rtype: unicode
"""
return self._properties['price']
@property
def price_unit(self):
"""
:returns: The price_unit
:rtype: unicode
"""
return self._properties['price_unit']
@property
def start_date(self):
"""
:returns: The start_date
:rtype: date
"""
return self._properties['start_date']
@property
def subresource_uris(self):
"""
:returns: The subresource_uris
:rtype: unicode
"""
return self._properties['subresource_uris']
@property
def uri(self):
"""
:returns: The uri
:rtype: unicode
"""
return self._properties['uri']
@property
def usage(self):
"""
:returns: The usage
:rtype: unicode
"""
return self._properties['usage']
@property
def usage_unit(self):
"""
:returns: The usage_unit
:rtype: unicode
"""
return self._properties['usage_unit']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.AllTimeInstance>'
| angadpc/Alexa-Project- | twilio/rest/api/v2010/account/usage/record/all_time.py | Python | mit | 15,320 |
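A hedged usage sketch for the generated resource above: the account SID and auth token are placeholders, and the accessor path reflects how the Twilio Python helper library normally exposes all-time usage records; check the library version in use before relying on it.

from twilio.rest import Client

# Placeholders -- substitute real credentials.
client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

# List all-time usage records for a single category (assumed accessor path).
records = client.usage.records.all_time.list(category="calls", limit=20)
for record in records:
    print(record.category, record.usage, record.usage_unit, record.price)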
from flask.ext.wtf import Form
from wtforms import IntegerField, StringField, FieldList
from wtforms.validators import DataRequired, Email, ValidationError
def word_length(limit=None, message=None):
message = message or 'Must not be more than %d words'
message = message % limit
def _length(form, field):
if not field.data or not limit:
return field
if len(field.data.split()) > limit:
raise ValidationError(message)
return _length
class EditSupplierForm(Form):
description = StringField('Supplier summary', validators=[
word_length(50, 'Your summary must not be more than %d words')
])
clients = FieldList(StringField())
def validate_clients(form, field):
if len(field.data) > 10:
raise ValidationError('You must have 10 or fewer clients')
class EditContactInformationForm(Form):
id = IntegerField()
address1 = StringField('Business address')
address2 = StringField('Business address')
city = StringField('Town or city')
country = StringField()
postcode = StringField(validators=[
DataRequired(message="Postcode can not be empty"),
])
website = StringField()
phoneNumber = StringField('Phone number')
email = StringField('Email address', validators=[
DataRequired(message="Email can not be empty"),
Email(message="Please enter a valid email address")
])
contactName = StringField('Contact name', validators=[
DataRequired(message="Contact name can not be empty"),
])
| mtekel/digitalmarketplace-supplier-frontend | app/main/forms/suppliers.py | Python | mit | 1,562 |
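An illustrative sketch of reusing the word_length() validator factory above in another form; the form name, field, and import path are assumptions, not part of the original module.

from flask.ext.wtf import Form
from wtforms import StringField
# Import path inferred from the repository layout shown above.
from app.main.forms.suppliers import word_length

class EditServiceForm(Form):
    # Hypothetical form: limits the summary to 30 words.
    summary = StringField('Service summary', validators=[
        word_length(30, 'Your summary must not be more than %d words')
    ])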
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="icicle.outsidetextfont", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/icicle/outsidetextfont/_color.py | Python | mit | 469 |
from . import tree
class Node:
def __init__(self, container, parent, value=None):
self._node = tree.Node(container, parent, value)
@property
def _container(self):
return self._node._container
@property
def id(self):
return self._node.id
@property
def value(self):
return self._node.value
@value.setter
def value(self, value):
self._node.value = value
def _create_children(self):
left = Node(self._container, self)
right = Node(self._container, self)
self._children = [left, right]
return self._children
def _create_child(self, index, value):
try:
children = self._children
except AttributeError:
children = self._create_children()
children[index].value = value
return children[index]
def create_left_child(self, value):
return self._create_child(0, value)
def create_right_child(self, value):
return self._create_child(1, value)
def _child(self, index):
try:
child = self._children[index]
except AttributeError:
return None
if child.value is None:
return None
return child
@property
def left_child(self):
return self._child(0)
@property
def right_child(self):
return self._child(1)
class BinaryTree:
def __init__(self, *args, **kwargs):
self._tree = tree.Tree(*args, **kwargs)
@property
def root(self):
try:
return self._root
except AttributeError:
return None
def create_root(self, value):
if self.root:
raise RuntimeError("Cannot set root more that once")
self._root = Node(self, None, value)
return self._root
@property
def _pipe(self):
return self._tree._pipe
def sync(self):
self._tree.sync()
@classmethod
def name(cls):
return tree.Tree.__name__
| alviproject/alvi | alvi/client/containers/binary_tree.py | Python | mit | 2,017 |
# imports
import h2o
import numpy as np
import pandas as pd
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.grid.grid_search import H2OGridSearch
import sys
from operator import add
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import SQLContext
from pyspark.sql import functions as F #https://stackoverflow.com/questions/39504950/python-pyspark-get-sum-of-a-pyspark-dataframe-column-values
from logging_lib.LoggingController import LoggingController
import h2o
h2o.show_progress() # turn on progress bars
from h2o.estimators.glm import H2OGeneralizedLinearEstimator # import GLM models
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.grid.grid_search import H2OGridSearch # grid search
from h2o.estimators.xgboost import H2OXGBoostEstimator
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
import xgboost as xgb
import matplotlib
matplotlib.use('Agg') #Need this if running matplot on a server w/o display
from pysparkling import *
#Define your s3 bucket to load and store data
S3_BUCKET = 'rza-ml-1'
#Create a custom logger to log statistics and plots
logger = LoggingController()
logger.s3_bucket = S3_BUCKET
#.config('spark.executor.cores','6') \
spark = SparkSession.builder \
.appName("App") \
.getOrCreate()
# .master("local[*]") \
# .config('spark.cores.max','16')
#.master("local") \
# .config("spark.some.config.option", "some-value") \
spark.sparkContext.setLogLevel('WARN') #Get rid of all the junk in output
Y = 'y'
ID_VAR = 'ID'
DROPS = [ID_VAR]
#From an XGBoost model
# location of "dirty" file
# decision trees handle dirty data elegantly
#path = ## Read File
# NOTE the top 6 are categorical, might want to look into this.
MOST_IMPORTANT_VARS_ORDERD = ['X5','X0','X8','X3','X1','X2','X314','X47','X118',\
'X315','X29','X127','X236','X115','X383','X152','X151','X351','X327','X77','X104',\
'X267','X95','X142']
#Load data from s3
train = spark.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('s3n://'+S3_BUCKET+'/train.csv')
test = spark.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('s3n://'+S3_BUCKET+'/test.csv')
#this needs to be done for h2o glm.predict() bug (which needs same number of columns)
test = test.withColumn(Y,test[ID_VAR])
#Workaround for splitting wide data: you need to split on only an ID variable,
#then join back with the train variable (bug in Spark as of 2.1 with randomSplit())
train_temp , valid_temp = train.select(ID_VAR).randomSplit([0.7,0.3], seed=123)
valid = valid_temp.join(train,ID_VAR,'inner')
train = train_temp.join(train,ID_VAR,'inner')
# split into 40% training, 30% validation, and 30% test
#train, valid, test = frame.split_frame([0.4, 0.3])
conf = H2OConf(spark=spark)
conf.nthreads = -1
hc = H2OContext.getOrCreate(spark,conf)
print('Making h2o frames...')
train_h20_frame = hc.as_h2o_frame(train, "trainTable")
valid_h20_frame = hc.as_h2o_frame(valid, "validTable")
test_h2o_frame = hc.as_h2o_frame(test, "testTable")
print('Done making h2o frames.')
logger.log_string("Train Summary:")
logger.log_string("Rows:{}".format(train_h20_frame.nrow))
logger.log_string("Cols:{}".format(train_h20_frame.ncol))
X = [name for name in train.columns if name not in ['id', '_WARN_', Y]]
# assign target and inputs
#y = 'bad_loan'
#X = [name for name in frame.columns if name not in ['id', '_WARN_', y]]
#print(y)
#print(X)
# random forest
# initialize rf model
rf_model = H2ORandomForestEstimator(
ntrees=500, # Up to 500 decision trees in the forest
max_depth=30, # trees can grow to depth of 30
stopping_rounds=5, # stop after validation error does not decrease for 5 iterations/new trees
score_each_iteration=True, # score validation error on every iteration/new tree
model_id='rf_model') # for easy lookup in flow
# train rf model
rf_model.train(
x=X,
y=Y,
training_frame=train_h20_frame,
validation_frame=valid_h20_frame)
# print model information
sub = test_h2o_frame[ID_VAR].cbind(rf_model.predict(test_h2o_frame))
print(sub.head())
# create time stamp
import re
import time
time_stamp = re.sub('[: ]', '_', time.asctime())
# save file for submission
sub.columns = [ID_VAR, Y]
sub_fname = 'Submission_'+str(time_stamp) + '.csv'
# h2o.download_csv(sub, 's3n://'+S3_BUCKET+'/kaggle_submissions/Mercedes/' +sub_fname)
spark_sub_frame = hc.as_spark_frame(sub)
spark_sub_frame.select(ID_VAR,Y).coalesce(1).write.option("header","true").csv('s3n://'+S3_BUCKET+'/Kaggle_Submissions/Mercedes/' +sub_fname)
| kcrandall/Kaggle_Mercedes_Manufacturing | spark/experiements/reza/random_forest.py | Python | mit | 5,051 |
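The script imports H2OGridSearch but never uses it. Below is a hedged sketch of how a small hyperparameter grid could be searched over the same inputs; it reuses X, Y and the H2O frames prepared above and is not part of the original script.

# Illustrative only -- reuses X, Y, train_h20_frame and valid_h20_frame from above.
rf_grid = H2OGridSearch(
    model=H2ORandomForestEstimator(stopping_rounds=5, score_each_iteration=True),
    hyper_params={'ntrees': [200, 500], 'max_depth': [15, 30]},
    grid_id='rf_grid')
rf_grid.train(x=X, y=Y,
              training_frame=train_h20_frame,
              validation_frame=valid_h20_frame)
# Pick the model with the lowest validation RMSE.
best_rf = rf_grid.get_grid(sort_by='rmse', decreasing=False).models[0]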
import sys
import ctypes
def popcount(N):
if sys.platform.startswith('linux'):
libc = ctypes.cdll.LoadLibrary('libc.so.6')
return libc.__sched_cpucount(ctypes.sizeof(ctypes.c_long), (ctypes.c_long * 1)(N))
elif sys.platform == 'darwin':
libc = ctypes.cdll.LoadLibrary('libSystem.dylib')
return libc.__popcountdi2(N)
else:
assert(False)
def main():
N = int(input())
mod = 10 ** 9 + 7
A = [[int(x) for x in input().split()] for _ in range(N)]
dp = [0] * (1 << N)
dp[0] = 1
for state in range(1 << N):
dp[state] %= mod
i = popcount(state)
for j in range(N):
if (state >> j & 1) == 0 and A[i][j]:
dp[state | (1 << j)] += dp[state]
print(dp[-1])
if __name__ == '__main__':
main()
| knuu/competitive-programming | atcoder/dp/edu_dp_o.py | Python | mit | 818 |
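The ctypes-based popcount above depends on platform-specific libc symbols; a portable pure-Python equivalent (illustrative, not part of the original submission) is:

def popcount_portable(N):
    # Counts set bits without touching libc; works on any platform.
    return bin(N).count('1')

assert popcount_portable(0b101101) == 4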
class Solution(object):
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
def fill(n):
if n == l:
result.append(''.join(combination))
return
for cs in ds[digits[n]]:
for c in cs:
combination[n] = c
fill(n+1)
ds = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
l = len(digits)
if l < 1:
return []
combination = ['x'] * l
result = []
fill(0)
return result
print Solution().letterCombinations('238')
| grehujt/leetcode | 17. Letter Combinations of a Phone Number/solution.py | Python | mit | 707 |
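An illustrative alternative to the recursive fill above using itertools.product; it yields the same combinations in the same order because product varies the last digit's letters fastest. The standalone function name is made up.

from itertools import product

def letter_combinations(digits):
    ds = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
          '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
    if not digits:
        return []
    return [''.join(p) for p in product(*(ds[d] for d in digits))]

assert letter_combinations('23') == ['ad', 'ae', 'af', 'bd', 'be', 'bf',
                                     'cd', 'ce', 'cf']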
import unittest
from numpy import arange, linspace
from numpy.random import seed
from src.bases.root import Root
from src.examples.example_setups import setup_stat_scm
from src.utils.sem_utils.toy_sems import StationaryDependentSEM as StatSEM
from src.utils.sequential_intervention_functions import get_interventional_grids
from src.utils.sequential_sampling import sequentially_sample_model
from src.utils.utilities import convert_to_dict_of_temporal_lists, powerset
seed(seed=0)
class TestRoot(unittest.TestCase):
# Do NOT change the setUp method -- setUp is reserved by unittest.
def setUp(self):
# Use STAT DAG to test Root class
self.T = 3 # Time-steps in DAG
self.n = 4 # Number of observational samples per variable per time-step
self.N = 5 # Number of trials per time-step for method
(
self.init_sem,
self.sem,
_,
self.G,
self.exploration_sets,
self.intervention_domain,
self.true_objective_values,
) = setup_stat_scm(T=self.T)
# Sample observational data using SEM
D_O = sequentially_sample_model(
self.init_sem, self.sem, total_timesteps=self.T, sample_count=self.n, epsilon=None,
)
root_inputs = {
"G": self.G,
"sem": StatSEM,
"base_target_variable": "Y",
"observation_samples": D_O, # Observational samples
"intervention_domain": self.intervention_domain,
"number_of_trials": self.N,
}
self.root = Root(**root_inputs)
def test_setup_STAT_function(self):
self.assertEqual(self.exploration_sets, [("X",), ("Z",), ("X", "Z")])
self.assertEqual(self.intervention_domain, {"X": [-4, 1], "Z": [-3, 3]})
self.assertAlmostEqual(
self.true_objective_values, [-2.1518267393287287, -4.303653478657457, -6.455480217986186], places=7
)
self.assertEqual(self.init_sem.keys(), self.sem.keys())
def test_root_methods(self):
self.assertEqual(
self.root.node_pars,
{
"X_0": (),
"Z_0": ("X_0",),
"Y_0": ("Z_0",),
"X_1": ("X_0",),
"Z_1": ("Z_0", "X_1"),
"Y_1": ("Y_0", "Z_1"),
"X_2": ("X_1",),
"Z_2": ("Z_1", "X_2"),
"Y_2": ("Y_1", "Z_2"),
},
)
self.assertEqual(self.root.outcome_values, {0: [10000000.0], 1: [10000000.0], 2: [10000000.0]})
self.assertEqual(
self.root.sorted_nodes,
{"X_0": 0, "Z_0": 1, "X_1": 2, "Y_0": 3, "Z_1": 4, "X_2": 5, "Y_1": 6, "Z_2": 7, "Y_2": 8},
)
self.assertEqual(self.root.interventional_variable_limits, {"X": [-4, 1], "Z": [-3, 3]})
# If we do not pass any exploration set, then by default the Root class will assign all manipulative variables as the intervention set.
self.assertEqual(self.root.exploration_sets, [("X", "Z")])
self.assertEqual(
self.root.interventional_data_y, {0: {("X", "Z"): None}, 1: {("X", "Z"): None}, 2: {("X", "Z"): None}}
)
self.assertEqual(
self.root.interventional_data_x, {0: {("X", "Z"): None}, 1: {("X", "Z"): None}, 2: {("X", "Z"): None}}
)
def test_dict_to_list_conversion_of_observational_samples(self):
observational_samples = {
"X": arange(0, 9).reshape(3, -1),
"Y": arange(3, 12).reshape(3, -1),
"Z": arange(6, 15).reshape(3, -1),
}
out = convert_to_dict_of_temporal_lists(observational_samples)
self.assertEqual(len(out["X"]), 3)
self.assertEqual(len(out["Z"][0]), 3)
self.assertEqual(sum([len(out["Y"][t]) for t in range(3)]), 9)
def test_interventional_grids(self):
nr_samples = 10
interventional_variable_limits = {"X": [-15, 3], "Z": [-1, 10]}
exploration_sets = list(powerset(self.root.manipulative_variables))
grids = get_interventional_grids(exploration_sets, interventional_variable_limits, nr_samples)
compare_vector = linspace(
interventional_variable_limits["X"][0], interventional_variable_limits["X"][1], num=nr_samples
).reshape(-1, 1)
self.assertEqual(compare_vector.shape, grids[exploration_sets[0]].shape)
self.assertTrue((compare_vector == grids[exploration_sets[0]]).all())
def test_target_variables(self):
self.assertEqual(self.root.all_target_variables, ["Y_0", "Y_1", "Y_2"])
def test_canonical_variables(self):
self.assertEqual(self.root.observational_samples.keys(), {"X", "Y", "Z"})
def test_number_of_nodes_per_time_slice(self):
# Number of nodes per time-slice
v_n = len(self.root.G.nodes()) / self.root.G.T
nodes = list(self.root.G.nodes())
self.assertEqual(v_n, 3)
for t in range(self.G.T):
            self.assertEqual(len([v for v in nodes if v.split("_")[1] == str(t)]), v_n)
if __name__ == "__main__":
unittest.main()
| neildhir/DCBO | tests/test_root.py | Python | mit | 5,136 |
# Copyright (c) 2016 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import io
import functools
import hashlib
import hmac
import logging
import os
import re
import shlex
import shutil
import stat
import subprocess
import urllib.parse
import uuid
import werkzeug
import zipfile
from . import app, config, models
from urllib.parse import urlparse
from flask import request, session, redirect, url_for, Response
from datetime import datetime
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
def get_raise(data, key, expect_type=None):
''' Helper function to retrieve an element from a JSON data structure.
The *key* must be a string and may contain periods to indicate nesting.
Parts of the key may be a string or integer used for indexing on lists.
If *expect_type* is not None and the retrieved value is not of the
specified type, TypeError is raised. If the key can not be found,
KeyError is raised. '''
parts = key.split('.')
resolved = ''
for part in parts:
resolved += part
try:
part = int(part)
except ValueError:
pass
if isinstance(part, str):
if not isinstance(data, dict):
raise TypeError('expected dictionary to access {!r}'.format(resolved))
try:
data = data[part]
except KeyError:
raise KeyError(resolved)
elif isinstance(part, int):
if not isinstance(data, list):
raise TypeError('expected list to access {!r}'.format(resolved))
try:
data = data[part]
except IndexError:
raise KeyError(resolved)
else:
assert False, "unreachable"
resolved += '.'
if expect_type is not None and not isinstance(data, expect_type):
raise TypeError('expected {!r} but got {!r} instead for {!r}'.format(
expect_type.__name__, type(data).__name__, key))
return data
def get(data, key, expect_type=None, default=None):
''' Same as :func:`get_raise`, but returns *default* if the key could
not be found or the datatype doesn't match. '''
try:
return get_raise(data, key, expect_type)
except (TypeError, ValueError):
return default
def basic_auth(message='Login required'):
''' Sends a 401 response that enables basic auth. '''
headers = {'WWW-Authenticate': 'Basic realm="{}"'.format(message)}
return Response('Please log in.', 401, headers, mimetype='text/plain')
def requires_auth(func):
''' Decorator for view functions that require basic authentication. '''
@functools.wraps(func)
def wrapper(*args, **kwargs):
ip = request.remote_addr
token_string = session.get('flux_login_token')
token = models.LoginToken.select(lambda t: t.token == token_string).first()
if not token or token.ip != ip or token.expired():
if token and token.expired():
flash("Your login session has expired.")
token.delete()
return redirect(url_for('login'))
request.login_token = token
request.user = token.user
return func(*args, **kwargs)
return wrapper
def with_io_response(kwarg='stream', stream_type='text', **response_kwargs):
''' Decorator for View functions that create a :class:`io.StringIO` or
:class:`io.BytesIO` (based on the *stream_type* parameter) and pass it
as *kwarg* to the wrapped function. The contents of the buffer are
sent back to the client. '''
if stream_type == 'text':
factory = io.StringIO
elif stream_type == 'bytes':
factory = io.BytesIO
else:
raise ValueError('invalid value for stream_type: {!r}'.format(stream_type))
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if kwarg in kwargs:
raise RuntimeError('keyword argument {!r} already occupied'.format(kwarg))
kwargs[kwarg] = stream = factory()
status = func(*args, **kwargs)
return Response(stream.getvalue(), status=status, **response_kwargs)
return wrapper
return decorator
def with_logger(kwarg='logger', stream_dest_kwarg='stream', replace=True):
''' Decorator that creates a new :class:`logging.Logger` object
additionally to or in-place for the *stream* parameter passed to
the wrapped function. This is usually used in combination with
the :func:`with_io_response` decorator.
Note that exceptions with this decorator will be logged and the
returned status code will be 500 Internal Server Error. '''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if replace:
stream = kwargs.pop(stream_dest_kwarg)
else:
stream = kwargs[stream_dest_kwarg]
kwargs[kwarg] = logger = create_logger(stream)
try:
return func(*args, **kwargs)
except BaseException as exc:
logger.exception(exc)
return 500
return wrapper
return decorator
def create_logger(stream, name=__name__, fmt=None):
''' Creates a new :class:`logging.Logger` object with the
specified *name* and *fmt* (defaults to a standard logging
  formatting including the current time, levelname and message).
The logger will also output to stderr. '''
fmt = fmt or '[%(asctime)-15s - %(levelname)s]: %(message)s'
formatter = logging.Formatter(fmt)
logger = logging.Logger(name)
handler = logging.StreamHandler(stream)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def stream_file(filename, name=None, mime=None):
def generate():
with open(filename, 'rb') as fp:
yield from fp
if name is None:
name = os.path.basename(filename)
headers = {}
headers['Content-Type'] = mime or 'application/x-octet-stream'
headers['Content-Length'] = os.stat(filename).st_size
headers['Content-Disposition'] = 'attachment; filename="' + name + '"'
return Response(generate(), 200, headers)
def flash(message=None):
if message is None:
return session.pop('flux_flash', None)
else:
session['flux_flash'] = message
def make_secret():
return str(uuid.uuid4())
def hash_pw(pw):
return hashlib.md5(pw.encode('utf8')).hexdigest()
def makedirs(path):
''' Shorthand that creates a directory and stays silent when it
already exists. '''
if not os.path.exists(path):
os.makedirs(path)
def rmtree(path, remove_write_protection=False):
"""
  A wrapper for #shutil.rmtree() that, if *remove_write_protection* is enabled,
  strips write protection from entries that fail to delete and retries them.
"""
if remove_write_protection:
def on_rm_error(func, path, exc_info):
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
else:
on_rm_error = None
shutil.rmtree(path, onerror=on_rm_error)
def zipdir(dirname, filename):
dirname = os.path.abspath(dirname)
zipf = zipfile.ZipFile(filename, 'w')
for root, dirs, files in os.walk(dirname):
for fname in files:
arcname = os.path.join(os.path.relpath(root, dirname), fname)
zipf.write(os.path.join(root, fname), arcname)
zipf.close()
def secure_filename(filename):
"""
Similar to #werkzeug.secure_filename(), but preserves leading dots in
the filename.
"""
while True:
filename = filename.lstrip('/').lstrip('\\')
if filename.startswith('..') and filename[2:3] in '/\\':
filename = filename[3:]
elif filename.startswith('.') and filename[1:2] in '/\\':
filename = filename[2:]
else:
break
has_dot = filename.startswith('.')
filename = werkzeug.secure_filename(filename)
if has_dot:
filename = '.' + filename
return filename
def quote(s, for_ninja=False):
"""
Enhanced implementation of #shlex.quote().
Does not generate single-quotes on Windows.
"""
if os.name == 'nt' and os.sep == '\\':
s = s.replace('"', '\\"')
    if re.search(r'\s', s) or any(c in s for c in '<>'):
s = '"' + s + '"'
else:
s = shlex.quote(s)
return s
def run(command, logger, cwd=None, env=None, shell=False, return_stdout=False,
inherit_env=True):
"""
  Run a subprocess with the specified command. The command and its output are
  logged to *logger*. The command will automatically be converted to a string
or list of command arguments based on the *shell* parameter.
# Parameters
command (str, list): A command-string or list of command arguments.
logger (logging.Logger): A logger that will receive the command output.
cwd (str, None): The current working directory.
env (dict, None): The environment for the subprocess.
shell (bool): If set to #True, execute the command via the system shell.
return_stdout (bool): Return the output of the command (including stderr)
to the caller. The result will be a tuple of (returncode, output).
inherit_env (bool): Inherit the current process' environment.
# Return
int, tuple of (int, str): The return code, or the returncode and the
output of the command.
"""
if shell:
if not isinstance(command, str):
command = ' '.join(quote(x) for x in command)
if logger:
logger.info('$ ' + command)
else:
if isinstance(command, str):
command = shlex.split(command)
if logger:
logger.info('$ ' + ' '.join(map(quote, command)))
if env is None:
env = {}
if inherit_env:
env = {**os.environ, **env}
popen = subprocess.Popen(
command, cwd=cwd, env=env, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, stdin=None)
stdout = popen.communicate()[0].decode()
if stdout:
if popen.returncode != 0 and logger:
logger.error('\n' + stdout)
else:
if logger:
logger.info('\n' + stdout)
if return_stdout:
return popen.returncode, stdout
return popen.returncode
def ssh_command(url, *args, no_ptty=False, identity_file=None,
verbose=None, options=None):
  ''' Helper function to generate an SSH command. If no options are
specified, the default option ``BatchMode=yes`` will be set. '''
if options is None:
options = {'BatchMode': 'yes'}
if verbose is None:
verbose = config.ssh_verbose
command = ['ssh']
if url is not None:
command.append(url)
command += ['-o{}={}'.format(k, v) for (k, v) in options.items()]
if no_ptty:
command.append('-T')
if identity_file:
command += ['-o', 'IdentitiesOnly=yes']
# NOTE: Workaround for windows, as the backslashes are gone at the time
# Git tries to use the GIT_SSH_COMMAND.
command += ['-i', identity_file.replace('\\', '/')]
if verbose:
command.append('-v')
if args:
command.append('--')
command += args
return command
def strip_url_path(url):
  ''' Strips the path part of the specified *url*. '''
result = list(urllib.parse.urlparse(url))
result[2] = ''
return urllib.parse.urlunparse(result)
def get_github_signature(secret, payload_data):
''' Generates the Github HMAC signature from the repository
*secret* and the *payload_data*. The GitHub signature is sent
with the ``X-Hub-Signature`` header. '''
return hmac.new(secret.encode('utf8'), payload_data, hashlib.sha1).hexdigest()
def get_bitbucket_signature(secret, payload_data):
''' Generates the Bitbucket HMAC signature from the repository
*secret* and the *payload_data*. The Bitbucket signature is sent
with the ``X-Hub-Signature`` header. '''
return hmac.new(secret.encode('utf8'), payload_data, hashlib.sha256).hexdigest()
def get_date_diff(date1, date2):
if (not date1) or (not date2):
if (not date1) and date2:
date1 = datetime.now()
else:
return '00:00:00'
diff = (date1 - date2) if date1 > date2 else (date2 - date1)
seconds = int(diff.seconds % 60)
minutes = int(((diff.seconds - seconds) / 60) % 60)
hours = int((diff.seconds - seconds - minutes * 60) / 3600)
return '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
def is_page_active(page, user):
path = request.path
if page == 'dashboard' and (not path or path == '/'):
return True
elif page == 'repositories' and (path.startswith('/repositories') or path.startswith('/repo') or path.startswith('/edit/repo') or path.startswith('/build') or path.startswith('/overrides')):
return True
elif page == 'users' and (path.startswith('/users') or (path.startswith('/user') and path != ('/user/' + str(user.id)))):
return True
elif page == 'profile' and path == ('/user/' + str(user.id)):
return True
elif page == 'integration' and path == '/integration':
return True
return False
def ping_repo(repo_url, repo = None):
if not repo_url or repo_url == '':
return 1
if repo and os.path.isfile(get_repo_private_key_path(repo)):
identity_file = get_repo_private_key_path(repo)
else:
identity_file = config.ssh_identity_file
ssh_cmd = ssh_command(None, identity_file=identity_file)
env = {'GIT_SSH_COMMAND': ' '.join(map(quote, ssh_cmd))}
ls_remote = ['git', 'ls-remote', '--exit-code', repo_url]
res = run(ls_remote, app.logger, env=env)
return res
def get_customs_path(repo):
return os.path.join(config.customs_dir, repo.name.replace('/', os.sep))
def get_override_path(repo):
return os.path.join(config.override_dir, repo.name.replace('/', os.sep))
def get_override_build_script_path(repo):
return os.path.join(get_override_path(repo), config.build_scripts[0])
def read_override_build_script(repo):
build_script_path = get_override_build_script_path(repo)
if os.path.isfile(build_script_path):
build_script_file = open(build_script_path, mode='r')
build_script = build_script_file.read()
build_script_file.close()
return build_script
return ''
def write_override_build_script(repo, build_script):
build_script_path = get_override_build_script_path(repo)
if build_script.strip() == '':
if os.path.isfile(build_script_path):
os.remove(build_script_path)
else:
makedirs(os.path.dirname(build_script_path))
build_script_file = open(build_script_path, mode='w')
build_script_file.write(build_script.replace('\r', ''))
build_script_file.close()
def get_public_key():
"""
Returns the servers SSH public key.
"""
# XXX Support all valid options and eventually parse the config file?
filename = config.ssh_identity_file or os.path.expanduser('~/.ssh/id_rsa')
if not filename.endswith('.pub'):
filename += '.pub'
if os.path.isfile(filename):
with open(filename) as fp:
return fp.read()
return None
def generate_ssh_keypair(public_key_comment):
"""
Generates new RSA ssh keypair.
Return:
tuple(str, str): generated private and public keys
"""
key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537, key_size=4096)
private_key = key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()).decode('ascii')
public_key = key.public_key().public_bytes(serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH).decode('ascii')
if public_key_comment:
public_key += ' ' + public_key_comment
return private_key, public_key
def get_repo_private_key_path(repo):
"""
Returns path of private key for repository from Customs folder.
Return:
str: path to custom private SSH key
"""
return os.path.join(get_customs_path(repo), 'id_rsa')
def get_repo_public_key_path(repo):
"""
Returns path of public key for repository from Customs folder.
Return:
str: path to custom public SSH key
"""
return os.path.join(get_customs_path(repo), 'id_rsa.pub')
| NiklasRosenstein/flux | flux/utils.py | Python | mit | 16,462 |
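A hedged sketch of verifying an incoming GitHub webhook with get_github_signature() from the module above, using a constant-time comparison. The import path is inferred from the repository layout, and `request` is assumed to be the Flask request object; the helper function itself is illustrative.

import hmac
from flux.utils import get_github_signature  # path inferred from the repo layout above

def github_signature_is_valid(secret, request):
    # GitHub sends "X-Hub-Signature: sha1=<hexdigest>".
    received = request.headers.get('X-Hub-Signature', '')
    expected = 'sha1=' + get_github_signature(secret, request.get_data())
    return hmac.compare_digest(received, expected)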
import hmac
import json
import urllib.parse
import subprocess
from .main import (
PullReqState,
parse_commands,
db_query,
INTERRUPTED_BY_HOMU_RE,
synchronize,
)
from . import utils
from . import gitlab
from .utils import lazy_debug
import jinja2
import requests
import pkg_resources
from bottle import (
get,
post,
run,
request,
redirect,
abort,
response,
)
from threading import Thread
import sys
import os
import traceback
from retrying import retry
import bottle
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024 * 10
class G:
pass
g = G()
def find_state(sha):
for repo_label, repo_states in g.states.items():
for state in repo_states.values():
if state.merge_sha == sha:
return state, repo_label
raise ValueError('Invalid SHA')
def get_repo(repo_label, repo_cfg):
repo = g.repos[repo_label].gh
if not repo:
repo = g.gh.repository(repo_cfg['owner'], repo_cfg['name'])
g.repos[repo_label] = repo
assert repo.owner.login == repo_cfg['owner']
assert repo.name == repo_cfg['name']
return repo
@get('/')
def index():
return g.tpls['index'].render(repos=[g.repos[label]
for label in sorted(g.repos)])
@get('/queue/<repo_label:path>')
def queue(repo_label):
logger = g.logger.getChild('queue')
lazy_debug(logger, lambda: 'repo_label: {}'.format(repo_label))
single_repo_closed = None
if repo_label == 'all':
labels = g.repos.keys()
multiple = True
repo_url = None
else:
labels = repo_label.split('+')
multiple = len(labels) > 1
if repo_label in g.repos and g.repos[repo_label].treeclosed >= 0:
single_repo_closed = g.repos[repo_label].treeclosed
repo_url = '{}/{}/{}'.format(
g.cfg["gitlab"]["host"],
g.cfg['repo'][repo_label]['owner'],
g.cfg['repo'][repo_label]['name'])
states = []
for label in labels:
try:
states += g.states[label].values()
except KeyError:
abort(404, 'No such repository: {}'.format(label))
pull_states = sorted(states)
rows = []
for state in pull_states:
treeclosed = (single_repo_closed or
state.priority < g.repos[state.repo_label].treeclosed)
status_ext = ''
if state.try_:
status_ext += ' (try)'
if treeclosed:
status_ext += ' [TREE CLOSED]'
rows.append({
'status': state.get_status(),
'status_ext': status_ext,
'priority': 'rollup' if state.rollup else state.priority,
'url': '{}/{}/{}/merge_requests/{}'.format(
g.cfg["gitlab"]["host"],
state.owner,
state.name,
state.num
),
'num': state.num,
'approved_by': state.approved_by,
'title': state.title,
'head_ref': state.head_ref,
'mergeable': ('yes' if state.mergeable is True else
'no' if state.mergeable is False else ''),
'assignee': state.assignee,
'repo_label': state.repo_label,
'repo_url': '{}/{}/{}'.format(
g.cfg["gitlab"]["host"],
state.owner,
state.name
),
'greyed': "treeclosed" if treeclosed else "",
})
return g.tpls['queue'].render(
repo_url=repo_url,
repo_label=repo_label,
treeclosed=single_repo_closed,
states=rows,
oauth_client_id=g.cfg['gitlab']['app_client_id'],
total=len(pull_states),
approved=len([x for x in pull_states if x.approved_by]),
rolled_up=len([x for x in pull_states if x.rollup]),
failed=len([x for x in pull_states if x.status == 'failure' or
x.status == 'error']),
multiple=multiple,
)
@get('/callback')
def callback():
logger = g.logger.getChild('callback')
response.content_type = 'text/plain'
code = request.query.code
state = json.loads(request.query.state)
lazy_debug(logger, lambda: 'state: {}'.format(state))
oauth_url = '{}/login/oauth/access_token'.format(
g.cfg["gitlab"]["host"],
)
try:
res = requests.post(oauth_url, data={
'client_id': g.cfg['gitlab']['app_client_id'],
'client_secret': g.cfg['gitlab']['app_client_secret'],
'code': code,
})
except Exception as ex:
logger.warn('/callback encountered an error '
'during gitlab oauth callback')
# probably related to https://gitlab.com/pycqa/flake8/issues/42
lazy_debug(logger, lambda: 'gitlab.oauth callback err: {}'.format(ex)) # noqa
abort(502, 'Bad Gateway')
args = urllib.parse.parse_qs(res.text)
token = args['access_token'][0]
repo_label = state['repo_label']
repo_cfg = g.repo_cfgs[repo_label]
repo = get_repo(repo_label, repo_cfg)
user_gh = gitlab.login(token)
if state['cmd'] == 'rollup':
return rollup(user_gh, state, repo_label, repo_cfg, repo)
elif state['cmd'] == 'synch':
return synch(user_gh, state, repo_label, repo_cfg, repo)
else:
abort(400, 'Invalid command')
def rollup(user_gh, state, repo_label, repo_cfg, repo):
user_repo = user_gh.repository(user_gh.user().login, repo.name)
base_repo = user_gh.repository(repo.owner.login, repo.name)
nums = state.get('nums', [])
if nums:
try:
rollup_states = [g.states[repo_label][num] for num in nums]
except KeyError as e:
return 'Invalid PR number: {}'.format(e.args[0])
else:
rollup_states = [x for x in g.states[repo_label].values() if x.rollup]
rollup_states = [x for x in rollup_states if x.approved_by]
rollup_states.sort(key=lambda x: x.num)
if not rollup_states:
return 'No pull requests are marked as rollup'
base_ref = rollup_states[0].base_ref
base_sha = repo.ref('heads/' + base_ref).object.sha
gitlab.set_ref(
user_repo,
'heads/' + repo_cfg.get('branch', {}).get('rollup', 'rollup'),
base_sha,
force=True,
)
successes = []
failures = []
for state in rollup_states:
if base_ref != state.base_ref:
failures.append(state.num)
continue
merge_msg = 'Rollup merge of #{} - {}, r={}\n\n{}\n\n{}'.format(
state.num,
state.head_ref,
state.approved_by,
state.title,
state.body,
)
try:
rollup = repo_cfg.get('branch', {}).get('rollup', 'rollup')
user_repo.merge(rollup, state.head_sha, merge_msg)
except gitlab.CommonError as e:
if e.code != 409:
raise
failures.append(state.num)
else:
successes.append(state.num)
title = 'Rollup of {} pull requests'.format(len(successes))
body = '- Successful merges: {}\n\n- Failed merges: {}'.format(
', '.join('#{}'.format(x) for x in successes),
', '.join('#{}'.format(x) for x in failures),
)
try:
rollup = repo_cfg.get('branch', {}).get('rollup', 'rollup')
pull = base_repo.create_pull(
title,
state.base_ref,
user_repo.owner.login + ':' + rollup,
body,
)
except gitlab.CommonError as e:
return e.response.text
else:
redirect(pull.html_url)
@post('/gitlab')
def gitlab_hook():
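    """Entry point for GitLab webhooks: verify the X-Gitlab-Token secret,
    then dispatch on X-Gitlab-Event. Merge request events keep the tracked
    PullReqState in sync, push events invalidate mergeability, note
    (comment) events are scanned for homu commands, and job events feed CI
    results into report_build_res()."""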
logger = g.logger.getChild('gitlab')
response.content_type = 'text/plain'
info = request.json
lazy_debug(logger, lambda: 'info: {}'.format(utils.remove_url_keys_from_json(info))) # noqa
try:
path = urllib.parse.urlparse(info["repository"]["homepage"]).path
repo_parts = path.split("/")[1:]
except KeyError:
repo_parts = info["project"]["path_with_namespace"].split("/")
owner = '/'.join(repo_parts[0:-1])
repo_name = repo_parts[-1]
repo_label = g.repo_labels[owner, repo_name]
repo_cfg = g.repo_cfgs[repo_label]
if request.headers["X-Gitlab-Token"] != repo_cfg["gitlab"]["secret"]:
abort(400, 'Invalid signature')
event_type = request.headers['X-Gitlab-Event']
if event_type == "Note Hook":
if info["object_attributes"]["noteable_type"] == "MergeRequest":
event_type = "mr_comment"
lazy_debug(logger, lambda: "Got event_type {}".format(event_type))
if event_type == 'Merge Request Hook':
mr = info["object_attributes"]
action = mr['action']
pull_num = mr["iid"]
head_sha = mr["last_commit"]["id"]
source_repo_parts = mr["source"]["path_with_namespace"].split("/") # noqa
source_owner = "/".join(source_repo_parts[0:-1])
source_name = source_repo_parts[-1]
if action in ['open', 'reopen']:
state = PullReqState(
mr["id"],
pull_num, head_sha, '', g.db, repo_label,
g.mergeable_que, g.gh,
owner, repo_name,
g.repos,
)
state.title = mr['title']
state.body = mr['description']
state.head_ref = source_owner + ":" + mr["source_branch"] # noqa
state.base_ref = mr["target_branch"]
if mr["merge_status"] != "unchecked":
state.set_mergeable(mr["merge_status"] == "can_be_merged")
assignee = mr["assignee"]["username"] if mr.get("assignee") else ""
state.assignee = (assignee or '')
found = False
if action == 'reopen':
# FIXME: Review comments are ignored here
for c in gitlab.iter_issue_comments(
state.get_repo(), pull_num
):
found = parse_commands(
c.body,
c.author["username"],
repo_cfg,
state,
g.my_username,
g.db,
g.states,
) or found
status = ''
for info in gitlab.iter_statuses(
state.get_repo(),
state.head_sha,
):
if info.context == 'homu':
status = info.state
break
state.set_status(status)
state.save()
g.states[repo_label][pull_num] = state
if found:
g.queue_handler()
elif action in ['close', 'merge']:
state = g.states[repo_label][pull_num]
if hasattr(state, 'fake_merge_sha'):
def inner():
gitlab.set_ref(
state.get_repo(),
'heads/' + state.base_ref,
state.merge_sha,
force=True,
)
def fail(err):
state.add_comment(':boom: Failed to recover from the '
'artificial commit. See {} for details.'
' ({})'.format(state.fake_merge_sha,
err))
utils.retry_until(inner, fail, state)
del g.states[repo_label][pull_num]
db_query(g.db, 'DELETE FROM pull WHERE repo = ? AND num = ?',
[repo_label, pull_num])
db_query(g.db, 'DELETE FROM build_res WHERE repo = ? AND num = ?',
[repo_label, pull_num])
db_query(g.db, 'DELETE FROM mergeable WHERE repo = ? AND num = ?',
[repo_label, pull_num])
g.queue_handler()
elif action in ['assigne', 'unassigne']:
state = g.states[repo_label][pull_num]
assignee = mr["assignee"]["username"]
state.assignee = assignee or ''
state.save()
elif action == "update":
state = g.states[repo_label][pull_num]
if state.head_sha != head_sha:
state.head_advanced(head_sha)
state.save()
else:
lazy_debug(logger, lambda: 'Invalid pull_request action: {}'.format(action)) # noqa
elif event_type == 'Push Hook':
ref = info['ref'][len('refs/heads/'):]
for state in list(g.states[repo_label].values()):
if state.base_ref == ref:
state.set_mergeable(None, cause={
'sha': info['after'],
'title': info['commits'][0]['message'].splitlines()[0],
})
if state.head_sha == info['before']:
before_status = state.status
state.head_advanced(info['after'])
state.save()
if before_status == "pending":
state.add_comment(
":construction: Test has broken by new commit {:.7}."
.format(
info["after"],
)
)
elif event_type == 'mr_comment':
body = info["object_attributes"]["note"]
username = info['user']['username']
pull_num = info["merge_request"]["iid"]
mr = info["merge_request"]
        state = g.states[repo_label].get(pull_num)
        if state:
            if mr["merge_status"] != "unchecked":
                state.set_mergeable(mr["merge_status"] == "can_be_merged")
            state.title = mr['title']
            state.body = mr['description']
if parse_commands(
body,
username,
repo_cfg,
state,
g.my_username,
g.db,
g.states,
realtime=True,
):
state.save()
g.queue_handler()
elif event_type == 'Job Hook':
try:
state, repo_label = find_state(info['sha'])
except ValueError:
return 'OK'
lazy_debug(logger, lambda: 'Found state via: {}'.format(info["sha"])) # noqa
job_name = ""
for name, value in repo_cfg['job'].items():
if 'context' in value and value['context'] == info['build_name']: # noqa
job_name = name
if job_name is "":
return 'OK'
if info['build_status'] in ['running', 'created']:
return 'OK'
# for row in info['branches']:
# if row['name'] == state.base_ref:
# return 'OK'
target_url = "{}/-/jobs/{}".format(
info["repository"]["homepage"],
info["build_id"],
)
report_build_res(
info['build_status'] == 'success', target_url,
'job-' + job_name, state, logger, repo_cfg,
)
return 'OK'
def report_build_res(succ, url, builder, state, logger, repo_cfg):
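    """Record one builder's result for a pull request. Once every builder
    has succeeded the status becomes 'success' and, for approved (non-try)
    PRs, the merge commit is fast-forwarded onto the base branch; a failure
    marks the PR 'failure' and posts a comment. The queue handler is woken
    afterwards."""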
lazy_debug(logger,
lambda: 'build result {}: builder = {}, succ = {}, current build_res = {}' # noqa
.format(state, builder, succ,
state.build_res_summary()))
state.set_build_res(builder, succ, url)
if succ:
if all(x['res'] for x in state.build_res.values()):
state.set_status('success')
desc = 'Test successful'
gitlab.create_status(
state.get_repo(), state.head_sha,
'success', url, desc, context='homu',
)
urls = ', '.join('[{}]({})'.format(builder, x['url']) for builder, x in sorted(state.build_res.items())) # noqa
test_comment = ':sunny: {} - {}'.format(desc, urls)
if state.approved_by and not state.try_:
comment = (test_comment + '\n\n' +
'Approved by: {}\n\nPushing {} to {}...'
).format(state.approved_by, state.merge_sha,
state.base_ref)
from .main import init_local_git_cmds
from .main import global_git_cfg
git_cmd = init_local_git_cmds(repo_cfg, global_git_cfg)
state.add_comment(comment)
try:
utils.logged_call(git_cmd('fetch', 'origin', state.base_ref)) # noqa
utils.logged_call(git_cmd("checkout", state.base_ref)) # noqa
utils.logged_call(git_cmd("reset", "--hard", "origin/" + state.base_ref)) # noqa
utils.logged_call(git_cmd("merge", "--ff-only", state.merge_sha)) # noqa
utils.logged_call(git_cmd("push", "origin", state.base_ref)) # noqa
except subprocess.CalledProcessError as e:
state.set_status('error')
desc = ('Test was successful, but fast-forwarding failed:'
' {}'.format(e))
gitlab.create_status(
state.get_repo(),
state.head_sha, 'canceled', url,
desc, context='homu',
)
state.add_comment(':eyes: ' + desc)
else:
comment = (test_comment + '\n\n' +
'State: approved={} try={}'
).format(state.approved_by, state.try_)
state.add_comment(comment)
else:
if state.status == 'pending':
state.set_status('failure')
desc = 'Test failed'
gitlab.create_status(
state.get_repo(), state.head_sha,
'failed', url, desc, context='homu',
)
state.add_comment(':broken_heart: {} - [{}]({})'.format(
desc,
builder,
url,
))
g.queue_handler()
@post('/buildbot')
def buildbot():
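    """Receive Buildbot packet notifications. buildFinished events are
    matched to a tracked PR via the revision property and forwarded to
    report_build_res(); builds interrupted by homu put the PR back in the
    queue. buildStarted events record the pending builder and free the
    buildbot slot."""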
logger = g.logger.getChild('buildbot')
response.content_type = 'text/plain'
for row in json.loads(request.forms.packets):
if row['event'] == 'buildFinished':
info = row['payload']['build']
lazy_debug(logger, lambda: 'info: {}'.format(info))
props = dict(x[:2] for x in info['properties'])
if 'retry' in info['text']:
continue
if not props['revision']:
continue
try:
state, repo_label = find_state(props['revision'])
except ValueError:
lazy_debug(logger,
lambda: 'Invalid commit ID from Buildbot: {}'.format(props['revision'])) # noqa
continue
lazy_debug(logger, lambda: 'state: {}, {}'.format(state, state.build_res_summary())) # noqa
if info['builderName'] not in state.build_res:
lazy_debug(logger,
lambda: 'Invalid builder from Buildbot: {}'.format(info['builderName'])) # noqa
continue
repo_cfg = g.repo_cfgs[repo_label]
if request.forms.secret != repo_cfg['buildbot']['secret']:
abort(400, 'Invalid secret')
build_succ = 'successful' in info['text'] or info['results'] == 0
url = '{}/builders/{}/builds/{}'.format(
repo_cfg['buildbot']['url'],
info['builderName'],
props['buildnumber'],
)
if 'interrupted' in info['text']:
step_name = ''
for step in reversed(info['steps']):
if 'interrupted' in step.get('text', []):
step_name = step['name']
break
if step_name:
try:
url = ('{}/builders/{}/builds/{}/steps/{}/logs/interrupt' # noqa
).format(repo_cfg['buildbot']['url'],
info['builderName'],
props['buildnumber'],
step_name,)
res = requests.get(url)
except Exception as ex:
logger.warn('/buildbot encountered an error during '
'gitlab.logs request')
# probably related to
# https://gitlab.com/pycqa/flake8/issues/42
lazy_debug(logger, lambda: 'buildbot logs err: {}'.format(ex)) # noqa
abort(502, 'Bad Gateway')
mat = INTERRUPTED_BY_HOMU_RE.search(res.text)
if mat:
interrupt_token = mat.group(1)
if getattr(state, 'interrupt_token',
'') != interrupt_token:
state.interrupt_token = interrupt_token
if state.status == 'pending':
state.set_status('')
desc = (':snowman: The build was interrupted '
'to prioritize another pull request.')
state.add_comment(desc)
gitlab.create_status(
state.get_repo(),
state.head_sha,
'canceled', url,
desc,
context='homu',
)
g.queue_handler()
continue
else:
logger.error('Corrupt payload from Buildbot')
report_build_res(build_succ, url, info['builderName'],
state, logger, repo_cfg)
elif row['event'] == 'buildStarted':
info = row['payload']['build']
lazy_debug(logger, lambda: 'info: {}'.format(info))
props = dict(x[:2] for x in info['properties'])
if not props['revision']:
continue
try:
state, repo_label = find_state(props['revision'])
except ValueError:
pass
else:
if info['builderName'] in state.build_res:
repo_cfg = g.repo_cfgs[repo_label]
if request.forms.secret != repo_cfg['buildbot']['secret']:
abort(400, 'Invalid secret')
url = '{}/builders/{}/builds/{}'.format(
repo_cfg['buildbot']['url'],
info['builderName'],
props['buildnumber'],
)
state.set_build_res(info['builderName'], None, url)
if g.buildbot_slots[0] == props['revision']:
g.buildbot_slots[0] = ''
g.queue_handler()
return 'OK'
def synch(user_gh, state, repo_label, repo_cfg, repo):
if not repo.is_collaborator(user_gh.user().login):
abort(400, 'You are not a collaborator')
Thread(target=synchronize, args=[repo_label, repo_cfg, g.logger,
g.gh, g.states, g.repos, g.db,
g.mergeable_que, g.my_username,
g.repo_labels]).start()
return 'Synchronizing {}...'.format(repo_label)
def synch_all():
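    """Synchronize every configured repository, retrying each one with
    exponential backoff if synchronization raises."""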
@retry(wait_exponential_multiplier=1000, wait_exponential_max=600000)
def sync_repo(repo_label, g):
try:
synchronize(repo_label, g.repo_cfgs[repo_label], g.logger, g.gh,
g.states, g.repos, g.db, g.mergeable_que,
g.my_username, g.repo_labels)
except Exception:
print('* Error while synchronizing {}'.format(repo_label))
traceback.print_exc()
raise
for repo_label in g.repos:
sync_repo(repo_label, g)
print('* Done synchronizing all')
@post('/admin')
def admin():
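    """Administrative JSON API guarded by the web secret. Supports repo_new
    (register and synchronize a repository), repo_del (drop its state and
    database rows), repo_edit (replace its configuration) and sync_all."""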
if request.json['secret'] != g.cfg['web']['secret']:
return 'Authentication failure'
if request.json['cmd'] == 'repo_new':
repo_label = request.json['repo_label']
repo_cfg = request.json['repo_cfg']
g.states[repo_label] = {}
g.repos[repo_label] = None
g.repo_cfgs[repo_label] = repo_cfg
g.repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label
Thread(target=synchronize, args=[repo_label, repo_cfg, g.logger,
g.gh, g.states, g.repos, g.db,
g.mergeable_que, g.my_username,
g.repo_labels]).start()
return 'OK'
elif request.json['cmd'] == 'repo_del':
repo_label = request.json['repo_label']
repo_cfg = g.repo_cfgs[repo_label]
db_query(g.db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
db_query(g.db, 'DELETE FROM build_res WHERE repo = ?', [repo_label])
db_query(g.db, 'DELETE FROM mergeable WHERE repo = ?', [repo_label])
del g.states[repo_label]
del g.repos[repo_label]
del g.repo_cfgs[repo_label]
del g.repo_labels[repo_cfg['owner'], repo_cfg['name']]
return 'OK'
elif request.json['cmd'] == 'repo_edit':
repo_label = request.json['repo_label']
repo_cfg = request.json['repo_cfg']
assert repo_cfg['owner'] == g.repo_cfgs[repo_label]['owner']
assert repo_cfg['name'] == g.repo_cfgs[repo_label]['name']
g.repo_cfgs[repo_label] = repo_cfg
return 'OK'
elif request.json['cmd'] == 'sync_all':
Thread(target=synch_all).start()
return 'OK'
return 'Unrecognized command'
def start(cfg, states, queue_handler, repo_cfgs, repos, logger,
buildbot_slots, my_username, db, repo_labels, mergeable_que, gh):
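    """Configure the Bottle application and run the blocking web server:
    load the Jinja templates, stash the shared state on the ``g`` object,
    optionally start a full synchronization, wrap the app with Sentry when
    a DSN is configured, and serve it with waitress."""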
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(pkg_resources.resource_filename(__name__, 'html')), # noqa
autoescape=True,
)
tpls = {}
tpls['index'] = env.get_template('index.html')
tpls['queue'] = env.get_template('queue.html')
g.cfg = cfg
g.states = states
g.queue_handler = queue_handler
g.repo_cfgs = repo_cfgs
g.repos = repos
g.logger = logger.getChild('server')
g.buildbot_slots = buildbot_slots
g.tpls = tpls
g.my_username = my_username
g.db = db
g.repo_labels = repo_labels
g.mergeable_que = mergeable_que
g.gh = gh
# Synchronize all PR data on startup
if cfg['web'].get('sync_on_start', False):
Thread(target=synch_all).start()
app = bottle.app()
if cfg["web"].get("sentry_dsn"):
from raven import Client
from raven.contrib.bottle import Sentry
client = Client(cfg["web"]["sentry_dsn"])
app.catchall = False
app = Sentry(app, client)
try:
run(
app=app,
host=cfg['web'].get('host', '0.0.0.0'),
port=cfg['web']['port'],
server='waitress',
)
except OSError as e:
print(e, file=sys.stderr)
os._exit(1)
| coldnight/homu | homu/server.py | Python | mit | 27,593 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_python_2d_ns
----------------------------------
Tests for `python_2d_ns` module.
"""
import sys
import unittest
from python_2d_ns.python_2d_ns import *
class TestPython_2d_ns(unittest.TestCase):
#test x, y coordinates generated by function IC_coor
#assume use 2 threads and rank==1
#y coordinate should be the same as serial code
def test_IC_coor_y_coor(self):
x, y, kx, ky, k2, k2_exp=IC_coor(64, 64, 32, 1, 1, 1, 2)
self.assertTrue(y[3,0]==-32)
self.assertTrue(y[3,5]==-27)
#x coordinate for rank 2 should start from 0
def test_IC_coor_x_coor(self):
x, y, kx, ky, k2, k2_exp=IC_coor(64, 64, 32, 1, 1, 1, 2)
#this coordinate should be 0
self.assertTrue(x[0,2]==0)
#test initial condition, Taylor green forcing, test whether the value is given on specific wavenumber
def test_IC_con(self):
#generate kx, ky, assume 2 threads, rank==0
x, y, kx, ky, k2, k2_exp=IC_coor(32, 32, 16, 1, 1, 0, 2)
Vxhat, Vyhat=IC_condition(1, 2, kx, ky, 32, 16)
#this wavenumber should be zero
self.assertTrue(Vyhat[2,5]==0)
#this wavenumber should be non-zero
self.assertTrue(Vxhat[14,14]==0.5j)
#test dealiasing function, which will remove values in wavenumber >= Nx/3
def test_delias(self):
#generate kx, ky, assume 2 threads, rank==1
Vxhat=zeros((Nx, Np), dtype=complex);
Vyhat=zeros((Nx, Np), dtype=complex);
Vxhat[:]=1
Vxhat, Vyhat=delias(Vxhat, Vyhat, Nx, Np, k2)
#this should be zero
self.assertTrue(Vxhat[Nx-1,Np-1]==0)
self.assertTrue(Vyhat[Nx-1,Np-1]==0)
#test FFT and IFFT. Take FFT and IFFT on array, it will transform back (with some numerical errors)
def test_FFT(self):
testa=zeros((Np, Ny), dtype=float);
testahat=empty(( N, Np) , dtype = complex )
if rank==0:
testa[2,0]=1
testa=ifftn_mpi(fftn_mpi(testa, testahat), testa)
#after FFT and IFFT, this value should be the same
if rank==0:
self.assertTrue(testa[2,0]-1<0.0001)
if __name__ == '__main__':
sys.exit(unittest.main())
| xinbian/2dns | tests/test_python_2d_ns.py | Python | mit | 2,191 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# see also http://habrahabr.ru/post/135863/
# "How to write a GIMP plug-in in Python"
# Import the required modules
from gimpfu import *
## fix
def bdfix(image, drawable, w0, c0, w1, c1):
# for Undo
pdb.gimp_context_push()
pdb.gimp_image_undo_group_start(image)
# border-0
pdb.gimp_image_resize(image, image.width + w0*2, image.height + w0*2, w0, w0)
cz = pdb.gimp_context_get_background()
pdb.gimp_context_set_background(c0)
pdb.gimp_image_flatten(image)
pdb.gimp_context_set_background(cz)
# border-1
pdb.gimp_image_resize(image, image.width + w1*2, image.height + w1*2, w1, w1)
cz = pdb.gimp_context_get_background()
pdb.gimp_context_set_background(c1)
pdb.gimp_image_flatten(image)
pdb.gimp_context_set_background(cz)
# Refresh
pdb.gimp_displays_flush()
# Undo
pdb.gimp_image_undo_group_end(image)
pdb.gimp_context_pop()
# Register the function in the PDB
register(
    "python-fu-bdfix", # Name of the function being registered
    "Добавление рамки к изображению", # Information about the plug-in
    "Помещает вокруг изображения рамку", # Short description of what the script does
    "Александр Лубягин", # Author information
    "Александр Лубягин", # Copyright information
    "15.01.2015", # Date written
    "Добавить рамку", # Name of the menu item used to launch the plug-in
    "*", # Image types the plug-in works with
    [
        (PF_IMAGE, "image", "Исходное изображение", None), # Reference to the image
        (PF_DRAWABLE, "drawable", "Исходный слой", None), # Reference to the layer
        (PF_INT, "w0", "Ширина рамки, px", "9"), # Border width
        (PF_COLOR, "c0", "Цвет рамки", (255,255,255)), # Border colour
        (PF_INT, "w1", "Ширина рамки, px", "1"), # Border width
        (PF_COLOR, "c1", "Цвет рамки", (0,0,0)) # Border colour
    ],
    [], # List of variables the plug-in will return
    bdfix, menu="<Image>/ТЕСТ/") # Name of the source function, and the menu the item is placed in
# Run the script
main()
| werkzeuge/gimp-plugins-simplest | bdfix.py | Python | mit | 2,693 |
# coding:utf-8
"""
Django settings for turbo project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import datetime
import os
import turbosettings.parameters as parameters
from turbosettings.generate_secret_key import secret_key_from_file
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
USE_X_FORWARDED_HOST = False
FORCE_SCRIPT_NAME = ""
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = secret_key_from_file('secret_key')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'songwriter',
'corsheaders',
'debug_toolbar',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'turbosettings.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': parameters.TEMPLATES_DIRS if parameters.TEMPLATES_DIRS else [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
'builtins': [
'django.templatetags.i18n',
'django.contrib.humanize.templatetags.humanize',
'django.contrib.staticfiles.templatetags.staticfiles',
],
},
},
]
WSGI_APPLICATION = 'turbosettings.wsgi.application'
CORS_ORIGIN_WHITELIST = [
'localhost:8080',
'127.0.0.1:8080',
]
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'fr'
TIME_ZONE = "Europe/Paris"
USE_I18N = True
USE_L10N = True
USE_TZ = True
gettext = lambda x: x
LANGUAGES = (
('fr', gettext('Français')),
('en', gettext('English')),
)
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale/'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = FORCE_SCRIPT_NAME + "/static/"
STATIC_ROOT = BASE_DIR + '/static/'
STATICFILES_DIRS = parameters.STATICFILES_DIRS if parameters.STATICFILES_DIRS else (
"assets/",
)
FIXTURE_DIRS = (
    'fixtures/',
)
MEDIA_URL = '/'
MEDIA_ROOT = BASE_DIR + '/media/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
}
JWT_AUTH = {
'JWT_SECRET_KEY': secret_key_from_file('secret_key_jwt'),
'JWT_ALLOW_REFRESH': True,
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=18000),
}
# For debug toolbar
INTERNAL_IPS = ["127.0.0.1"]
from turbosettings.settings_local import *
| giliam/turbo-songwriter | backend/turbosettings/settings.py | Python | mit | 4,924 |
from distutils.core import Extension
from collections import defaultdict
def get_extensions():
import numpy as np
exts = []
# malloc
mac_incl_path = "/usr/include/malloc"
cfg = defaultdict(list)
cfg['include_dirs'].append(np.get_include())
cfg['include_dirs'].append(mac_incl_path)
cfg['include_dirs'].append('gala/potential')
cfg['extra_compile_args'].append('--std=gnu99')
cfg['sources'].append('gala/integrate/cyintegrators/leapfrog.pyx')
cfg['sources'].append('gala/potential/potential/src/cpotential.c')
exts.append(Extension('gala.integrate.cyintegrators.leapfrog', **cfg))
cfg = defaultdict(list)
cfg['include_dirs'].append(np.get_include())
cfg['include_dirs'].append(mac_incl_path)
cfg['include_dirs'].append('gala/potential')
cfg['extra_compile_args'].append('--std=gnu99')
cfg['sources'].append('gala/potential/hamiltonian/src/chamiltonian.c')
cfg['sources'].append('gala/potential/potential/src/cpotential.c')
cfg['sources'].append('gala/integrate/cyintegrators/dop853.pyx')
cfg['sources'].append('gala/integrate/cyintegrators/dopri/dop853.c')
exts.append(Extension('gala.integrate.cyintegrators.dop853', **cfg))
cfg = defaultdict(list)
cfg['include_dirs'].append(np.get_include())
cfg['include_dirs'].append(mac_incl_path)
cfg['include_dirs'].append('gala/potential')
cfg['extra_compile_args'].append('--std=gnu99')
cfg['sources'].append('gala/integrate/cyintegrators/ruth4.pyx')
cfg['sources'].append('gala/potential/potential/src/cpotential.c')
exts.append(Extension('gala.integrate.cyintegrators.ruth4', **cfg))
return exts
| adrn/gala | gala/integrate/setup_package.py | Python | mit | 1,672 |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class WorkspaceShortcut(Document):
pass
| mhbu50/frappe | frappe/desk/doctype/workspace_shortcut/workspace_shortcut.py | Python | mit | 235 |
N = int(input())
ans = [0] * N
for i in range(0, N, 5):
q = [0] * N
for j in range(i, min(N, i + 5)):
q[j] = 10 ** (j - i)
print('? {}'.format(' '.join(map(str, q))), flush=True)
S = str(int(input().strip()) - sum(q) * 7)[::-1]
for j in range(i, min(N, i + 5)):
ans[j] = (int(S[j - i]) % 2) ^ 1
print('! {}'.format(' '.join(map(str, ans))), flush=True)
| knuu/competitive-programming | atcoder/corp/codethanksfes2017_e.py | Python | mit | 389 |
import glob
import json
from os.path import basename, dirname, realpath
from BeautifulSoup import BeautifulSoup
from flask import Response, request, render_template, send_from_directory
from annotaria import app
from store import Store
app.config.from_object(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
SPARQL_ENDPOINT="http://localhost:3030/annotaria",
DEBUG=True
))
app.config.from_envvar('ANNOTARIA_SETTINGS', silent=True)
# We define our own jsonify rather than using flask.jsonify because we wish
# to jsonify arbitrary objects (e.g. index returns a list) rather than kwargs.
def jsonify(obj, *args, **kwargs):
res = json.dumps(obj, indent=None if request.is_xhr else 2)
return Response(res, mimetype='application/json', *args, **kwargs)
# html = lxml.html.document_fromstring(html)
# for element, attribute, link, pos in html.iterlinks():
# if attribute == "src":
# new_src = 'articles/images/' + basename(link)
# element.set('src', new_src)
# print lxml.html.tostring(html)
def parse_article(html):
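    # Parse the article HTML with BeautifulSoup, rewrite <img> sources to the
    # local articles/images/ proxy route and return the title and body.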
soup = BeautifulSoup(html)
# fix img "src" attribute
for img in soup.findAll('img'):
img['src'] = 'articles/images/' + basename(img['src'])
return {
'title': soup.title.string,
'body': str(soup.body)
}
# ## ROUTING ###
# root
@app.route('/')
def root():
return render_template('index.html')
# retrieve articles list
@app.route('/articles', methods=['GET'])
def get_articles():
path = dirname(realpath(__file__))
ret = []
for f in sorted(glob.glob(path + "/articles/*.html")):
if basename(f) != "index.html": # skip index
ret.append({
'href': basename(f),
'title': basename(f)
})
return jsonify(ret)
# retrieve a single article
@app.route('/article/<file_name>', methods=['GET'])
def get_article(file_name):
try:
path = dirname(realpath(__file__))
with open(path + '/articles/' + file_name, 'r') as content_file:
ret = parse_article(content_file.read())
except Exception, e:
raise e
return jsonify(ret)
# proxy article images
@app.route('/articles/images/<file_name>', methods=['GET'])
def get_article_image(file_name):
try:
path = dirname(realpath(__file__))
return send_from_directory(path + '/articles/images/', file_name)
except Exception, e:
raise e
# get all annotations for a single article
@app.route('/annotations/<article>', methods=['GET'])
def get_annotations(article):
store = Store(app.config['SPARQL_ENDPOINT'])
return jsonify(store.query_article(article))
# store one or more annotations in the triple store
@app.route('/annotations/', methods=['POST'])
def set_annotations():
store = Store(app.config['SPARQL_ENDPOINT'])
annotations = json.loads(request.form['data'])
return jsonify(store.store_annotations(annotations))
# retrieve the list of known persons (authors)
@app.route('/person', methods=['GET'])
def get_person():
store = Store(app.config['SPARQL_ENDPOINT'])
return jsonify(store.query_authors())
# store a new person in the triple store
@app.route('/person/', methods=['POST'])
def set_person():
store = Store(app.config['SPARQL_ENDPOINT'])
return store.insert_author(json.loads(request.form['data']))
# retrieve the list of organizations
@app.route('/organization', methods=['GET'])
def get_organization():
store = Store(app.config['SPARQL_ENDPOINT'])
return jsonify(store.query_organization())
# store a new organization in the triple store
@app.route('/organization/', methods=['POST'])
def set_organization():
store = Store(app.config['SPARQL_ENDPOINT'])
return store.insert_organization(json.loads(request.form['data']))
# retrieve the list of places
@app.route('/place', methods=['GET'])
def get_place():
store = Store(app.config['SPARQL_ENDPOINT'])
return jsonify(store.query_place())
# store a new place in the triple store
@app.route('/place/', methods=['POST'])
def set_place():
store = Store(app.config['SPARQL_ENDPOINT'])
return store.insert_place(json.loads(request.form['data']))
# retrieve the list of disease concepts
@app.route('/disease', methods=['GET'])
def get_disease():
store = Store(app.config['SPARQL_ENDPOINT'])
return jsonify(store.query_concept())
# store a new disease concept in the triple store
@app.route('/disease/', methods=['POST'])
def set_disease():
store = Store(app.config['SPARQL_ENDPOINT'])
return store.insert_concept(json.loads(request.form['data']))
# retrieve the list of subject concepts
@app.route('/subject', methods=['GET'])
def get_subject():
store = Store(app.config['SPARQL_ENDPOINT'])
return jsonify(store.query_concept())
# store a new subject concept in the triple store
@app.route('/subject/', methods=['POST'])
def set_subject():
store = Store(app.config['SPARQL_ENDPOINT'])
return store.insert_concept(json.loads(request.form['data']))
# retrieve the list of dbpedia concepts
@app.route('/dbpedia', methods=['GET'])
def get_dbpedia():
store = Store(app.config['SPARQL_ENDPOINT'])
return jsonify(store.query_concept())
# store a new dbpedia concept in the triple store
@app.route('/dbpedia/', methods=['POST'])
def set_dbpedia():
store = Store(app.config['SPARQL_ENDPOINT'])
return store.insert_concept(json.loads(request.form['data']))
| ciromattia/annotaria | annotaria/views.py | Python | mit | 5,625 |
import curses
from cursesmenu import clear_terminal
from cursesmenu.items import MenuItem
class ExternalItem(MenuItem):
"""
A base class for items that need to do stuff on the console outside of curses mode.
Sets the terminal back to standard mode until the action is done.
Should probably be subclassed.
"""
def __init__(self, text, menu=None, should_exit=False):
# Here so Sphinx doesn't copy extraneous info from the superclass's docstring
super(ExternalItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
def set_up(self):
"""
This class overrides this method
"""
self.menu.pause()
curses.def_prog_mode()
clear_terminal()
self.menu.clear_screen()
def clean_up(self):
"""
This class overrides this method
"""
self.menu.clear_screen()
curses.reset_prog_mode()
curses.curs_set(1) # reset doesn't do this right
curses.curs_set(0)
self.menu.resume()
| mholgatem/GPIOnext | cursesmenu/items/external_item.py | Python | mit | 1,040 |
from .voxel_dir import task_dir, storage_dir, image_dir | andyneff/voxel-globe | voxel_globe/tools/__init__.py | Python | mit | 55 |
""" Contains a few template tags relating to autotags
Specifically, the autotag tag itself, and the filters epoch and cstag.
"""
from django import template
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from tags.models import Tag
import datetime
import calendar
import types
register = template.Library()
@register.inclusion_tag("autotags/autotag.html")
def autotag(autotag, untag=None, **kwargs):
""" Displays an autotag as a block
    An autotag block is an HTML element with the class "block", surprisingly enough, and consists of a head and a body.
The head is always visible, but the body is only visible when you click the button, and contains buttons to edit the
autotag.
"""
kwargs["at"] = autotag
kwargs["tags"] = Tag.expand_implies_check(autotag.tags.all())
kwargs["colour"] = "white"
for (t, _) in kwargs["tags"]:
if t.colour != "white":
kwargs["colour"] = t.colour
break
return kwargs
@register.filter(expects_localtime=True)
def epoch(value):
""" Convert datetime object into seconds from epoch """
if isinstance(value, datetime.datetime):
return int(calendar.timegm(value.timetuple()))
return ''
@register.filter()
def cstag(value):
""" Return the tags for this bookmark as a comma seperated list """
return ",".join(map((lambda t: t.slug.replace(",", "")), Tag.expand_implies(value.tags.all())))
| RossBrunton/BMAT | autotags/templatetags/autotag.py | Python | mit | 1,478 |
# -*- coding: utf-8 -*-
import os
from .. import OratorTestCase
from . import IntegrationTestCase
class MySQLIntegrationTestCase(IntegrationTestCase, OratorTestCase):
@classmethod
def get_manager_config(cls):
ci = os.environ.get("CI", False)
if ci:
database = "orator_test"
user = "root"
password = ""
else:
database = "orator_test"
user = "orator"
password = "orator"
return {
"default": "mysql",
"mysql": {
"driver": "mysql",
"database": database,
"user": user,
"password": password,
},
}
def get_marker(self):
return "%s"
| sdispater/orator | tests/integrations/test_mysql.py | Python | mit | 764 |
#!/Users/Drake/dev/LouderDev/louderdev/bin/python3
#
# The Python Imaging Library
# $Id$
#
# split an animation into a number of frame files
#
from __future__ import print_function
from PIL import Image
import os
import sys
class Interval(object):
def __init__(self, interval="0"):
self.setinterval(interval)
def setinterval(self, interval):
self.hilo = []
for s in interval.split(","):
if not s.strip():
continue
try:
v = int(s)
if v < 0:
lo, hi = 0, -v
else:
lo = hi = v
except ValueError:
i = s.find("-")
lo, hi = int(s[:i]), int(s[i+1:])
self.hilo.append((hi, lo))
if not self.hilo:
self.hilo = [(sys.maxsize, 0)]
def __getitem__(self, index):
for hi, lo in self.hilo:
if hi >= index >= lo:
return 1
return 0
# --------------------------------------------------------------------
# main program
html = 0
if sys.argv[1:2] == ["-h"]:
html = 1
del sys.argv[1]
if not sys.argv[2:]:
print()
print("Syntax: python explode.py infile template [range]")
print()
print("The template argument is used to construct the names of the")
print("individual frame files. The frames are numbered file001.ext,")
print("file002.ext, etc. You can insert %d to control the placement")
print("and syntax of the frame number.")
print()
print("The optional range argument specifies which frames to extract.")
print("You can give one or more ranges like 1-10, 5, -15 etc. If")
print("omitted, all frames are extracted.")
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
frames = Interval(",".join(sys.argv[3:]))
try:
# check if outfile contains a placeholder
outfile % 1
except TypeError:
file, ext = os.path.splitext(outfile)
outfile = file + "%03d" + ext
ix = 1
im = Image.open(infile)
if html:
file, ext = os.path.splitext(outfile)
html = open(file+".html", "w")
html.write("<html>\n<body>\n")
while True:
if frames[ix]:
im.save(outfile % ix)
print(outfile % ix)
if html:
html.write("<img src='%s'><br>\n" % outfile % ix)
try:
im.seek(ix)
except EOFError:
break
ix += 1
if html:
html.write("</body>\n</html>\n")
| drakeloud/louderdev | louderdev/bin/explode.py | Python | mit | 2,470 |
import json, random, time, sys
"""
Creating a random JSON object based on lists of info and random numbers
to assign the index
"""
##Directory
input_file = "test.json"
###Success message takes the file_name and operation type (ie. written, closed)
def process_message(outcome, file_name, operation_type):
print "*******%s File: %s %s *******" % (outcome, file_name, operation_type)
##Open file
try:
open_file=open(input_file, 'w')
print "File opened"
except:
print "Error opening "+input_file
##Random chooser-random number picker function to be used over and over, but needs to be created before called
##To keep everything clean it's listed before the other functions so that they may be listed in the order of the dictionary keys
def random_chooser(start,end):
return random.randrange(start,end)
##Lists of info
doctors_name=["Dr_K", "Dr. Pepper", "Dr. Lector", "Dr. Seus", "Dr Dre", "Dr. Phill", "Dr. Glass"]
special_notes_list=["No more doctors available for the weekend", "All offices closed for Labor Day", "Offices closed till Monday for Christmas",
"No Dr. on call Saturdays", "No Dr. on call Fridays", "No Dr. on call Mondays", "No Dr. on call Wednesdays" ,"No Dr. on call Tuesdays",
"Office closed for snow"]
dates=["1/17/2013","12/02/2011", "11/08/2012", "4/1/2010", "5/23/2011","1/15/2013","12/02/2010", "12/08/2012", "6/1/2010", "7/23/2011"]
first_name=["Bob", "Peter", "Jim", "Gerry", "Jean", "Robert", "Susan", "Mary", "Jo", "Brian"]
last_name=["Cameron", "Bender", "Neutron", "Simmons", "Jackson", "Smith", "Gardner", "Crocker","Black", "White"]
from_place=["Fort Worth","Plano","Houston","Little Rock","Detroit","Memphis", "Dallas","Arlington","Jenks","Chicago","Tulsa", "Boise", "Desmoins", "Minnieapolis", "St. Louis"]
check_list=["5647","7610","1230","3210","6543","9874","1324","3215","5897","6546","5968","6540"]
content_list=["Nice to see you!", "This is a content message", "This is another content message" ,"This is a test message to verify that the content is coming through",
"This is the content you are looking for","Content is magically here","Some content","Test content for your viewing pleasure",
"This is a test of the call_manager content system","Testing...testing...1...2...3!","Testing...testing...4...5...6!"]
##Keys for the dictionary
messages_info_keys = ["date_and_time", "caller_name", "from", "call_back_number", "call_back_ext", "check_number", "content"]
##Random pick of date from list dates
def date_picker():
picked_date=random_chooser(1,len(dates))
new_date=dates[picked_date]
return new_date
##creates a full name from lists first_name and last_name
def pick_your_name():
first=random_chooser(1,len(first_name))
last=random_chooser(1,10)
combo_name =first_name[first]+" "+last_name[last]
return combo_name
##Random pick of location from list from_place
def random_place():
picked_place=random_chooser(1,len(from_place))
place=from_place[picked_place]
return place
##Random number generator with randint from the random module
def random_number_maker(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return random.randint(range_start, range_end)
##combines a group of random numbers to resemble a phone number
def random_phone_number():
return "%s-%s-%s" %(str(random_number_maker(3)), str(random_number_maker(3)), str(random_number_maker(4)))
##call_back_ext picker, uses random number to generate number
def random_ext():
extension_maker=random_chooser(111,999)
return extension_maker
## not needed using random phone number generator
#call_back=[1,65,3,5,7,88]
##Random check number picker from list check_list
def check():
check_picker=random_chooser(1,10)
check=check_list[check_picker]
#=[1,2,3,5,6,8,98]
return check
##Random content picker from list content_list
def content():
content_picker=random_chooser(1,len(content_list))
content=content_list[content_picker]
return content
##Generates a random number of message items
def messages_list_random_maker():
x=0
lister_maker=[]
while(x<random_chooser(1,20)):
messages_info_values = [date_picker(),pick_your_name(),random_place(),random_phone_number(),random_ext(),check(), content()]
messages_info_list = dict(zip(messages_info_keys, messages_info_values))
lister_maker.append(messages_info_list)
x=x+1
return lister_maker
##dictionaries of info
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
messages_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
##Main area that puts everything together
doctors_list=[]
for name in doctors_name:
random_number=random.randrange(0,10)
special_notes_random_number=random.randrange(0,len(special_notes_list))
special_notes=special_notes_list[special_notes_random_number]
acct_number=random_number_maker(4)
ticket_number = abs(random_number-10)+1
duration_of_call = abs(random_number-10)+1
listerine = messages_list_random_maker()
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
doctors_list.append({"doctors_name":name, "special_notes":special_notes, "acct_number":acct_number,
"ticket_number":ticket_number, "duration_of_call":duration_of_call, "call_status": "ringing", "account_info": account_info_dict,
"messages":listerine})
##Dumps the list of dicts to a JSON string
jasoner=json.dumps(doctors_list)
#print jasoner
##Count up percentage of completion
for i in range(100):
print "\r", str(i)+"%"
time.sleep(.025)
print "\r"
##Write file
try:
open_file.write(jasoner)
process_message("SUCCESS", input_file, "Written")
except:
process_message("FAILURE" , input_file, "Not Written")
##Close file
try:
open_file.close()
process_message("SUCCESS", input_file, "Closed")
except:
process_message("FAILURE" , input_file, "Not Closed")
| bdeangelis/call_man_bat | static/frontend/jsonmaking.py | Python | mit | 6,080 |
#!/usr/bin/python3
import tkinter
import PIL.Image
import PIL.ImageTk
from tkinter.ttk import Progressbar as pbar
from PyFont import Font, SVG
class TkFont():
CHARY = 200
CHARX = 50
LINEY = CHARY / 2
MAIN_COLOR = '#FFFFFF'
def set_label(self):
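        # Render the last word to PNG, scale it to the line height, crop the
        # transparent margins and place it as a Tk label at the current
        # drawing position.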
tmp = self.words[-1].export_png_to_str()
photoImg = PIL.Image.open(tmp)
w, h = photoImg.size
nh = self.CHARY
coef = nh / h
nw = int(w * coef)
self.tmpx[-1] = int(self.words[-1].colors[SVG.SVG.LINK_COLOR_RIGHT].x *
coef) + self.CHARX
photoImg = photoImg.resize((nw, nh))#, PIL.Image.ANTIALIAS)
pix = photoImg.load()
found = False
miny = 0
for y in range(nh):
for x in range(nw):
if pix[x, y] != (0, 0, 0, 0):
miny = y
found = True
break
if found:
break
found = False
maxy = 0
for y in range(nh):
for x in range(nw):
if pix[x, nh - y - 1] != (0, 0, 0, 0):
maxy = nh - y - 1
found = True
break
if found:
break
if found:
photoImg = photoImg.crop((0, miny, nw, maxy))
photo = PIL.ImageTk.PhotoImage(photoImg)
self.labels[-1].place_forget()
self.labels[-1] = tkinter.Label(self.win, image=photo)
self.labels[-1].config(background=self.MAIN_COLOR)
self.labels[-1].image = photo
self.labels[-1].place(x = self.x[-1], y = self.y + miny)
def __init__(self, win, font, gui):
self.win = win
self.gui = gui
self.font = font
self.string = ""
self.words = []
self.labels = []
self.y = 0
self.x = [0]
self.tmpx = [0]
def backspace(self):
if not self.string:
return
if self.string[-1] == "\n":
self.tmpx = self.tmpx[:-1]
self.x = self.x[:-1]
self.y -= self.LINEY
elif self.string[-1] == " ":
self.tmpx = self.tmpx[:-1]
self.x[-1] -= self.tmpx[-1]
else:
self.words[-1].backspace()
self.set_label()
if self.string[-2:-1] in ["\n", " ", ""]:
self.words[-1].backspace()
self.words = self.words[:-1]
self.labels[-1].place_forget()
self.labels = self.labels[:-1]
self.string = self.string[:-1]
def ret(self):
self.y += self.LINEY
self.x += [0]
self.tmpx += [0]
self.string += "\n"
def space(self):
self.x[-1] += self.tmpx[-1]
self.tmpx += [0]
self.string += " "
def handle_char(self, c):
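        # Dispatch one typed character: backspace/newline/space adjust the
        # layout, and any character known to the font is rendered and linked
        # onto the current word.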
c = c.lower()
if c == "\b":
self.backspace()
elif c == "\r":
self.ret()
elif c == " ":
self.space()
elif c in self.font.chars:
svg = self.font.chr2svg(c)
if self.string[-1:] in ["\n", " ", ""]:
self.words += [svg]
self.labels += [tkinter.Label(self.win)]
else:
self.words[-1].link_with(svg)
self.set_label()
self.string += c
def theend(self):
if self.words:
svg = self.font.generate_svg("")
word = False
for c in self.string:
if c == " ":
word = False
svg.link_with(self.font.chr2svg(" "))
elif c == "\n":
word = False
svg.newline()
elif not word:
word = True
svg.link_with(self.words[0])
self.words = self.words[1:]
# bar.value += 100 / barlen
self.gui.the_end(svg)
self.win.destroy()
def export(self):
if self.words:
svg = self.font.generate_svg("")
word = False
for c in self.string:
if c == " ":
word = False
svg.link_with(self.font.chr2svg(" "))
elif c == "\n":
word = False
svg.newline()
elif not word:
word = True
svg.link_with(self.words[0])
self.words = self.words[1:]
self.gui.the_end(svg)
def get_svg(self):
if self.words:
svg = self.font.generate_svg("")
word = False
for c in self.string:
if c == " ":
word = False
svg.link_with(self.font.chr2svg(" "))
elif c == "\n":
word = False
svg.newline()
elif not word:
word = True
svg.link_with(self.words[0])
self.words = self.words[1:]
return svg
return None
| chichaj/PyFont | PyFont/FontRuntime.py | Python | mit | 5,128 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('computing', '0004_auto_20141127_1425'),
]
operations = [
migrations.CreateModel(
name='Subnet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('from_ip', models.CharField(max_length=15)),
('to_ip', models.CharField(max_length=15)),
],
options={
},
bases=(models.Model,),
),
migrations.DeleteModel(
name='Category',
),
migrations.AddField(
model_name='computer',
name='subnet',
field=models.ForeignKey(blank=True, to='computing.Subnet', null=True),
preserve_default=True,
),
]
| tamasgal/rlogbook | rlogbook/computing/migrations/0005_auto_20141127_1436.py | Python | mit | 1,018 |
# https://www.w3resource.com/python-exercises/
# 1. Write a Python program to print the following string in a specific format (see the output).
# Sample String : "Twinkle, twinkle, little star, How I wonder what you are! Up above the world so high, Like a diamond
# in the sky. Twinkle, twinkle, little star, How I wonder what you are" Output :
# Twinkle, twinkle, little star,
# How I wonder what you are!
# Up above the world so high,
# Like a diamond in the sky.
# Twinkle, twinkle, little star,
# How I wonder what you are
string = """
Twinkle, twinkle, little star,
\tHow I wonder what you are!
\t\tUp above the world so high,
\t\tLike a diamond in the sky.
Twinkle, twinkle, little star,
\tHow I wonder what you are
"""
print string
| dadavidson/Python_Lab | Python-w3resource/Python_Basic/ex01.py | Python | mit | 715 |
"""Test the print-to-python-file module
This just uses the simpleparsegrammar declaration, which is
parsed, then linearised, then loaded as a Python module.
"""
import os, unittest
import test_grammarparser
testModuleFile = 'test_printers_garbage.py'
class PrintersTests(test_grammarparser.SimpleParseGrammarTests):
def setUp( self ):
from simpleparse import simpleparsegrammar, parser, printers, baseparser
p = parser.Parser( simpleparsegrammar.declaration, 'declarationset')
open(testModuleFile,'w').write(printers.asGenerator( p._generator ))
import test_printers_garbage
reload( test_printers_garbage )
class RParser( test_printers_garbage.Parser, baseparser.BaseParser ):
pass
self.recursiveParser = RParser()
def tearDown( self ):
try:
os.remove( testModuleFile )
except IOError, err:
pass
def doBasicTest(self, parserName, testValue, expected, ):
result = self.recursiveParser.parse( testValue, production=parserName )
assert result == expected, '''\nexpected:%s\n got:%s\n'''%( expected, result )
def getSuite():
return unittest.makeSuite(PrintersTests,'test')
if __name__ == "__main__":
unittest.main(defaultTest="getSuite")
| alexus37/AugmentedRealityChess | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/simpleparse/tests/test_printers.py | Python | mit | 1,186 |
import numpy as np
import pytest
from nilabels.tools.image_colors_manipulations.relabeller import relabeller, permute_labels, erase_labels, \
assign_all_other_labels_the_same_value, keep_only_one_label, relabel_half_side_one_label
def test_relabeller_basic():
data = np.array(range(10)).reshape(2, 5)
relabelled_data = relabeller(data, range(10), range(10)[::-1])
np.testing.assert_array_equal(relabelled_data, np.array(range(10)[::-1]).reshape(2,5))
def test_relabeller_one_element():
data = np.array(range(10)).reshape(2, 5)
relabelled_data = relabeller(data, 0, 1, verbose=1)
expected_output = data[:]
expected_output[0, 0] = 1
np.testing.assert_array_equal(relabelled_data, expected_output)
def test_relabeller_one_element_not_in_array():
data = np.array(range(10)).reshape(2, 5)
relabelled_data = relabeller(data, 15, 1, verbose=1)
np.testing.assert_array_equal(relabelled_data, data)
def test_relabeller_wrong_input():
data = np.array(range(10)).reshape(2, 5)
with np.testing.assert_raises(IOError):
relabeller(data, [1, 2], [3, 4, 4])
def test_permute_labels_invalid_permutation():
invalid_permutation = [[3, 3, 3], [1, 1, 1]]
with pytest.raises(IOError):
permute_labels(np.zeros([3, 3]), invalid_permutation)
def test_permute_labels_valid_permutation():
data = np.array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
valid_permutation = [[1, 2, 3], [1, 3, 2]]
perm_data = permute_labels(data, valid_permutation)
expected_data = np.array([[1, 3, 2],
[1, 3, 2],
[1, 3, 2]])
np.testing.assert_equal(perm_data, expected_data)
def test_erase_label_simple():
data = np.array(range(10)).reshape(2, 5)
data_erased_1 = erase_labels(data, 1)
expected_output = data[:]
expected_output[0, 1] = 0
np.testing.assert_array_equal(data_erased_1, expected_output)
def test_assign_all_other_labels_the_same_values_simple():
data = np.array(range(10)).reshape(2, 5)
data_erased_1 = erase_labels(data, 1)
data_labels_to_keep = assign_all_other_labels_the_same_value(data, range(2, 10), same_value_label=0)
np.testing.assert_array_equal(data_erased_1, data_labels_to_keep)
def test_assign_all_other_labels_the_same_values_single_value():
data = np.array(range(10)).reshape(2, 5)
data_erased_1 = np.zeros_like(data)
data_erased_1[0, 1] = 1
data_labels_to_keep = assign_all_other_labels_the_same_value(data, 1, same_value_label=0)
np.testing.assert_array_equal(data_erased_1, data_labels_to_keep)
def test_keep_only_one_label_label_simple():
data = np.array(range(10)).reshape(2, 5)
new_data = keep_only_one_label(data, 1)
expected_data = np.zeros([2, 5])
expected_data[0, 1] = 1
np.testing.assert_array_equal(new_data, expected_data)
def test_keep_only_one_label_label_not_present():
data = np.array(range(10)).reshape(2, 5)
new_data = keep_only_one_label(data, 120)
np.testing.assert_array_equal(new_data, data)
def test_relabel_half_side_one_label_wrong_input_shape():
data = np.array(range(10)).reshape(2, 5)
with np.testing.assert_raises(IOError):
relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1], side_to_modify='above',
axis='x', plane_intercept=2)
def test_relabel_half_side_one_label_wrong_input_side():
data = np.array(range(27)).reshape(3, 3, 3)
with np.testing.assert_raises(IOError):
relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1], side_to_modify='spam',
axis='x', plane_intercept=2)
def test_relabel_half_side_one_label_wrong_input_axis():
data = np.array(range(27)).reshape(3, 3, 3)
with np.testing.assert_raises(IOError):
relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1], side_to_modify='above',
axis='spam', plane_intercept=2)
def test_relabel_half_side_one_label_wrong_input_simple():
data = np.array(range(3 ** 3)).reshape(3, 3, 3)
# Z above
new_data = relabel_half_side_one_label(data, label_old=1, label_new=100, side_to_modify='above',
axis='z', plane_intercept=1)
expected_data = data[:]
expected_data[0, 0, 1] = 100
np.testing.assert_array_equal(new_data, expected_data)
# Z below
new_data = relabel_half_side_one_label(data, label_old=3, label_new=300, side_to_modify='below',
axis='z', plane_intercept=2)
expected_data = data[:]
expected_data[0, 1, 0] = 300
np.testing.assert_array_equal(new_data, expected_data)
# Y above
new_data = relabel_half_side_one_label(data, label_old=8, label_new=800, side_to_modify='above',
axis='y', plane_intercept=1)
expected_data = data[:]
expected_data[0, 2, 2] = 800
np.testing.assert_array_equal(new_data, expected_data)
# Y below
new_data = relabel_half_side_one_label(data, label_old=6, label_new=600, side_to_modify='below',
axis='y', plane_intercept=2)
expected_data = data[:]
expected_data[0, 2, 0] = 600
np.testing.assert_array_equal(new_data, expected_data)
# X above
new_data = relabel_half_side_one_label(data, label_old=18, label_new=180, side_to_modify='above',
axis='x', plane_intercept=1)
expected_data = data[:]
expected_data[2, 0, 0] = 180
np.testing.assert_array_equal(new_data, expected_data)
# X below
new_data = relabel_half_side_one_label(data, label_old=4, label_new=400, side_to_modify='below',
axis='x', plane_intercept=2)
expected_data = data[:]
expected_data[0, 1, 1] = 400
np.testing.assert_array_equal(new_data, expected_data)
if __name__ == '__main__':
test_relabeller_basic()
test_relabeller_one_element()
test_relabeller_one_element_not_in_array()
test_relabeller_wrong_input()
test_permute_labels_invalid_permutation()
test_permute_labels_valid_permutation()
test_erase_label_simple()
test_assign_all_other_labels_the_same_values_simple()
test_assign_all_other_labels_the_same_values_single_value()
test_keep_only_one_label_label_simple()
test_keep_only_one_label_label_not_present()
test_relabel_half_side_one_label_wrong_input_shape()
test_relabel_half_side_one_label_wrong_input_side()
test_relabel_half_side_one_label_wrong_input_axis()
test_relabel_half_side_one_label_wrong_input_simple()
| SebastianoF/LabelsManager | tests/tools/test_image_colors_manip_relabeller.py | Python | mit | 6,757 |
# Problem: Search for a Range
#
# Given a sorted array of integers, find the starting and ending position of a given target value.
#
# Your algorithm's runtime complexity must be in the order of O(log n).
#
# If the target is not found in the array, return [-1, -1].
#
# For example,
#
# Given [5, 7, 7, 8, 8, 10] and target value 8,
#
# return [3, 4].
#
################################################################################
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
low, high = 0, len(nums) - 1
while low <= high:
mid = (low + high) / 2
if nums[mid] < target:
low = mid + 1
elif nums[mid] > target:
high = mid - 1
else:
start, end = mid, mid
while start - 1 >= 0 and nums[start - 1] == nums[mid]:
start -= 1
while end + 1 <= (len(nums) - 1) and nums[end + 1] == nums[mid]:
end += 1
return [start, end]
return [-1, -1]
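# Example from the problem statement above:
#   Solution().searchRange([5, 7, 7, 8, 8, 10], 8)  # -> [3, 4]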
| samlaudev/LeetCode | Python/Search for a Range/Solution.py | Python | mit | 1,176 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
plot the results from the files igraph_degree_assort_study and degree_assortativity
'''
from igraph import *
import os
import numpy as np
import matplotlib.pyplot as plt
#########################
IN_DIR = '/home/sscepano/Projects7s/Twitter-workspace/ALL_SR'
img_out_plot = "7MOda_unweighted.png"
#########################
#########################
# read the threshold / degree-assortativity results from the output files
#########################
def read_in_res():
f = open('7MODeg_assort_study.weighted_edge_list', 'r')
DA = []
TH = []
for line in f:
if line.startswith('stats for'):
th = float(line.split()[-1])
TH.append(th)
if line.startswith('The network is'):
da = float(line.split()[-1])
DA.append(da)
th_last = th
f2 = open('plot_da_0.2.txt', 'r')
for line in f2:
(th, da) = line.split()
th = float(th)
if th < th_last:
continue
da = float(da)
TH.append(th)
DA.append(da)
f3 = open('DA_SR_th.tab', 'w')
for i in range(len(TH)):
f3.write(str(TH[i]) + '\t' + str(DA[i]) + '\n')
return TH, DA
def plot_DA(xaxis, da):
x = np.array(xaxis)
y = np.array(da)
plt.plot(x, y, 'c')
plt.grid(True)
plt.title('SR network')
#plt.legend(bbox_to_anchor=(0, 1), bbox_transform=plt.gcf().transFigure)
plt.ylabel('degree assortativity')
plt.xlabel('SR threshold')
plt.savefig(img_out_plot,format='png',dpi=200)
def main():
os.chdir(IN_DIR)
x, DA = read_in_res()
plot_DA(x, DA)
main() | sanja7s/SR_Twitter | src_graph/plot_degree_assortativity.py | Python | mit | 1,447 |
import shelve
"""
Currently unused. All mysql queries are now done via IomDataModels.
May be resurrected to help with shelve and pickles
"""
from USCProjectDAOs import IOMProjectDAO
class IOMService(IOMProjectDAO):
"""
This handles interactions with the IOM data database and storage files.
All user applications should work off of this
"""
def __init__(self):
"""
Will hold the identifiers for records
"""
self.names = []
"""
Will hold the positive sentiment scores
"""
self.posSent = []
"""
Will hold the negative sentiment scores
"""
self.negSent = []
"""
Will hold the net sentiment scores
"""
self.netSent = []
"""
Will hold the sums of the absolute values of the sentiment scores
"""
self.absumSent = []
def connect_to_mysql(self, test):
"""
Test should be boolean
"""
IOMProjectDAO.__init__(self, test, 'true')
def get_sentiment_data_from_file(self, datafile):
"""
This is the generic file data loader.
datafile shold be a path to file
"""
# Open data file and push into lists
db = shelve.open(datafile)
self.keys = list(db.keys())
for k in self.keys:
s = db[k]
self.names.append(s['quote_id'])
self.posSent.append(s['avgPos'])
self.negSent.append(s['avgNeg'])
self.netSent.append(s['netSent'])
self.absumSent.append(abs(s['avgPos']) + abs(s['avgNeg']))
db.close()
def save_sentiment_data_to_file(self, datafile, label):
"""
This is a generic file data saver.
datafile should be a path to file
@param datafile: The path to the datafile
@type datafile: C{string}
"""
# try:
db = shelve.open(datafile)
db[label] = self.to_save
db.close()
print(self.to_save)
return self.to_save
        # Check whether the problem was there not being a dictionary available to save
#except:
# try:
# self.to_save
# print ('Problem saving')
# except:
# print ('No variable self.to_save set')
# def get_data_from_database(self, query, val):
# """
# This executes a parameterized query of the mysql database, stores the results in a list of dictionaries called self.dbdata.
#
# @return Also returns dbdata
#
# @param query A mysql query with %s in place of all substitution variables
# @type query string
    # @param val A list containing all substitution parameters or empty if no substitutions are needed
# @type val list
#
# TODO Should have something to check whether a connection exists
# """
# self.connect_to_mysql('false')
# self.query = query
# self.val = val
# self.returnAll()
# self.dbdata = list(self.results)
#
#
# class QueryShell(IOMService):
# """
# This is just a shell to easily run queries on the database and get the results as a list of dictionaries
#
# @return Returns list of dictionaries
# """
#
# def __init__(self):
# IOMService.__init__(self)
#
# def query(self, query, val):
# self.get_data_from_database(query, val)
# return self.dbdata
#
#
# class DHShell(IOMService):
# """
# This is a shell for use in public events to avoid cluttering up the page with each step of the query
# It resets all its values after returning an array of dictionaries and thus need not be reinvoked.
# Note that These queries are not parameterized
#
# @return Returns list of dictionaries
# """
#
# def __init__(self, query_string):
# """
# @param query_string The query string
# @type string
# """
# IOMService.__init__(self)
# self.q(query_string)
#
# def q(self, query_string):
# # Get rid of previous queries
# # self.results = []
# # self.dbdata = None
# #These queries are not parameterized
# val = []
# self.get_data_from_database(query_string, val)
# return self.dbdata
class ShelveDataHandler(IOMService):
def __init__(self):
import shelve
self.datafolder = 'storedData/'
def openData(self, file_name):
"""
Opens shelve file and returns the list
"""
db = shelve.open(self.datafolder + file_name)
list_to_populate = list(db.values())
db.close()
return list_to_populate[0]
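    # Example round trip (illustrative; 'demo_scores' is an assumed file name),
    # using bagSaver defined below:
    #   handler = ShelveDataHandler()
    #   handler.bagSaver([1, 2, 3], 'demo_scores')
    #   assert handler.openData('demo_scores') == [1, 2, 3]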
def bagSaver(self, list_to_save, file_name):
"""
Saves a list of raw data into a shelve file.
@param list_to_save A list of items to be saved into shelf file
@type list_to_save list
@param file_name The name of the file into which the items should be saved
@type string
"""
try:
label = file_name
to_save = list_to_save
db = shelve.open(self.datafolder + file_name)
db[label] = to_save
db.close()
except:
print('Error saving to shelve file %s' % file_name)
else:
            print('Successfully saved to shelve file %s ' % file_name)
| PainNarrativesLab/IOMNarratives | IOMDataService.py | Python | mit | 5,350 |
# TODO: direction list operator?
from direction import Direction, Pivot
from charcoaltoken import CharcoalToken as CT
from unicodegrammars import UnicodeGrammars
from wolfram import (
String, Rule, DelayedRule, Span, Repeated, RepeatedNull, PatternTest,
Number, Expression
)
import re
from math import floor, ceil
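# Overview (inferred from the definitions below): FindAll returns every index
# at which needle occurs (character offsets for strings, keys for dicts,
# positions for other iterables); ListFind returns only the first match,
# with None for a missing dict value and -1 for a missing list element.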
def FindAll(haystack, needle):
r = []
if isinstance(haystack, str):
index = haystack.find(needle)
while True:
if ~index:
r += [index]
else:
return r
index = haystack.find(needle, index + 1)
else:
return [i for i, item in (haystack.items() if isinstance(haystack, dict) else enumerate(haystack)) if item == needle]
def ListFind(haystack, needle):
if isinstance(haystack, dict):
for i, item in haystack.items():
if item == needle:
return i
return None
return haystack.index(needle) if needle in haystack else -1
def dedup(iterable):
iterable = iterable[:]
items = []
i = 0
for item in iterable:
if item in items:
del iterable[i]
else:
i += 1
items += [item]
return iterable
def iter_apply(iterable, function):
clone = iterable[:]
clone[:] = [function(item) for item in clone]
return clone
def itersplit(iterable, number):
result = []
while len(iterable):
result += [iterable[:number]]
iterable = iterable[number:]
return result
def negate_str(string):
try:
return float(string) if "." in string else int(string)
except:
return string[::-1]
def abs_str(string):
try:
return abs(float(string) if "." in string else int(string))
except:
return string # ???
def _int(obj):
    if isinstance(obj, str) and re.match(r"\d+\.?\d*$", obj):
return int(float(obj))
return int(obj)
def product(item):
result = 1
for part in item:
result *= part
return result
def Negate(item):
if isinstance(item, int) or isinstance(item, float):
return -item
if isinstance(item, str):
return negate_str(item)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
return String(negate_str(str(item)))
if hasattr(item, "__iter__"):
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Negate)
def Abs(item):
if isinstance(item, int) or isinstance(item, float):
return abs(item)
if isinstance(item, str):
return abs_str(item)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
return String(abs_str(str(item)))
if hasattr(item, "__iter__"):
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Abs)
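# Overview (inferred from the definitions below): Sum and Product fold a value
# digit-wise for numbers, over the embedded digits or numbers for strings, and
# element-wise for iterables; Sum additionally concatenates lists of strings
# and flattens lists of lists.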
def Sum(item):
if isinstance(item, float):
item = int(item)
if isinstance(item, int):
result = 0
while item:
result += item % 10
item //= 10
return result
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
if all(c in "0123456789." for c in item) and item.count(".") < 2:
return sum([0 if c == "." else int(c) for c in item])
return sum(
float(c) if "." in c else int(c)
            for c in re.findall(r"\d+\.?\d*|\.\d+", item)
)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
if isinstance(item[0], str):
return "".join(item)
if isinstance(item[0], String):
return "".join(map(str, item))
if isinstance(item[0], list):
return sum(item, [])
return sum(item)
def Product(item):
if isinstance(item, float):
item = int(item)
if isinstance(item, int):
result = 1
while item:
result *= item % 10
item //= 10
return result
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
if all(c in "0123456789." for c in item) and item.count(".") < 2:
return product([0 if c == "." else int(c) for c in item])
return product(
float(c) if "." in c else int(c)
            for c in re.findall(r"\d+\.?\d*|\.\d+", item)
)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
# TODO: cartesian product?
# if isinstance(item[0], list):
# return sum(item, [])
return product(item)
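# Overview (inferred from the definition below): vectorize wraps a binary
# scalar function fn so that it broadcasts over iterable operands, pairing
# elements when both sides are iterable (or delegating to afn when supplied)
# and mapping over the iterable side when only one side is; cast_string
# controls whether plain strings are coerced to numbers before fn is applied.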
def vectorize(fn, afn=None, cast_string=True):
def vectorized(left, right, c):
if isinstance(left, String):
left = str(left)
if isinstance(right, String):
right = str(right)
if type(left) == Expression:
left = left.run()
if type(right) == Expression:
right = right.run()
left_type = type(left)
right_type = type(right)
left_is_iterable = (
hasattr(left, "__iter__") and not isinstance(left, str)
)
right_is_iterable = (
hasattr(right, "__iter__") and not isinstance(right, str)
)
if left_is_iterable or right_is_iterable:
if left_is_iterable and right_is_iterable:
result = afn(left, right, c) if afn else [
vectorized(l, r, c) for l, r in zip(left, right)
]
else:
result = (
[vectorized(item, right, c) for item in left]
if left_is_iterable else
[vectorized(left, item, c) for item in right]
)
result_type = type(left if left_is_iterable else right)
try:
return result_type(result)
except:
return result_type(result, left if left_is_iterable else right)
if cast_string and left_type == str:
left = (float if "." in left else int)(left)
if cast_string and right_type == str:
right = (float if "." in right else int)(right)
return fn(left, right, c)
return vectorized
def Incremented(item):
if isinstance(item, float) or isinstance(item, int):
return round(item + 1, 15)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
item = float(item) if "." in item else int(item)
return Incremented(item)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Incremented)
def Decremented(item):
if isinstance(item, float) or isinstance(item, int):
return round(item - 1, 15)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
item = float(item) if "." in item else int(item)
return Decremented(item)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Decremented)
def Doubled(item):
if isinstance(item, float) or isinstance(item, int):
return round(item * 2, 15)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
item = float(item) if "." in item else int(item)
return Doubled(item)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Doubled)
def Halved(item):
if isinstance(item, float) or isinstance(item, int):
return round(item / 2, 15)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
item = float(item) if "." in item else int(item)
return Halved(item)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Halved)
def Lower(item):
if isinstance(item, int) or isinstance(item, float):
return str(item)
if isinstance(item, str):
return item.lower()
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = String(str(item).lower())
if isinstance(item, str):
return item.lower()
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Lower)
def Min(item):
if isinstance(item, int) or isinstance(item, float):
return floor(item)
if isinstance(item, str):
return chr(min(map(ord, item)))
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
return String(Min(str(item)))
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return min(item)
def Max(item):
if isinstance(item, int) or isinstance(item, float):
return ceil(item)
if isinstance(item, str):
return chr(max(map(ord, item)))
if isinstance(item, Expression):
item = item.run()
    if isinstance(item, String):
        # Mirror Min: take the maximum character of the underlying string.
        return String(Max(str(item)))
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return max(item)
def Upper(item):
if isinstance(item, int) or isinstance(item, float):
return str(item)
if isinstance(item, str):
return item.upper()
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = String(str(item).upper())
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Upper)
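# Overview (inferred from the definition below): direction() normalises its
# argument to a Direction: Direction values pass through, integers index the
# compass order starting at right (taken mod 8), and strings are matched
# against abbreviated names such as "ur" or "downl", falling back to any
# digits they contain, else 0.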
def direction(dir):
if isinstance(dir, String):
dir = str(dir)
cls = type(dir)
if cls == Direction:
return dir
elif cls == int:
return [
Direction.right, Direction.up_right, Direction.up,
Direction.up_left, Direction.left, Direction.down_left,
Direction.down, Direction.down_right
][dir % 8]
elif cls == str:
cleaned = re.sub("[^a-z]", "", dir.lower()[:5])
lookup = {
"r": Direction.right,
"ri": Direction.right,
"rig": Direction.right,
"righ": Direction.right,
"right": Direction.right,
"ur": Direction.up_right,
"upr": Direction.up_right,
"upri": Direction.up_right,
"uprig": Direction.up_right,
"u": Direction.up,
"up": Direction.up,
"ul": Direction.up_left,
"upl": Direction.up_left,
"uple": Direction.up_left,
"uplef": Direction.up_left,
"l": Direction.left,
"le": Direction.left,
"lef": Direction.left,
"left": Direction.left,
"dl": Direction.down_left,
"downl": Direction.down_left,
"d": Direction.down,
"do": Direction.down,
"dow": Direction.down,
"down": Direction.down,
"dr": Direction.down_right,
"downr": Direction.down_right
}
if cleaned in lookup:
return lookup[cleaned]
elif any(c in dir for c in "0123456789"):
return [
Direction.right, Direction.up_right, Direction.up,
Direction.up_left, Direction.left, Direction.down_left,
Direction.down, Direction.down_right
            ][int(re.search(r"\d+", dir).group()) % 8]
else:
return 0
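# Overview (inferred from the structure below): InterpreterProcessor appears to
# map each CharcoalToken to one reducer per grammar production; every reducer
# takes the parsed children r and returns a closure over the interpreter state
# c, which supplies state and side effects such as c.Print, c.Assign and
# c.Move.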
InterpreterProcessor = {
CT.Arrow: [
lambda r: lambda c: Direction.left,
lambda r: lambda c: Direction.up,
lambda r: lambda c: Direction.right,
lambda r: lambda c: Direction.down,
lambda r: lambda c: Direction.up_left,
lambda r: lambda c: Direction.up_right,
lambda r: lambda c: Direction.down_right,
lambda r: lambda c: Direction.down_left,
lambda r: lambda c: direction(r[1](c))
],
CT.Multidirectional: [
lambda r: lambda c: r[0](c) + r[1](c),
lambda r: lambda c: [
Direction.right, Direction.down, Direction.left, Direction.up
] + r[1](c),
lambda r: lambda c: [
Direction.down_right, Direction.down_left, Direction.up_left,
Direction.up_right
] + r[1](c),
lambda r: lambda c: [
Direction.right, Direction.down_right,
Direction.down, Direction.down_left,
Direction.left, Direction.up_left,
Direction.up, Direction.up_right
] + r[1](c),
lambda r: lambda c: [Direction.down, Direction.up] + r[1](c),
lambda r: lambda c: [Direction.right, Direction.left] + r[1](c),
lambda r: lambda c: [
Direction.down_right, Direction.up_left
] + r[1](c),
lambda r: lambda c: [
Direction.down_left, Direction.up_right
] + r[1](c),
lambda r: lambda c: [
Direction.down_right, Direction.up_right
] + r[1](c),
lambda r: lambda c: [Direction.down_left, Direction.up_left] + r[1](c),
lambda r: lambda c: [
Direction.down_right, Direction.down_left
] + r[1](c),
lambda r: lambda c: [
Direction.down_right, Direction.down, Direction.up,
Direction.up_right
] + r[1](c),
lambda r: lambda c: [Direction.right, Direction.up] + r[1](c),
lambda r: lambda c: [
Direction.right, Direction.down, Direction.left
] + r[1](c),
lambda r: lambda c: [Direction.up_left, Direction.up_right] + r[1](c),
lambda r: lambda c: [
Direction.down, Direction.up_left, Direction.up_right
] + r[1](c),
lambda r: lambda c: [Direction.down_left, Direction.left] + r[1](c),
lambda r: lambda c: [Direction.down, Direction.left] + r[1](c),
lambda r: lambda c: [Direction.right, Direction.up] + r[1](c),
lambda r: lambda c: [Direction.right, Direction.down] + r[1](c),
lambda r: lambda c: r[1](c),
lambda r: lambda c: r[1](c),
lambda r: lambda c: [direction(item) for item in r[1](c)],
lambda r: lambda c: []
],
CT.Side: [lambda r: lambda c: (r[0](c), r[1](c))],
CT.EOF: [lambda r: None],
CT.String: [lambda r: r],
CT.Number: [lambda r: r],
CT.Name: [lambda r: r],
CT.S: [lambda r: None] * 2,
CT.Span: [
lambda r: lambda c: Span(r[0](c), r[2](c), r[4](c)),
lambda r: lambda c: Span(r[0](c), None, r[3](c)),
lambda r: lambda c: Span(r[0](c), r[2](c)),
lambda r: lambda c: Span(r[0](c)),
lambda r: lambda c: Span(None, r[1](c), r[3](c)),
lambda r: lambda c: Span(None, r[1](c)),
lambda r: lambda c: Span(None, None, r[2](c)),
lambda r: lambda c: Span()
],
CT.Arrows: [
lambda r: lambda c: [r[0](c)] + r[1](c),
lambda r: lambda c: [r[0](c)]
],
CT.Sides: [
lambda r: lambda c: [r[0](c)] + r[1](c),
lambda r: lambda c: [r[0](c)]
],
CT.Expressions: [
lambda r: lambda c: [r[0](c)] + r[1](c),
lambda r: lambda c: [r[0](c)]
],
CT.WolframExpressions: [
lambda r: lambda c: [r[0](c)] + r[1](c),
lambda r: lambda c: [r[0](c)]
],
CT.PairExpressions: [
lambda r: lambda c: [(r[0](c), r[1](c))] + r[2](c),
lambda r: lambda c: [(r[0](c), r[1](c))]
],
CT.Cases: [
lambda r: lambda c: [(r[0](c), r[1])] + r[2](c),
lambda r: lambda c: []
],
CT.List: [
lambda r: lambda c: r[1](c),
lambda r: lambda c: []
] * 2,
CT.WolframList: [
lambda r: lambda c: r[1](c),
lambda r: lambda c: []
] * 2,
CT.Dictionary: [
lambda r: lambda c: dict(r[1](c)),
lambda r: lambda c: {}
] * 2,
CT.WolframExpression: [
lambda r: lambda c: r[0](c),
lambda r: lambda c: r[0](c)
],
CT.Expression: [
lambda r: lambda c: r[0],
lambda r: lambda c: r[0],
lambda r: lambda c: c.Retrieve(r[0]),
lambda r: lambda c: r[0](c),
lambda r: lambda c: r[1](c),
lambda r: lambda c: r[1](c),
lambda r: lambda c: r[0](c),
lambda r: lambda c: c.Lambdafy(r[1]),
lambda r: lambda c: c.Lambdafy(r[1]),
lambda r: lambda c: r[0](c),
lambda r: lambda c: r[0](r[1], r[2], r[3], r[4], c),
lambda r: lambda c: r[0](r[1](c), r[2](c), r[3](c), r[4](c), c),
lambda r: lambda c: r[0](r[1], r[2], r[3], c),
lambda r: lambda c: r[0](r[1](c), r[2](c), r[3](c), c),
lambda r: lambda c: r[0](r[1], r[2], c),
lambda r: lambda c: r[0](r[1](c), r[2](c), c),
lambda r: lambda c: r[0](r[1], c),
lambda r: lambda c: r[0](r[1](c), c),
lambda r: lambda c: r[0](c),
lambda r: lambda c: r[0](r[1], r[2], r[3], r[4], c),
lambda r: lambda c: r[0](r[1](c), r[2](c), r[3](c), r[4](c), c),
lambda r: lambda c: r[0](r[1], r[2], r[3], c),
lambda r: lambda c: r[0](r[1](c), r[2](c), r[3](c), c),
lambda r: lambda c: r[0](r[1], r[2], c),
lambda r: lambda c: r[0](r[1](c), r[2](c), c),
lambda r: lambda c: r[0](r[1], c),
lambda r: lambda c: r[0](r[1](c), c)
],
CT.ExpressionOrEOF: [
lambda r: lambda c: r[0](c),
lambda r: lambda c: c.Input()
],
CT.Nilary: [
lambda r: lambda c: c.InputString(),
lambda r: lambda c: c.InputNumber(),
lambda r: lambda c: c.Input(),
lambda r: lambda c: c.Random(),
lambda r: lambda c: c.PeekAll(),
lambda r: lambda c: c.PeekMoore(),
lambda r: lambda c: c.PeekVonNeumann(),
lambda r: lambda c: c.Peek(),
lambda r: lambda c: c.x,
lambda r: lambda c: c.y
],
CT.Unary: [
lambda r: lambda item, c: (
iter_apply(item, lambda x: -x)
if hasattr(item, "__iter__") else
(-item)
if (
isinstance(item, int) or isinstance(item, float) or
isinstance(item, Number)
) else
negate_str(str(item))
),
lambda r: lambda item, c: (
len(item) if hasattr(item, "__iter__") else len(str(item))
),
lambda r: lambda item, c: int(not item),
lambda r: lambda item, c: c.Cast(item),
lambda r: lambda item, c: c.Random(item),
lambda r: lambda item, c: c.Evaluate(item),
lambda r: lambda item, c: item.pop(),
lambda r: lambda item, c: Lower(item),
lambda r: lambda item, c: Upper(item),
lambda r: lambda item, c: Min(item),
lambda r: lambda item, c: Max(item),
lambda r: lambda item, c: c.ChrOrd(item),
lambda r: lambda item, c: (
item[::-1]
if hasattr(item, "__iter__") else
int(str(item)[::-1])
if isinstance(item, int) else
float(
("-" + str(item)[:0:-1])
if item[-1] == "-" else
str(item)[::-1]
)
if isinstance(item, float) else
str(item)[::-1]
),
lambda r: lambda item, c: c.Retrieve(item),
lambda r: lambda item, c: Repeated(item),
lambda r: lambda item, c: RepeatedNull(item),
lambda r: lambda item, c: item[:],
lambda r: lambda item, c: (
list(range(int(item) + 1))
if isinstance(item, int) or isinstance(item, float) else
list(map(chr, range(ord(item) + 1)))
),
lambda r: lambda item, c: (
list(range(int(item)))
if isinstance(item, int) or isinstance(item, float) else
list(map(chr, range(ord(item))))
),
lambda r: lambda item, c: (
~item
if isinstance(item, int) or isinstance(item, float) else
(~(float(str(item)) if "." in item else int(str(item))))
),
lambda r: lambda item, c: Abs(item),
lambda r: lambda item, c: Sum(item),
lambda r: lambda item, c: Product(item),
lambda r: lambda item, c: Incremented(item),
lambda r: lambda item, c: Decremented(item),
lambda r: lambda item, c: Doubled(item),
lambda r: lambda item, c: Halved(item),
lambda r: lambda item, c: eval(item),
lambda r: lambda item, c: item ** 0.5
],
CT.Binary: [
lambda r: lambda left, right, c: c.Add(left, right),
lambda r: lambda left, right, c: c.Subtract(left, right),
lambda r: lambda left, right, c: c.Multiply(left, right),
lambda r: lambda left, right, c: c.Divide(left, right),
lambda r: lambda left, right, c: c.Divide(left, right, False),
lambda r: vectorize(
lambda left, right, c: left % right,
cast_string=False
),
lambda r: lambda left, right, c: int(left == right),
lambda r: lambda left, right, c: int(left < right),
lambda r: lambda left, right, c: int(left > right),
lambda r: vectorize(lambda left, right, c: left & right),
lambda r: vectorize(lambda left, right, c:
String(left) | String(right)
if isinstance(left, str) and isinstance(right, str) else
left | right,
cast_string=False
),
lambda r: lambda left, right, c: (
list(range(int(left), int(right) + 1))
if isinstance(left, int) or isinstance(left, float) else
list(map(chr, range(ord(left), ord(right) + 1)))
),
lambda r: lambda left, right, c: (
list(range(int(left), int(right)))
if isinstance(left, int) or isinstance(left, float) else
list(map(chr, range(ord(left), ord(right))))
if isinstance(left, str) and isinstance(right, str) else
c.CycleChop(left, right)
),
lambda r: vectorize(lambda left, right, c: left ** right),
lambda r: lambda left, right, c: (
lambda value: "" if value == "\x00" else value
)(
(left[right] if right in left else None)
if isinstance(left, dict) else
left[int(right) % len(left)]
if isinstance(left, list) or isinstance(left, str) else
(
getattr(left, right)
if isinstance(right, str) and hasattr(left, right) else
left[right % len(left)] # default to iterable
)
),
lambda r: lambda left, right, c: left.append(right) or left,
lambda r: lambda left, right, c: right.join(map(str, left)),
lambda r: lambda left, right, c: (
itersplit(left, int(right))
if isinstance(right, int) or isinstance(right, float) else
list(map(int, str(left).split(str(right))))
if isinstance(left, int) or isinstance(left, float) else
left.split(right)
if isinstance(left, str) and isinstance(right, str) else
[item.split(right) for item in left]
if hasattr(left, "__getitem__") and isinstance(right, str) else
            re.split("|".join(map(re.escape, right)), left)  # pattern first, then the string to split
),
lambda r: lambda left, right, c: FindAll(left, right),
lambda r: lambda left, right, c: (
left.find(right)
if isinstance(left, str) else
ListFind(left, right)
),
lambda r: lambda left, right, c: " " * (int(right) - len(left)) + left,
lambda r: lambda left, right, c: left + " " * (int(right) - len(left)),
lambda r: lambda left, right, c: list(left.values()).count(right) if isinstance(left, dict) else left.count(right),
lambda r: lambda left, right, c: Rule(left, right),
lambda r: lambda left, right, c: DelayedRule(left, right),
lambda r: lambda left, right, c: PatternTest(left, right),
lambda r: lambda left, right, c: left[_int(right):],
lambda r: lambda left, right, c: c.Base(left, right),
lambda r: lambda left, right, c: c.BaseString(left, right)
],
CT.Ternary: [lambda r: lambda x, y, z, c: x[_int(y):_int(z)]],
CT.Quarternary: [lambda r: lambda x, y, z, w, c: x[
_int(y):_int(z):_int(w)
]],
CT.LazyUnary: [],
CT.LazyBinary: [
lambda r: lambda left, right, c: left(c) and right(c),
lambda r: lambda left, right, c: left(c) or right(c)
],
CT.LazyTernary: [
lambda r: lambda x, y, z, c: c.Ternary(x, y, z)
],
CT.LazyQuarternary: [],
CT.OtherOperator: [
lambda r: lambda c: c.PeekDirection(r[1](c), r[2](c)),
lambda r: lambda c: c.Map(r[1](c), r[2]),
lambda r: lambda c: c.Map(r[1](c), r[2], string_map=True),
lambda r: lambda c: c.Any(r[1](c), r[2]),
lambda r: lambda c: c.All(r[1](c), r[2]),
lambda r: lambda c: c.Filter(r[1](c), r[2]),
lambda r: lambda c: c.EvaluateVariable(r[1](c), r[2](c)),
lambda r: lambda c: c.EvaluateVariable(r[1](c), [r[2](c)]),
lambda r: lambda c: c.EvaluateVariable(r[1](c), [])
],
CT.Program: [
lambda r: lambda c: ((r[0](c) or True) and r[2](c)),
lambda r: lambda c: None
],
CT.NonEmptyProgram: [
lambda r: lambda c: ((r[0](c) or True) and r[2](c)),
lambda r: lambda c: r[0](c)
],
CT.Body: [
lambda r: lambda c: r[1](c),
lambda r: lambda c: r[1](c),
lambda r: lambda c: r[0](c)
],
CT.Command: [
lambda r: lambda c: c.InputString(r[1]),
lambda r: lambda c: c.InputNumber(r[1]),
lambda r: lambda c: c.Input(r[1]),
lambda r: lambda c: c.Evaluate(r[1](c), True),
lambda r: lambda c: c.Print(r[1](c), directions=[r[0](c)]),
lambda r: lambda c: c.Print(r[0](c)),
lambda r: lambda c: c.Multiprint(r[2](c), directions=dedup(r[1](c))),
lambda r: lambda c: c.Multiprint(r[1](c)),
lambda r: lambda c: c.Polygon(r[1](c), r[2](c)),
lambda r: lambda c: c.Polygon(
[[(side, length) for side in r[1](c)] for length in [r[2](c)]][0],
r[3](c)
),
lambda r: lambda c: c.Polygon(r[1](c), r[2](c), fill=False),
lambda r: lambda c: c.Polygon(
[[(side, length) for side in r[1](c)] for length in [r[2](c)]][0],
r[3](c), fill=False
),
lambda r: lambda c: c.Rectangle(r[1](c), r[2](c)),
lambda r: lambda c: c.Rectangle(r[1](c)),
lambda r: lambda c: c.Oblong(r[1](c), r[2](c), r[3](c)),
lambda r: lambda c: c.Oblong(r[1](c), r[2](c)),
lambda r: lambda c: c.Rectangle(r[1](c), r[2](c), r[3](c)),
lambda r: lambda c: c.Rectangle(r[1](c), r[2](c)),
lambda r: lambda c: c.Move(r[0](c)),
lambda r: lambda c: c.Move(r[1](c)),
lambda r: lambda c: c.Move(r[2](c), r[1](c)),
lambda r: lambda c: c.Move(r[1](c), r[2](c)),
lambda r: lambda c: c.Pivot(Pivot.left, r[1](c)),
lambda r: lambda c: c.Pivot(Pivot.left),
lambda r: lambda c: c.Pivot(Pivot.right, r[1](c)),
lambda r: lambda c: c.Pivot(Pivot.right),
lambda r: lambda c: c.Jump(r[1](c), r[2](c)),
lambda r: lambda c: c.RotateTransform(r[1](c)),
lambda r: lambda c: c.RotateTransform(),
lambda r: lambda c: c.ReflectTransform(r[1](c)),
lambda r: lambda c: c.ReflectTransform(r[1](c)),
lambda r: lambda c: c.ReflectTransform(),
lambda r: lambda c: c.RotatePrism(r[2], r[1](c), number=True),
lambda r: lambda c: c.RotatePrism(r[2](c), r[1](c)),
lambda r: lambda c: c.RotatePrism(anchor=r[1](c)),
lambda r: lambda c: c.RotatePrism(r[1], number=True),
lambda r: lambda c: c.RotatePrism(r[1](c)),
lambda r: lambda c: c.RotatePrism(),
lambda r: lambda c: c.ReflectMirror(r[1](c)),
lambda r: lambda c: c.ReflectMirror(r[1](c)),
lambda r: lambda c: c.ReflectMirror(),
lambda r: lambda c: c.RotateCopy(r[2], r[1](c), number=True),
lambda r: lambda c: c.RotateCopy(r[2](c), r[1](c)),
lambda r: lambda c: c.RotateCopy(anchor=r[1](c)),
lambda r: lambda c: c.RotateCopy(r[1], number=True),
lambda r: lambda c: c.RotateCopy(r[1](c)),
lambda r: lambda c: c.RotateCopy(),
lambda r: lambda c: c.ReflectCopy(r[1](c)),
lambda r: lambda c: c.ReflectCopy(r[1](c)),
lambda r: lambda c: c.ReflectCopy(),
lambda r: lambda c: c.RotateOverlap(
r[2], r[1](c), overlap=r[4](c), number=True
),
lambda r: lambda c: c.RotateOverlap(r[2](c), r[1](c), overlap=r[3](c)),
lambda r: lambda c: c.RotateOverlap(anchor=r[1](c), overlap=r[2](c)),
lambda r: lambda c: c.RotateOverlap(
r[1], overlap=r[3](c), number=True
),
lambda r: lambda c: c.RotateOverlap(r[1](c), overlap=r[2](c)),
lambda r: lambda c: c.RotateOverlap(overlap=r[1](c)),
lambda r: lambda c: c.RotateOverlap(r[2], r[1](c), number=True),
lambda r: lambda c: c.RotateOverlap(r[2](c), r[1](c)),
lambda r: lambda c: c.RotateOverlap(anchor=r[1](c)),
lambda r: lambda c: c.RotateOverlap(r[1], number=True),
lambda r: lambda c: c.RotateOverlap(r[1](c)),
lambda r: lambda c: c.RotateOverlap(),
lambda r: lambda c: c.RotateShutter(
r[2], r[1](c), overlap=r[4](c), number=True
),
lambda r: lambda c: c.RotateShutter(r[2](c), r[1](c), overlap=r[3](c)),
lambda r: lambda c: c.RotateShutter(anchor=r[1](c), overlap=r[2](c)),
lambda r: lambda c: c.RotateShutter(
r[1], overlap=r[3](c), number=True
),
lambda r: lambda c: c.RotateShutter(r[1](c), overlap=r[2](c)),
lambda r: lambda c: c.RotateShutter(overlap=r[1](c)),
lambda r: lambda c: c.RotateShutter(r[2], r[1](c), number=True),
lambda r: lambda c: c.RotateShutter(r[2](c), r[1](c)),
lambda r: lambda c: c.RotateShutter(anchor=r[1](c)),
lambda r: lambda c: c.RotateShutter(r[1], number=True),
lambda r: lambda c: c.RotateShutter(r[1](c)),
lambda r: lambda c: c.RotateShutter(),
lambda r: lambda c: c.ReflectOverlap(r[1](c), overlap=r[2](c)),
lambda r: lambda c: c.ReflectOverlap(r[1](c), overlap=r[2](c)),
lambda r: lambda c: c.ReflectOverlap(overlap=r[1](c)),
lambda r: lambda c: c.ReflectOverlap(r[1](c)),
lambda r: lambda c: c.ReflectOverlap(r[1](c)),
lambda r: lambda c: c.ReflectOverlap(),
lambda r: lambda c: c.ReflectButterfly(r[1](c), overlap=r[2](c)),
lambda r: lambda c: c.ReflectButterfly(r[1](c), overlap=r[2](c)),
lambda r: lambda c: c.ReflectButterfly(overlap=r[1](c)),
lambda r: lambda c: c.ReflectButterfly(r[1](c)),
lambda r: lambda c: c.ReflectButterfly(r[1](c)),
lambda r: lambda c: c.ReflectButterfly(),
lambda r: lambda c: c.Rotate(r[1](c)),
lambda r: lambda c: c.Rotate(),
lambda r: lambda c: c.Reflect(r[1](c)),
lambda r: lambda c: c.Reflect(),
lambda r: lambda c: c.Copy(r[1](c), r[2](c)),
lambda r: lambda c: c.For(r[1], r[2]),
lambda r: lambda c: c.While(r[1], r[2]),
lambda r: lambda c: c.If(r[1], r[2], r[3]),
lambda r: lambda c: c.If(r[1], r[2], lambda c: None),
lambda r: lambda c: c.Assign(r[1](c), r[2](c), r[3](c)),
lambda r: lambda c: c.Assign(r[1](c), r[2]),
lambda r: lambda c: c.Assign(r[2](c), r[1](c)),
lambda r: lambda c: c.Fill(r[1](c)),
lambda r: lambda c: c.SetBackground(r[1](c)),
lambda r: lambda c: c.Dump(),
lambda r: lambda c: c.RefreshFor(r[1](c), r[2], r[3]),
lambda r: lambda c: c.RefreshWhile(r[1](c), r[2], r[3]),
lambda r: lambda c: c.Refresh(r[1](c)),
lambda r: lambda c: c.Refresh(),
lambda r: lambda c: c.ToggleTrim(),
lambda r: lambda c: c.Crop(r[1](c), r[2](c)),
lambda r: lambda c: c.Crop(r[1](c)),
lambda r: lambda c: c.Clear(False),
lambda r: lambda c: c.Extend(r[1](c), r[2](c)),
lambda r: lambda c: c.Extend(r[1](c)),
lambda r: lambda c: r[1](c).append(r[2](c))
] + [
lambda r: lambda c: dict(r[2](c)).get(r[1](c), r[3])(c),
lambda r: lambda c: dict(r[2](c)).get(
r[1](c), lambda *arguments: None
)(c)
] * 3 + [
lambda r: lambda c: c.Map(r[1](c), r[2], True),
lambda r: lambda c: c.ExecuteVariable(r[1](c), r[2](c)),
lambda r: lambda c: c.ExecuteVariable(r[1](c), [r[2](c)]),
lambda r: lambda c: c.MapAssignLeft(r[3], r[2](c), r[1]),
lambda r: lambda c: c.MapAssign(r[2], r[1]),
lambda r: lambda c: c.MapAssignRight(r[3], r[2](c), r[1]),
lambda r: lambda c: c.MapAssign(r[2], r[1]),
lambda r: lambda c: exec(r[1](c))
]
}
}
| somebody1234/Charcoal | interpreterprocessor.py | Python | mit | 34,121 |