repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mjbrownie/django-cloudmailin | cloudmailin/views.py | 1 | 2088 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseForbidden, HttpResponseServerError
import hashlib
def generate_signature(params, secret):
    sig = "".join(params[k].encode('utf-8') for k in sorted(params.keys()) if k != "signature")
    sig = hashlib.md5(sig + secret).hexdigest()
    return sig
class MailHandler(object):
    csrf_exempt = True
    def __init__(self, *args, **kwargs):
        super(MailHandler, self).__init__(*args, **kwargs)
        self._addresses = {}
    def __call__(self, request, *args, **kwargs):
        params = dict((k, v) for k, v in request.POST.iteritems())
        to = params.get('to', None)
        if '+' in to:
            lto = to.split('+')
            to = lto[0] + "@" + lto[1].split('@')[1]
        addr = self._addresses.get(to, None)
        if addr is None:
            return HttpResponseNotFound("recipient address is not found", mimetype="text/plain")
        try:
            if not self.is_valid_signature(params, addr['secret']):
                return HttpResponseForbidden("invalid message signature", mimetype="text/plain")
            addr['callback'](**params)
        except Exception, e:
            return HttpResponseServerError(e.message, mimetype="text/plain")
        resp = HttpResponse("")
        resp.csrf_exempt = True
        return resp
    def is_valid_signature(self, params, secret):
        if 'signature' in params:
            sig = generate_signature(params, secret)
            return params['signature'] == sig
    def register_address(self, address, secret, callback):
        self._addresses["<%s>" % address] = {
            'secret': secret,
            'callback': callback,
        }
        return True
    def unregister_address(self, address):
        if address in self._addresses:
            del self._addresses[address]
            return True
        return False
 | bsd-3-clause | 6,135,572,548,960,023,000 | 32.625 | 106 | 0.573755 | false | 4.54902 | false | false | false
costastf/toonlib | _CI/bin/semver.py | 1 | 6205 | # -*- coding: utf-8 -*-
import re
_REGEX = re.compile('^(?P<major>(?:0|[1-9][0-9]*))'
'\.(?P<minor>(?:0|[1-9][0-9]*))'
'\.(?P<patch>(?:0|[1-9][0-9]*))'
'(\-(?P<prerelease>[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?'
'(\+(?P<build>[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$')
_LAST_NUMBER = re.compile(r'(?:[^\d]*(\d+)[^\d]*)+')
if not hasattr(__builtins__, 'cmp'):
cmp = lambda a, b: (a > b) - (a < b)
def parse(version):
"""
Parse version to major, minor, patch, pre-release, build parts.
"""
match = _REGEX.match(version)
if match is None:
raise ValueError('%s is not valid SemVer string' % version)
verinfo = match.groupdict()
verinfo['major'] = int(verinfo['major'])
verinfo['minor'] = int(verinfo['minor'])
verinfo['patch'] = int(verinfo['patch'])
return verinfo
def compare(ver1, ver2):
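    # Compare two SemVer strings; returns -1, 0, or 1, cmp-style,
    # e.g. compare('1.0.0', '2.0.0') == -1.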
def nat_cmp(a, b):
a, b = a or '', b or ''
convert = lambda text: (2, int(text)) if re.match('[0-9]+', text) else (1, text)
split_key = lambda key: [convert(c) for c in key.split('.')]
return cmp(split_key(a), split_key(b))
def compare_by_keys(d1, d2):
for key in ['major', 'minor', 'patch']:
v = cmp(d1.get(key), d2.get(key))
if v:
return v
rc1, rc2 = d1.get('prerelease'), d2.get('prerelease')
rccmp = nat_cmp(rc1, rc2)
build_1, build_2 = d1.get('build'), d2.get('build')
build_cmp = nat_cmp(build_1, build_2)
if not rccmp and not build_cmp:
return 0
if not rc1 and not build_1:
return 1
elif not rc2 and not build_2:
return -1
return rccmp or build_cmp
v1, v2 = parse(ver1), parse(ver2)
return compare_by_keys(v1, v2)
def match(version, match_expr):
prefix = match_expr[:2]
if prefix in ('>=', '<=', '=='):
match_version = match_expr[2:]
elif prefix and prefix[0] in ('>', '<', '='):
prefix = prefix[0]
match_version = match_expr[1:]
else:
raise ValueError("match_expr parameter should be in format <op><ver>, "
"where <op> is one of ['<', '>', '==', '<=', '>=']. "
"You provided: %r" % match_expr)
possibilities_dict = {
'>': (1,),
'<': (-1,),
'==': (0,),
'>=': (0, 1),
'<=': (-1, 0)
}
possibilities = possibilities_dict[prefix]
cmp_res = compare(version, match_version)
return cmp_res in possibilities
def max_ver(ver1, ver2):
cmp_res = compare(ver1, ver2)
if cmp_res == 0 or cmp_res == 1:
return ver1
else:
return ver2
def min_ver(ver1, ver2):
cmp_res = compare(ver1, ver2)
if cmp_res == 0 or cmp_res == -1:
return ver1
else:
return ver2
def format_version(major, minor, patch, prerelease=None, build=None):
version = "%d.%d.%d" % (major, minor, patch)
if prerelease is not None:
version = version + "-%s" % prerelease
if build is not None:
version = version + "+%s" % build
return version
def _increment_string(string):
# look for the last sequence of number(s) in a string and increment, from:
# http://code.activestate.com/recipes/442460-increment-numbers-in-a-string/#c1
match = _LAST_NUMBER.search(string)
if match:
next_ = str(int(match.group(1))+1)
start, end = match.span(1)
string = string[:max(end - len(next_), start)] + next_ + string[end:]
return string
def bump_major(version):
verinfo = parse(version)
return format_version(verinfo['major'] + 1, 0, 0)
def bump_minor(version):
verinfo = parse(version)
return format_version(verinfo['major'], verinfo['minor'] + 1, 0)
def bump_patch(version):
verinfo = parse(version)
return format_version(verinfo['major'], verinfo['minor'], verinfo['patch'] + 1)
def bump_prerelease(version):
verinfo = parse(version)
verinfo['prerelease'] = _increment_string(verinfo['prerelease'] or 'rc.0')
return format_version(verinfo['major'], verinfo['minor'], verinfo['patch'],
verinfo['prerelease'])
def bump_build(version):
verinfo = parse(version)
verinfo['build'] = _increment_string(verinfo['build'] or 'build.0')
return format_version(verinfo['major'], verinfo['minor'], verinfo['patch'],
verinfo['prerelease'], verinfo['build'])
# https://github.com/k-bx/python-semver/blob/master/LICENSE.txt
# Downloaded: 20160406 from https://pypi.python.org/pypi/semver version 2.4.1
# Copyright (c) 2013, Konstantine Rybnikov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| mit | 1,343,708,063,086,418,700 | 32.722826 | 88 | 0.607736 | false | 3.4723 | false | false | false |
athena-voice/athena-voice-client | athena/apis.py | 1 | 1210 | """
Finds and stores APIs in the 'api_lib' global variable
"""
import pkgutil
import inspect
import traceback
from athena import settings
api_lib = None
def find_apis():
""" Find APIs """
global api_lib
api_lib = {}
print('~ Looking for APIs in:', settings.API_DIRS)
for finder, name, _ in pkgutil.iter_modules(settings.API_DIRS):
try:
file = finder.find_module(name).load_module(name)
for member in dir(file):
obj = getattr(file, member)
if inspect.isclass(obj):
for parent in obj.__bases__:
                        if parent.__name__ == 'Api':
api = obj()
api_lib[api.key] = api
except Exception as e:
print(traceback.format_exc())
print('\n~ Error loading \''+name+'\' '+str(e))
def verify_apis(user):
""" Verify APIs """
global api_lib
api_lib = dict(api for api in api_lib.items() if api[1].verify_data(user))
def list_apis():
""" List APIs """
global api_lib
print('\n~ APIs: ', end='')
print(str(list(api_lib.keys()))[1:-1]+'\n')
| gpl-3.0 | 3,669,009,364,997,560,000 | 25.5 | 78 | 0.513223 | false | 3.878205 | false | false | false |
Blueshoe/djangocms-workflows | workflows/forms.py | 1 | 2853 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from .models import Action
class ActionForm(forms.Form):
message_ = forms.CharField(
label=_('Message'),
required=False,
help_text=_('You may provide some more information.'),
widget=forms.Textarea
)
editor_ = forms.ModelChoiceField(
label=_('Editor'),
queryset=get_user_model().objects.none(),
help_text=_('Only notify a specific user?'),
required=False
)
def __init__(self, *args, **kwargs):
self.stage = kwargs.pop('stage', None)
self.title = kwargs.pop('title')
self.request = kwargs.pop('request')
self.workflow = kwargs.pop('workflow')
self.action_type = kwargs.pop('action_type') # {open, approve, reject, cancel}
self.next_stage = self.workflow.next_mandatory_stage(self.stage)
self.group = getattr(self.stage, 'group', None)
cr = Action.get_current_request(self.title)
self.current_action = None if (not cr or cr.is_closed()) else cr.last_action()
self.user = self.request.user
super(ActionForm, self).__init__(*args, **kwargs)
self.adjust_editor()
@property
def message(self):
"""
:rtype: str
"""
return self.cleaned_data.get('message_', '')
@property
def editor(self):
"""
:rtype: django.contrib.auth.models.User
"""
return self.cleaned_data.get('editor')
@property
def editors(self):
if self.next_stage is None:
raise ValueError('No next stage!')
if self.editor:
return get_user_model().objects.filter(pk=self.editor.pk)
return self.next_stage.group.user_set.all()
def adjust_editor(self):
if self.action_type in (Action.CANCEL, Action.REJECT) or self.next_stage is None:
self.fields.pop('editor_', None) # no editor can be chosen
return
group = self.next_stage.group
self.fields['editor_'].queryset = group.user_set.all()
self.fields['editor_'].empty_label = _('Any {}').format(group.name)
def save(self):
"""
:rtype: Action
"""
init_kwargs = {
attr: getattr(self, attr) for attr in
('message', 'user', 'title', 'workflow', 'stage', 'action_type', 'group')
}
if self.current_action is None:
assert self.action_type == Action.REQUEST # root must be request
return Action.add_root(**init_kwargs)
else:
assert self.action_type != Action.REQUEST # non-root must not be request
return self.current_action.add_child(**init_kwargs)
| mit | 9,073,147,331,475,379,000 | 33.373494 | 89 | 0.592709 | false | 3.918956 | false | false | false |
osroom/osroom | apps/modules/theme_setting/process/nav_setting.py | 1 | 5260 | #!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2019/12/2 14:43
# @Author : Allen Woo
from bson import ObjectId
from flask import request, g
from flask_babel import gettext
from apps.app import mdbs, cache
from apps.core.flask.reqparse import arg_verify
from apps.utils.format.obj_format import json_to_pyseq, objid_to_str, str_to_num
@cache.cached(timeout=86400, key_base64=False, db_type="redis")
def get_global_theme_navs(theme_name, lang):
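    # Return the nav entries for this theme and language, ordered by 'order';
    # if none exist for that language, fall back to the first other site
    # language that has entries. Results are cached in redis for one day.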
langs = g.site_global["language"]["all_language"].keys()
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{
"language": lang,
"theme_name": theme_name
},
{"_id": 0}
).sort([("order", 1)])
if navs.count(True):
return list(navs)
else:
for la in langs:
if la == lang:
continue
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{
"language": la,
"theme_name": theme_name
},
{"_id": 0}
).sort([("order", 1)])
if navs.count(True):
return list(navs)
return []
def get_navs():
theme_name = request.argget.all("theme_name")
lang = request.argget.all("language")
s, r = arg_verify(
[
(gettext("theme name"), theme_name),
(gettext("language"), lang)
],
required=True
)
if not s:
return r
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{"language": lang, "theme_name": theme_name}
).sort([("order", 1)])
navs = objid_to_str(navs)
data = {
"navs": navs
}
return data
def nav_setting():
"""
    Create or update a theme navigation entry
    :return: dict with msg, msg_type and custom_status
"""
cid = request.argget.all("id")
theme_name = request.argget.all("theme_name")
lang = request.argget.all("language")
display_name = request.argget.all("display_name")
order = str_to_num(request.argget.all("order", 99))
json_data = json_to_pyseq(request.argget.all("json_data"))
s, r = arg_verify(
[(gettext("Display name"), display_name),
(gettext("theme name"), theme_name),
(gettext("language"), lang),
(gettext("Json data"), json_data)
],
required=True
)
if not s:
return r
if not isinstance(json_data, dict):
data = {
"msg": gettext('Value must be of type json'),
"msg_type": "e",
"custom_status": 400
}
return data
if not cid:
updata = {
'theme_name': theme_name,
'display_name': display_name,
'language': lang,
'json_data': json_data,
"order": order
}
r = mdbs["sys"].dbs["theme_nav_setting"].insert_one(updata)
if r.inserted_id:
data = {
"msg": gettext("Navigation added successfully"),
"msg_type": "s",
"custom_status": 200
}
else:
data = {
"msg": gettext("Failed to add navigation"),
"msg_type": "w",
"custom_status": 400
}
else:
updata = {
'theme_name': theme_name,
'display_name': display_name,
'language': lang,
'json_data': json_data,
"order": order
}
r = mdbs["sys"].dbs["theme_nav_setting"].update_one(
{"_id": ObjectId(cid)},
{"$set": updata}
)
if r.modified_count:
data = {
"msg": gettext("Updated successfully"),
"msg_type": "s",
"custom_status": 200
}
elif r.matched_count:
data = {
"msg": gettext("Unmodified"),
"msg_type": "w",
"custom_status": 200
}
else:
data = {
"msg": gettext("Update failed"),
"msg_type": "w",
"custom_status": 400
}
cache.delete_autokey(
fun="get_global_theme_navs",
theme_name=".*",
lang=".*",
db_type="redis",
key_regex=True
)
return data
def del_navs():
ids = json_to_pyseq(request.argget.all("ids"))
s, r = arg_verify(
[(gettext("ids"), ids)],
required=True
)
if not s:
return r
del_ids = []
for id in ids:
del_ids.append(ObjectId(id))
r = mdbs["sys"].dbs["theme_nav_setting"].delete_many({"_id": {"$in": del_ids}})
if r.deleted_count:
data = {
"msg": gettext("Deleted successfully"),
"msg_type": "s",
"custom_status": 200
}
else:
data = {
"msg": gettext("Delete failed"),
"msg_type": "s",
"custom_status": 200
}
cache.delete_autokey(
fun="get_global_theme_navs",
theme_name=".*",
lang=".*",
db_type="redis",
key_regex=True
)
return data
| bsd-2-clause | 6,717,044,610,144,772,000 | 26.128342 | 83 | 0.459125 | false | 3.836616 | false | false | false |
RuthAngus/chronometer | chronometer/compare.py | 1 | 3001 | """
Compare the properties injected to the properties recovered.
Particularly the Ages.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5py
plotpar = {'axes.labelsize': 18,
'font.size': 10,
'legend.fontsize': 18,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True}
plt.rcParams.update(plotpar)
def get_stats_from_samples(samples):
"""
Take a 2d array of samples and produce medians and confidence intervals.
"""
meds = np.array([np.median(samples[:, i]) for i in
range(np.shape(samples)[1])])
lower = np.array([np.percentile(samples[:, i], 16) for i in
range(np.shape(samples)[1])])
upper = np.array([np.percentile(samples[:, i], 84) for i in
range(np.shape(samples)[1])])
errm, errp = meds - lower, upper - meds
return meds, errm, errp
def make_comparison_plot(true, recovered, errp, errm, iso, ierrp, ierrm,
xlabel, ylabel, fn):
"""
Compare the true property with the injected property.
"""
xs = np.linspace(min(true), max(true))
plt.clf()
plt.errorbar(true, recovered, yerr=[errm, errp], fmt="k.")
plt.errorbar(true, iso, yerr=[ierrm, ierrp], fmt="m.", alpha=.5)
plt.plot(xs, xs, "--", color=".5")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.subplots_adjust(bottom=.15)
plt.savefig(fn)
print(np.median(errm), np.median(errp), np.median(ierrm),
np.median(ierrp))
print(np.mean([np.median(ierrm), np.median(ierrp)])
/np.mean([np.median(errm), np.median(errp)]))
if __name__ == "__main__":
cwd = os.getcwd()
RESULTS_DIR = "/Users/ruthangus/projects/chronometer/chronometer/MH"
DATA_DIR = "/Users/ruthangus/projects/chronometer/chronometer/data"
# Load samples
with h5py.File(os.path.join(RESULTS_DIR, "combined_samples.h5"),
"r") as f:
samples = f["samples"][...]
# Find N stars
npar = np.shape(samples)[1]
N = int((npar - 4)/5)
nglob = 4
print(N, "stars")
# Load iso only samples
with h5py.File(os.path.join(RESULTS_DIR, "combined_samples_iso_only.h5"),
"r") as f:
iso_samples = f["samples"][...]
# Calculate medians and errorbars
recovered_age_samples = samples[:, nglob+N:nglob+2*N]
meds, errm, errp = get_stats_from_samples(np.exp(recovered_age_samples))
iso_age_samples = iso_samples[:, nglob+N:nglob+2*N]
iso, ierrm, ierrp = get_stats_from_samples(np.exp(iso_age_samples))
# Load truths
df = pd.read_csv(os.path.join(DATA_DIR, "fake_data.csv"))
true_ages = df.age.values[:N]
# Make plot
make_comparison_plot(true_ages, meds, errp, errm, iso, ierrp, ierrm,
"$\mathrm{True~age~(Gyr)}$",
"$\mathrm{Recovered~age~(Gyr)}$",
"compare_ages_{}".format(N))
| mit | -151,283,496,425,190,530 | 31.978022 | 77 | 0.586138 | false | 3.126042 | false | false | false |
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/OptRomInfStatement.py | 1 | 5392 | ## @file
# process OptionROM generation from INF statement
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import RuleSimpleFile
import RuleComplexFile
import Section
import OptionRom
import Common.GlobalData as GlobalData
from Common.DataType import *
from Common.String import *
from FfsInfStatement import FfsInfStatement
from GenFdsGlobalVariable import GenFdsGlobalVariable
##
#
#
class OptRomInfStatement (FfsInfStatement):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FfsInfStatement.__init__(self)
self.OverrideAttribs = None
## __GetOptRomParams() method
#
# Parse inf file to get option ROM related parameters
#
# @param self The object pointer
#
def __GetOptRomParams(self):
if self.OverrideAttribs == None:
self.OverrideAttribs = OptionRom.OverrideAttribs()
if self.OverrideAttribs.NeedCompress == None:
self.OverrideAttribs.NeedCompress = self.OptRomDefs.get ('PCI_COMPRESS')
if self.OverrideAttribs.NeedCompress is not None:
if self.OverrideAttribs.NeedCompress.upper() not in ('TRUE', 'FALSE'):
GenFdsGlobalVariable.ErrorLogger( "Expected TRUE/FALSE for PCI_COMPRESS: %s" %self.InfFileName)
self.OverrideAttribs.NeedCompress = \
self.OverrideAttribs.NeedCompress.upper() == 'TRUE'
if self.OverrideAttribs.PciVendorId == None:
self.OverrideAttribs.PciVendorId = self.OptRomDefs.get ('PCI_VENDOR_ID')
if self.OverrideAttribs.PciClassCode == None:
self.OverrideAttribs.PciClassCode = self.OptRomDefs.get ('PCI_CLASS_CODE')
if self.OverrideAttribs.PciDeviceId == None:
self.OverrideAttribs.PciDeviceId = self.OptRomDefs.get ('PCI_DEVICE_ID')
if self.OverrideAttribs.PciRevision == None:
self.OverrideAttribs.PciRevision = self.OptRomDefs.get ('PCI_REVISION')
# InfObj = GenFdsGlobalVariable.WorkSpace.BuildObject[self.PathClassObj, self.CurrentArch]
# RecordList = InfObj._RawData[MODEL_META_DATA_HEADER, InfObj._Arch, InfObj._Platform]
# for Record in RecordList:
# Record = ReplaceMacros(Record, GlobalData.gEdkGlobal, False)
# Name = Record[0]
## GenFfs() method
#
# Generate FFS
#
# @param self The object pointer
# @retval string Generated .efi file name
#
def GenFfs(self):
#
# Parse Inf file get Module related information
#
self.__InfParse__()
self.__GetOptRomParams()
#
# Get the rule of how to generate Ffs file
#
Rule = self.__GetRule__()
GenFdsGlobalVariable.VerboseLogger( "Packing binaries from inf file : %s" %self.InfFileName)
#FileType = Ffs.Ffs.ModuleTypeToFileType[Rule.ModuleType]
#
# For the rule only has simpleFile
#
if isinstance (Rule, RuleSimpleFile.RuleSimpleFile) :
EfiOutputList = self.__GenSimpleFileSection__(Rule)
return EfiOutputList
#
# For Rule has ComplexFile
#
elif isinstance(Rule, RuleComplexFile.RuleComplexFile):
EfiOutputList = self.__GenComplexFileSection__(Rule)
return EfiOutputList
## __GenSimpleFileSection__() method
#
# Get .efi files according to simple rule.
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @retval string File name of the generated section file
#
def __GenSimpleFileSection__(self, Rule):
#
# Prepare the parameter of GenSection
#
OutputFileList = []
if Rule.FileName != None:
GenSecInputFile = self.__ExtendMacro__(Rule.FileName)
OutputFileList.append(GenSecInputFile)
else:
OutputFileList, IsSect = Section.Section.GetFileList(self, '', Rule.FileExtension)
return OutputFileList
## __GenComplexFileSection__() method
#
# Get .efi by sections in complex Rule
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @retval string File name of the generated section file
#
def __GenComplexFileSection__(self, Rule):
OutputFileList = []
for Sect in Rule.SectionList:
if Sect.SectionType == 'PE32':
if Sect.FileName != None:
GenSecInputFile = self.__ExtendMacro__(Sect.FileName)
OutputFileList.append(GenSecInputFile)
else:
FileList, IsSect = Section.Section.GetFileList(self, '', Sect.FileExtension)
OutputFileList.extend(FileList)
return OutputFileList
| gpl-2.0 | -6,444,457,340,399,212,000 | 33.793548 | 115 | 0.635386 | false | 3.967623 | false | false | false |
barnone/EigenD | app_cmdline/script.py | 2 | 3985 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
from pisession import session
from pi import index,async,timeout,proxy,resource
import optparse
import sys
import piw
import picross
import traceback
class Connector(proxy.AtomProxy,async.Deferred):
monitor = set()
def __init__(self,address):
async.Deferred.__init__(self)
proxy.AtomProxy.__init__(self)
self.__anchor = piw.canchor()
self.__anchor.set_client(self)
self.__anchor.set_address_str(address)
def close_client(self):
proxy.AtomProxy.close_client(self)
def cancel(self):
self.__anchor.set_address_str('')
self.__anchor.set_client(None)
self.__anchor=None
def node_ready(self):
self.succeeded()
class RpcAdapter(async.DeferredDecoder):
def decode(self):
if self.deferred.status() is False:
return async.Coroutine.failure(self.deferred.args()[0])
return self.deferred.args()[0]
def coroutine(lang,script,ctimeout=3000,rtimeout=3000,verbose=True):
connector = Connector(lang)
timer = timeout.Timeout(connector,ctimeout,False,'cant connect to language agent')
yield timer
if not timer.status():
yield async.Coroutine.failure(*timer.args())
return
if verbose:
print 'connected to',lang,connector.status()
for line in script_reader(script):
rpc = connector.invoke_rpc('exec',line,time=rtimeout)
yield rpc
if not rpc.status():
print line,'failed:',rpc.args()[0]
return
if verbose:
print line,'ok'
def script_reader(fp):
for line in fp:
line = line.strip()
if not line or line.startswith('#'): continue
yield line
def open_script(name):
if name == '-':
return sys.stdin
try:
return resource.file_open(name,"r")
except:
return None
def main():
parser = optparse.OptionParser(usage=sys.argv[0]+' [options] agent script')
parser.add_option('--quiet',action='store_true',dest='quiet',default=False,help='quiet')
parser.add_option('--ctimeout',action='store',type='int',dest='ctimeout',default=5000,help='con timeout (5000 ms)')
parser.add_option('--rtimeout',action='store',type='int',dest='rtimeout',default=300000,help='rpc timeout (300000 ms)')
parser.add_option('--verbose',action='store_true',dest='verbose',default=False,help='verbose')
(opts,args) = parser.parse_args(sys.argv)
if len(args) != 3:
parser.error('wrong number of arguments')
lang = args[1]
script = args[2]
fp = open_script(script)
if fp is None:
parser.error('cant open %s' % script)
def handler(ei):
traceback.print_exception(*ei)
return async.Coroutine.failure('internal error')
def failed(msg):
if opts.verbose:
print 'script failed:',msg
picross.exit(-1)
def succeeded():
if opts.verbose:
print 'script finished'
picross.exit(0)
def startup(dummy):
result = async.Coroutine(coroutine(lang,fp,opts.ctimeout,opts.rtimeout,opts.verbose),handler)
result.setErrback(failed).setCallback(succeeded)
return result
picross.pic_set_interrupt()
session.run_session(startup,clock=False)
| gpl-3.0 | -5,788,715,704,505,319,000 | 28.087591 | 123 | 0.65596 | false | 3.784425 | false | false | false |
World-Youth-Days/Dictionary | adapter/old_db_inserter.py | 1 | 6193 | # -*- coding: utf-8 -*-
import codecs
from DbAdapter import DbAdapter
from display_dict import display_dict
db = DbAdapter(None) # define db connection
printable = []
# --------------------------------------------------------------------#
# -------------------------- Open file -------------------------#
# --------------------------------------------------------------------#
def insert_from_file_line_is_record(path_name, delimiter=',', **kwargs):
global printable
    tags_pos = None
try:
f = codecs.open(path_name, "r", 'utf-8')
except SystemError:
print("Error while opening file!")
return 4
print("\nFile: " + path_name + "\n")
rows = ['base', 'mono', 'trans', 'author', 'level']
pos = dict(base=None, mono=None, trans=None, author=None,
level=None) # sorry, I avoid understanding deep/shallow copy specs ;)
const = dict()
# --------------------------------------------------------------------#
# ---------------------- Examine header -------------------------#
# --------------------------------------------------------------------#
header = f.readline().strip().split(delimiter)
print("Header: " + str(header))
print("Kwargs: " + str(kwargs))
for col in rows:
if col in kwargs:
const[col] = kwargs[col]
print("OK: Const " + col + " found")
try:
pos[col] = header.index(col)
print("OK: " + col + " at column " + str(pos[col]))
except ValueError:
print("Info: No " + col + " header found")
del pos[col]
if 'tags' in kwargs: # find sources of tags
const_tags = kwargs['tags'].split(',')
else:
const_tags = None
if 'tags' in header:
tags_pos = header.index('tags')
print("pos: " + str(pos))
print("const: " + str(const))
print("const_tags: " + str(const_tags))
print("tags_pos: " + str(tags_pos))
# --------------------------------------------------------------------#
# ------------------ Check for integrity ----------------------#
# --------------------------------------------------------------------#
if len(pos) + len(const) < 4:
print("Error: Insufficient information provided to fill all columns.")
return 2
    if pos.get('base') is None:
print("Warning: No base-word, assuming 0-th column as base")
pos['base'] = 0
if 'trans' not in pos and 'mono' not in pos:
print("Error: Neither monolingual nor translation defined, error!")
return 1
if (tags_pos is None) and const_tags is None:
print("Error: No tags provided!")
return 3
# --------------------------------------------------------------------#
# ---------------------- Build records -------------------------#
# --------------------------------------------------------------------#
    records = []  # parsed word records, inserted into the db after confirmation
    for line in f:
d = dict()
line = line.strip().split(delimiter)
for key in const:
d[key] = const[key]
for key in pos: # constant values CAN be overridden by those
# taken directly from table (^-^)
d[key] = line[pos[key]]
records.append(d)
    # print the records so a human can confirm them before insertion
    # for r in records:
    #     print r
    display_dict(records, rows)  # display using the new method from display_dict.py
# --------------------------------------------------------------------#
# ---------------------- Human check ;) -------------------------#
# --------------------------------------------------------------------#
if "force_yes" in kwargs and kwargs["force_yes"] == True:
print("Automatic yes chosen...")
elif input("Are those OK?[y/n]") not in ['y', 'yes', 'Y', 'Yes']:
print("Aborting...")
return 5
global db
db.add_words(records) # add words to db
# --------------------------------------------------------------------#
# ---------------------- Add tags -------------------------#
# --------------------------------------------------------------------#
# --------need to add remove-# feature
ids = []
for r in records: # add const_tags
del r['time']
print(r)
print(str(db.find_id(r)))
ids.append((db.find_id(r))[0])
# I'm pretty sure to find one record here...
if const_tags is not None:
db.join(ids, const_tags)
print("Joined all with tags: " + str(const_tags))
f.seek(0) # start new reading, skip header
f.readline()
i = 0
if tags_pos is not None:
for line in f: # add tags form tags_pos
line = line.strip().split(delimiter)
word = db.find_id(records[i])
db.join(word, line[tags_pos:])
print("Joined " + str(word) + "with tags " + str(line[tags_pos:]))
i += 1
print("Closing...")
f.close()
def test_tags_table():
db = DbAdapter(None)
db.set_readable('const_tag_1', 'First Constant Tag')
db.set_readable('rock4ever', 'Rock for Ever')
db.set_flag('const_tag_1', 'hidden')
db.set_flag('live_tag1', 'live')
db.set_flag('live_tag_2', 'live')
print(db.get_tag("heheszki"))
# --------------------------------------------------------------------#
# ---------------------- Call the function-------------------------#
# --------------------------------------------------------------------#
insert_from_file_line_is_record("../data/test1.txt", author="francuski", tags="from_fr,to_pl",
level=10, force_yes=True)
insert_from_file_line_is_record("../data/test2.txt", author="angielski", tags="from_en,to_pl",
level=4, force_yes=True)
insert_from_file_line_is_record("../data/test3.txt", author="śmieszek",
tags="from_pl,to_pl", force_yes=False)
test_tags_table()
#
# --------------------------------------------------------------------#
# ---------------------- CSV import -------------------------#
# --------------------------------------------------------------------#
def import_from_csv(path, **kwargs):
import csv
global db, printable
    tags_pos = None
try:
f = csv.reader(codecs.open("foo.csv", encoding="utf-8"), dialect='excel')
except SystemError:
print("Error while opening file!")
return 4
print("\nFile: " + path_name + "\n")
rows = ['base', 'mono', 'trans', 'author', 'level']
pos = dict(base=None, mono=None, trans=None, author=None,
level=None) # sorry, I avoid understanding deep/shallow copy specs ;)
const = dict()
| gpl-3.0 | 5,694,650,053,456,996,000 | 30.753846 | 94 | 0.465278 | false | 3.595819 | false | false | false |
gmalmquist/unix-hollymonitor | src/unix-hollymonitor.py | 1 | 4266 | #!/usr/bin/env python
# This is a script to run the hollymonitor in a little
# standalone webserver, rather than being integrated
# into a larger application.
from __future__ import print_function
from BaseHTTPServer import BaseHTTPRequestHandler
from subprocess import Popen, PIPE, STDOUT
import mimetypes
import os
import re
import SocketServer
import shutil
import sys
SCRIPT_DIR = None
def execute_maybe(file_path):
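  # If the file starts with a shebang, run it with that interpreter (from
  # SCRIPT_DIR) and return its combined stdout/stderr; otherwise return None.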
try:
h = open(file_path, 'r')
line = h.readline()
h.close()
except:
return None
#print(file_path, line)
if line and line.startswith('#!'):
command = line[2:].split(' ')
command = [c.strip() for c in command]
try:
p = Popen(command + [file_path], cwd=SCRIPT_DIR, stdout=PIPE, stderr=STDOUT)
out, err = p.communicate()
return out
except Exception as e:
pass
return None
class HollyHandler(BaseHTTPRequestHandler):
def do_GET(self):
file_path = os.path.join(SCRIPT_DIR, self.path[1:])
file_path = os.path.abspath(file_path)
file_path = os.path.relpath(file_path, SCRIPT_DIR)
if '..' in file_path:
self.send_response(403)
self.end_headers()
return
file_path = os.path.abspath(os.path.join(SCRIPT_DIR, file_path))
content_type = 'text/html; charset=utf-8'
if self.path == '/' or self.path == '':
status_html = os.path.join(SCRIPT_DIR, 'html', 'status.html')
if os.path.exists(status_html):
host = self.headers['Host']
self.send_response(301)
self.send_header('Location', 'http://{host}/html/status.html'.format(host=host))
self.end_headers()
return
if os.path.exists(file_path):
self.send_response(200)
if os.path.isdir(file_path):
message = '''<html>
<head><title>Directory {rel_path}</title></head>
<body>
<h1>Directory {rel_path}</h2>
<ul>
'''.format(rel_path = os.path.relpath(file_path, SCRIPT_DIR))
for f in sorted(os.listdir(file_path),
key = lambda f: (0, f) if os.path.isdir(os.path.join(file_path, f)) else (1, f)):
path = os.path.join(os.path.relpath(file_path, SCRIPT_DIR), f)
name = f
if os.path.isdir(os.path.join(SCRIPT_DIR, path)):
name = name + '/'
message += '<li>'
message += '<a href="{path}">{name}</a>'.format(path=name, name=name)
message += '</li>\n'
message += '</ul>\n</body>\n</html>\n'
else:
message = execute_maybe(file_path)
if message is not None:
self.wfile.write(message)
return
h = open(file_path, 'rb')
message = h.read()
h.close()
mime_type, mime_encoding = mimetypes.guess_type(file_path)
if not mime_type:
#print('Mime-type unknown, defaulting to text/html.')
content_type = 'text/html; charset=utf-8'
else:
#print('Mime-type is', mime_type, mime_encoding)
if not mime_encoding:
content_type = mime_type
else:
content_type = '%s; %s' % (mime_type, mime_encoding)
else:
self.send_response(404)
return
self.send_header('Content-type', content_type)
self.send_header('content-length', len(message))
self.end_headers()
self.wfile.write(message)
def start_cpu_recorder():
p = Popen([
'python',
os.path.join(SCRIPT_DIR, 'cpu-reporter.py'),
os.path.join(SCRIPT_DIR, 'cpu-usage.js')
])
def main(args, script_dir, script_path):
global SCRIPT_DIR
SCRIPT_DIR = script_dir
VARS = {}
FLAGS = set()
for arg in args:
if '=' in arg:
key, val = arg.split('=')
VARS[key] = val
else:
FLAGS.add(arg)
port = 8080
if 'port' in VARS:
if not re.match('^[0-9]+$', VARS['port']):
print('Port "%s"' % VARS['port'], 'is not valid, must be a number.')
else:
port = int(VARS['port'])
print('Starting CPU recorder...')
start_cpu_recorder()
print('Starting standalone webserver on port', port)
print('Use the command-line argument port=xxxx to change the port.')
httpd = SocketServer.TCPServer(('', port), HollyHandler)
httpd.serve_forever()
main(sys.argv[1:], os.path.dirname(sys.argv[0]), sys.argv[0])
| mit | -7,604,787,221,590,789,000 | 28.219178 | 95 | 0.599391 | false | 3.361702 | false | false | false |
mtayseer/infoq-downloader | infoq_downloader.py | 1 | 4839 | #!/usr/bin/env python
from __future__ import division, print_function
import os
import sys
import re
import argparse
import requests
import cssselect
import lxml.html
import unicodedata
if sys.version_info.major == 3:
text_type = str
else:
text_type = unicode
# Some settings
download_directory = 'downloads'
cleanup_elements = [
'#footer', '#header', '#topInfo', '.share_this', '.random_links',
'.vendor_vs_popular', '.bottomContent', '#id_300x250_banner_top',
'.presentation_type', '#conference', '#imgPreload', '#text_height_fix_box',
'.download_presentation', '.recorded', 'script[async]',
'script[src*=addthis]'
]
# Set argparse to parse the paramaters
parser = argparse.ArgumentParser(description='Download InfoQ presentations.')
parser.add_argument('url', metavar='URL', type=str,
help='URL of the presentation to download')
# Parse the arguments passed to the script
args = parser.parse_args()
url = args.url
# Tell infoq that I'm an iPad, so it gives me simpler HTML to parse & mp4 file
# file to download
user_agent = (
"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) "
"AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b "
"Safari/531.21.10')"
)
# Start downloading
print('Downloading HTML file')
content = requests.get(url, headers={'User-Agent': user_agent}).content
html_doc = lxml.html.fromstring(content)
title = html_doc.find(".//title").text
video_url = html_doc.cssselect('video > source')[0].attrib['src']
video_file = os.path.split(video_url)[1]
html_doc.cssselect('video > source')[0].attrib['src'] = video_file
# Clean the page
for elt in html_doc.cssselect(', '.join(e for e in cleanup_elements)):
elt.getparent().remove(elt)
html_doc.cssselect('#wrapper')[0].attrib['style'] = 'background: none'
content = lxml.html.tostring(html_doc).decode('utf-8')
# Make slides links point to local copies
slides_re = re.compile(r"'(/resource/presentations/[^']*?/en/slides/[^']*?)'")
slides = slides_re.findall(content)
# Create a directory for the downloaded presentation if it doesn't exist
if not os.path.exists(download_directory):
os.makedirs(download_directory)
# presentation folder path
if isinstance(title, text_type):
normalized_title = unicodedata.normalize('NFKD', title)
else:
normalized_title = text_type(title)
presentation_directory = os.path.join(download_directory, normalized_title)
# Create a folder with the name of the presentation
if not os.path.exists(presentation_directory):
os.makedirs(presentation_directory)
# Create a slides folder inside the presentation folder
if not os.path.exists('{}/slides'.format(presentation_directory)):
os.makedirs('{}/slides'.format(presentation_directory))
#Write content
content = re.sub(r"/resource/presentations/[^']*?/en/", '', content)
with open('{}/index.html'.format(presentation_directory), 'w') as f:
f.write(content)
f.flush()
# Download slides
slides_dir = os.path.join(presentation_directory, 'slides')
if not os.path.isdir(slides_dir):
os.makedirs(slides_dir)
for i, slide in enumerate(slides):
filename = os.path.split(slide)[1]
full_path = os.path.join(slides_dir, '{0}'.format(filename))
if os.path.exists(full_path):
continue
print('\rDownloading slide {0} of {1}'.format(i+1, len(slides)), end='')
sys.stdout.flush() # Hack for Python 2
url = 'http://www.infoq.com{0}'.format(slide)
with open(full_path, 'wb') as f:
f.write(requests.get(url).content)
print()
# If the video file is already downloaded successfully, don't do anything else
if os.path.exists(video_file):
print('Video file already exists')
sys.exit()
# Download the video file. stream=True here is important to allow me to iterate
# over content
downloaded_file = os.path.join(
presentation_directory, '{}.part'.format(video_file)
)
if os.path.exists(downloaded_file):
bytes_downloaded = os.stat(downloaded_file).st_size
else:
bytes_downloaded = 0
r = requests.get(video_url, stream=True,
headers={'Range': 'bytes={0}-'.format(bytes_downloaded)})
content_length = int(r.headers['content-length']) + bytes_downloaded
with open(downloaded_file, 'ab') as f:
for chunk in r.iter_content(10 * 1024):
f.write(chunk)
f.flush()
# \r used to return the cursor to beginning of line, so I can write
# progress on a single line.
# The comma at the end of line is important, to stop the 'print' command
# from printing an additional new line
percent = f.tell() / content_length * 100
print('\rDownloading video {0:.2f}%'.format(percent), end='')
sys.stdout.flush() # Hack for Python 2
final_video_name = os.path.join(presentation_directory, video_file)
os.rename(downloaded_file, final_video_name)
| mit | 752,140,745,732,130,700 | 33.81295 | 80 | 0.692498 | false | 3.36509 | false | false | false |
cwrubiorobots/ramblerVision | backup/shelterfind-serial-kogeto.py | 1 | 4607 | #!/usr/bin/python
import cv, serial, struct
from datetime import datetime
cyril = serial.Serial('/dev/ttyAMA0', 9600) #open first serial port and give it a good name
print "Opened "+cyril.portstr+" for serial access"
centerX = 175 #160
centerY = 140 #120
cropped = None
img = None
# decrease angular resolution for 8-bit serial transport
def derez(x):
if( x < 90 ):
return (-90-x)/2
else:
return (270-x)/2
# allow user to click on image from camera to set the center for transformation
def on_mouse(event, x, y, flags, param):
if event==cv.CV_EVENT_LBUTTONDOWN:
print x, ", ", y, ": ", img[y,x]
#print "Set center ", x, ", ", y, ": ", img[y,x]
#global centerX
#global centerY
#centerX = x
#centerY = y
if __name__ == '__main__':
datalog = open("data.log", "w+")
datalog.write("\n~~~=== Rambler Data Log Opened, " + str(datetime.now()) + " ===~~~\n")
capture = cv.CaptureFromCAM(0)
#capture = cv.CaptureFromFile("../out2.mpg")
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
polar = cv.CreateImage((360, 360), 8, 3)
cropped = cv.CreateImage((360, 40), 8, 3)
img = cv.CreateImage((320, 240), 8, 3)
cones = cv.CreateImage((360, 40), 8, 1)
cv.NamedWindow('cam')
cv.NamedWindow('unwrapped')
cv.NamedWindow('target')
cv.SetMouseCallback('cam', on_mouse)
on_mouse(cv.CV_EVENT_LBUTTONDOWN, centerX, centerY, None, None)
# These values determine the range of colors to detect as "shelter".
#Calibration A: finding cones in room 817
lower = cv.Scalar(40, 90, 170) # (B, G, R)
upper = cv.Scalar(80, 180, 255)
#Calibration B: finding green paper in 817
#lower = cv.Scalar(10, 90, 10)
#upper = cv.Scalar(99, 255, 90)
M = 69
while True:
img = cv.QueryFrame(capture)
cv.LogPolar(img, polar, (centerX, centerY), M+1, cv.CV_INTER_NN) #possible speedup - get subrect src
#cropped = cv.GetSubRect(polar,(280,0,40,360))
#cv.Transpose(cropped, cropped)
cv.Transpose(cv.GetSubRect(polar,(280,0,40,360)), cropped)
cv.Flip(cropped) #just for viewing (possible speedup)
cv.InRangeS(cropped, lower, upper, cones)
cv.Erode(cones, cones) # just once might be too much
k = cv.CreateStructuringElementEx(3, 43, 1, 1, cv.CV_SHAPE_RECT) # create a 3x43 rectangular dilation element k
cv.Dilate(cones, cones, k, 2)
#scan top row of thresholded, eroded, dilated image, find the number of contiguous segments and their location
s = 0 # size of contiguous segment
ss = 0 #number of contiguous segments
bearingToLandmarks = []
for i in xrange(360-2):
c = cones[0, i] #current
n = cones[0, i+1] #next
#print int(c),
if (c == 0 and n == 255) or \
(c == 255 and n == 255): # this condition marks beginning or middle of contiguous segment
s = s + 1
#print ".",
elif (c == 255 and n == 0): # end of contiguous segment
ss = ss + 1
bearingToLandmarks.append((i-s/2, s))
s = 0
#handle wraparound
if (i == 360-2-1 and s != 0): #TODO: double check this offset
if (cones[0,0] == 255):
#print "edge case A"
bearingToLandmarks[0] = ((bearingToLandmarks[0][0]-s/2)%360, bearingToLandmarks[0][1]+s) #TODO: recalculate center more accurately
else:
#print "edge case B"
bearingToLandmarks.append((c-s/2, s))
#print ".", ss, "."
#bearingToLandmarks.append((derez(g), 12))
#g = (g + 1) % 360
print bearingToLandmarks, len(bearingToLandmarks)
#TODO - Bearing output
if len(bearingToLandmarks) > 0:
output = struct.pack('c','\xfa') \
+ struct.pack('B', 0) \
+ struct.pack('b', derez(bearingToLandmarks[0][0]) ) \
+ struct.pack('B', 0)
cyril.write(output)
#Data Logging
if (cyril.inWaiting() > 0):
logdata = cyril.read(cyril.inWaiting())
a = 0
b = 0
for c in logdata:
if c == '\n':
datalog.write(str(datetime.now().time())+","+logdata[a:b]+"\n")
a = b + 1
b = b + 1
cv.ShowImage('cam', img)
cv.ShowImage('target', cones)
cv.ShowImage('unwrapped', cropped)
key = cv.WaitKey(10) # THIS REQUIRES AT LEAST ONE WINDOW
#print "key ",key
if key > 0:
break
cv.DestroyAllWindows()
cyril.close()
datalog.write("\n~~~=== Rambler Data Log Closed, " + str(datetime.now()) + " ===~~~\n")
datalog.close()
| gpl-2.0 | -3,594,148,619,330,314,000 | 31.907143 | 146 | 0.598003 | false | 3.057067 | false | false | false |
xiexiangwei/xGame | gamecenter/main.py | 1 | 2054 | # coding:utf-8
import platform
import sys
sys.path.append("../")
if 'twisted.internet.reactor' not in sys.modules:
if platform.system() == "Linux":
from twisted.internet import epollreactor
epollreactor.install()
else:
from twisted.internet import iocpreactor
iocpreactor.install()
import logging
from logging.handlers import TimedRotatingFileHandler
from twisted.internet import reactor
from twisted.python import log
from common import daemon, utils, const, servermanager
import clientfactory
import config
import random
import time
import redishelper
import mysqlhelper
def MainStop():
pass
def MainRun(isdaemon, id):
random.seed(time.time())
logging.getLogger().setLevel(config.instance.log_level)
handler = TimedRotatingFileHandler(filename=config.instance.log_file, when='D', interval=1)
handler.setLevel(config.instance.log_level)
formatter = logging.Formatter(config.instance.log_format)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
log.PythonLoggingObserver().start()
if not isdaemon:
handler = logging.StreamHandler()
handler.setLevel(config.instance.log_level)
formatter = logging.Formatter(config.instance.log_format)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
redishelper.instance.start()
mysqlhelper.instance.start()
clientfactory.instance.start(u"", config.instance.server_port, config.instance.max_client)
logging.info(u"游戏中心服务器启动成功")
def StartRequest(isdaemon):
config.instance.server_ip = utils.getExternalIP()
servermanager.instance.start(const.CLIENT_TYPE_GAMECENTER,
config.instance,
MainRun,
isdaemon)
reactor.run()
logging.info(u"游戏中心服务器停止运行")
MainStop()
def Run():
daemon.run(config.instance.server_pid, StartRequest)
if __name__ == "__main__":
Run()
| apache-2.0 | -3,971,600,087,697,623,000 | 26.916667 | 95 | 0.695522 | false | 3.806818 | true | false | false |
jaredthomas68/FEM | src/fem.py | 1 | 32857 | import math as m
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import spsolve
import time
import matplotlib.pylab as plt
def ffunc_constant(x, a):
"""
Constant valued forcing function
    :param x: point at which to evaluate the forcing function
:param a: parameter values, in this case the value of the constant
:return: result of function evaluation, in this case the constant 'a'
"""
f = a
return f
def ffunc_linear(x, a=np.array([0, 1])):
"""
Linear forcing function
:param x: point at which to evaluate the forcingg function
:param a: parameter values, in this case an array with two elements
:return: the result of the function evaluation
"""
f = a[0] + a[1]*x
return f
def ffunc_quadratic(x, a=np.array([0, 0, 1])):
"""
Quadratic forcing function
:param x: point at which to evaluate the forcingg function
:param a: parameter values, in this case an array with three elements
:return: the result of the function evaluation
"""
f = a[0] + a[1]*x + a[2]*x**2
return f
def ffunc_cubic(x, a=np.array([0., 0., 0., 10.])):
f = a[0] + a[1]*x + a[2]*x**2 + a[3]*x**3
return f
def ffunc_beam(xr, a=np.array([10, 0.005, 0]), Ndof=6):
"""
Forcing function defined for coding 2 part 2
:param xr: location on beam normalized to be in [0, 1]
    :param a: parameters; a[0] is the load magnitude and a[-1] selects the load case (see below)
:return: forcing function value
"""
# load cases corresponding to a[-1]
# 0: constant axial load
# 1: constant transverse load
# 2: Linearly distributed transverse load (0 at left, N at right)
f = a[0]
F = np.zeros([Ndof, xr.size])
if a[-1] == 0:
F[2, :] = f
elif a[-1] == 1:
F[0, :] = -f
elif a[-1] == 2:
F[0, :] = -xr*f
return F
def moment_of_inertia_rectangle(b, h):
Ix = (b*h**3)/12.
Iy = (h*b**3)/12.
Ixy = 0.
return Ix, Iy, Ixy
def moment_of_inertia_rod(d):
Ix = (np.pi*d**4)/64.
Iy = (np.pi*d**4)/64.
Ixy = 0.
# J = (np.pi*d**4)/32.
return Ix, Iy, Ixy
def fem_solver(Nell, he, Nint, p, ID, E, I1, I2, J, A, nu, ffunc=ffunc_quadratic, ffunc_args=np.array([0., 0., 1.]), case=2, Ndof=6.):
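    # Assemble the global stiffness matrix and load vector for Nell spline/Bezier
    # elements of order p with Nint-point Gauss quadrature, apply the boundary
    # conditions encoded in ID, solve K d = F, and return K, F, d and the full
    # dof-by-dof solution vector.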
p = int(p)
Ndof = int(Ndof)
print "entering solver"
# define LM array
IEN = ien_array(Nell, p, Ndof)
LM = lm_array(Nell, p, ID, IEN, Ndof)
# initialize global stiffness matrix
# K = np.zeros((ID[ID[:]>0].shape)) #coding 2
K = np.zeros((int(np.max(ID)), int(np.max(ID)))) # coding 3
# initialize global force vector
F = np.zeros(int(np.max(ID)))
    # get quadrature points and weights in local coordinates
xi, w = quadrature_rule(Nint)
# get node locations in global coordinates
xe = node_locations_x(Nell, he)
# find length of beam
L = np.sum(he)
# get the knot vector
S = knot_vector(Nell, xe, p)
# find the Greville Abscissae
ga = greville_abscissae(S, p)
mu = E/(2.*(1.+nu)) # nu = Poisson's ratio
A1s = A2s = 5./6.*A
D = get_D(A, E, mu, A1s, A2s, I1, I2, J)
# loop over elements
for e in np.arange(1, Nell+1):
# print "in element loop"
ke = np.zeros([(p + 1)*Ndof, (p + 1)*Ndof])
fe = np.zeros((p + 1)*Ndof)
# solve for local stiffness matrix and force vector
for i in np.arange(0, Nint):
# print "in integration loop"
B, Bdxi, Bdxidxi = local_bernstein(xi[i], p)
N, Nedxi, Nedxidxi = local_bezier_extraction(p, e, Nell, B, Bdxi, Bdxidxi)
Ndx, Ndxdx, dxdxi, x = global_bezier_extraction(ga[e-1:e+p],N, Nedxi, Nedxidxi)
# get f for each dof at this location
f = ffunc(x/L, ffunc_args, Ndof)
# get base k matrix for element e
for a in np.arange(0, p+1):
Ba = get_B(N[a], Ndx[a])
for b in np.arange(0, p+1):
Bb = get_B(N[b], Ndx[b])
BDB = np.matmul(np.transpose(Ba), np.matmul(D, Bb))
for idof in np.arange(0, Ndof):
for jdof in np.arange(0, Ndof):
ke[int(a*Ndof+idof), int(b*Ndof+jdof)] += BDB[idof, jdof]*dxdxi*w[i]
# K[int(LM[a, e - 1] - 1), int(LM[b, e - 1] - 1)] += Ndx[a]*E*I*Ndx[b]*w[i]*dxdxi
# element force calcs
for a in np.arange(0, p+1):
for idof in np.arange(0, Ndof):
fe[a*Ndof+idof] += N[a] * f[idof] * dxdxi * w[i]
        # assemble global stiffness matrix and force vector
for a in np.arange(0, p + 1):
for idof in np.arange(0, Ndof):
if LM[a*Ndof+idof, e - 1] == 0:
continue
# global force vector assembly
F[int(LM[a*Ndof+idof, e - 1] - 1)] += fe[a*Ndof+idof]
for b in np.arange(0, p + 1):
for jdof in np.arange(0, Ndof):
if LM[b*Ndof+jdof, e - 1] == 0:
continue
# global stiffness matrix assembly
K[int(LM[a*Ndof+idof, e - 1] - 1), int(LM[b*Ndof+jdof, e - 1] - 1)] += ke[a*Ndof+idof, b*Ndof+jdof]
# solve for d
d = solve_for_d(K, F)
# determine the number of nodes
Nnodes = Nell + p
# get full solution
solution = get_solution(d, p, Nell, Ndof, Nnodes, LM, ID)
return K, F, d, solution
def get_solution(d, p, Nell, Ndof, Nnodes, LM, ID):
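    # Scatter the solved unknowns d into the full (Ndof per node) solution
    # vector; entries for constrained dofs (ID == 0) stay zero.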
sol = np.zeros(Ndof*Nnodes)
for inode in np.arange(0, Nnodes):
for idof in np.arange(0, Ndof):
if ID[idof, inode] == 0:
continue
sol[inode*Ndof+idof] = d[int(ID[idof, inode]-1)]
return sol
def get_D(A, E, mu, A1s, A2s, I1, I2, J):
"""
    Define the constitutive matrix relating the generalized strains to the stress resultants
    :param A: cross sectional area of the beam
    :param E: Young's modulus of elasticity
    :param mu: shear modulus, E/(2*(1 + Poisson's ratio))
    :param A1s: shear-corrected area (5/6 of A)
    :param A2s: shear-corrected area (5/6 of A)
    :param I1: moment of inertia
    :param I2: moment of inertia
    :param J: polar moment of inertia
    :return D: diagonal material/section stiffness matrix
"""
D = np.array([[E*A, 0., 0., 0., 0., 0.],
[0., mu*A1s, 0., 0., 0., 0.],
[0., 0., mu*A2s, 0., 0., 0.],
[0., 0., 0., E*I1, 0., 0.],
[0., 0., 0., 0., E*I2, 0.],
[0., 0., 0., 0., 0., mu*J]])
return D
def get_B(Na, dNadx):
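    # Strain-displacement matrix for shape function N_a: relates the node's six
    # dofs (displacements and rotations) to the generalized strains paired with
    # the D matrix from get_D.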
Ba = np.array([[0., 0., dNadx, 0., 0., 0. ],
[dNadx, 0., 0., 0., -Na, 0. ],
[0., dNadx, 0., Na, 0., 0. ],
[0., 0., 0., dNadx, 0., 0. ],
[0., 0., 0., 0., dNadx, 0. ],
[0., 0., 0., 0., 0., dNadx]])
return Ba
def solve_for_d(K, F):
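    # Solve the linear system K d = F with a sparse direct solver.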
sK = sparse.csr_matrix(K)
d = spsolve(sK, F)
return d
def solve_for_displacements(d, Nell, he, g=0):
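    # Map the reduced solution d to nodal displacements, placing the prescribed
    # boundary value g at the last node.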
u = np.zeros(Nell+1)
x1 = 0.0
u[0] = (1.-x1)*d[0]
for e in np.arange(1, Nell):
x1 += he[e]
# u[e] = u[e-1] + (1.-x1)*d[e]
# u[e] = (1.-x1)*d[e]
u[e] = d[e]
# u[-1] = u[-2] + g
u[-1] = g
return u
def node_locations_x(Nell, he):
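    # Global x-coordinates of the element boundaries, accumulated from the
    # element lengths he (starting from x = 0).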
x_el = np.zeros(Nell + 1)
for e in np.arange(1, Nell):
x_el[e] = x_el[e-1] + he[e-1]
x_el[Nell] = x_el[Nell-1] + he[Nell-1]
return x_el
def quadrature_rule(Nint):
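    # Gauss-Legendre quadrature points and weights on [-1, 1] for 1-, 2-, or
    # 3-point rules.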
if (Nint < 1 or Nint > 3) or type(Nint) != int:
        raise ValueError('Nint must be an integer and one of 1, 2, 3')
gp = np.zeros(Nint)
w = np.zeros(Nint)
if Nint == 1:
gp[0] = 0.
w[0] = 2.
elif Nint == 2:
gp[0] = -1./np.sqrt(3.)
gp[1] = 1./np.sqrt(3.)
w[0] = 1.
w[1] = 1.
elif Nint == 3:
gp[0] = -np.sqrt(3./5.)
gp[1] = 0.
gp[2] = np.sqrt(3./5.)
w[0] = 5./9.
w[1] = 8./9.
w[2] = 5./9.
return gp, w
def get_u_of_x_approx(sol, he, Nell, Nint, p, ID, Nsamples, Ndof=6):
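    # Evaluate the approximate solution at Nsamples evenly spaced local points
    # in each element by summing the basis functions weighted by the solved dof
    # values; returns the sampled solution and the matching x locations.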
# get IEN array
IEN = ien_array(Nell, p, Ndof)
LM = lm_array(Nell, p, ID, IEN, Ndof)
LM_full = np.copy(LM)
[m, n] = np.shape(LM)
count_zeros = 0
for j in np.arange(0, n):
for i in np.arange(0, m):
if LM_full[i, j] == 0:
count_zeros += 1
LM_full[i, j] += count_zeros
    # get sample points in local coordinates
# xi_sample, w = quadrature_rule(Nint)
xi_sample = np.linspace(-1, 1, Nsamples)
# find number of samples
# Nsamples = xi_sample.size
# initialize displacement vector
u = np.zeros((Ndof, Nell * Nsamples+1))
# initialize error vector
error = np.zeros(Nell * Nint)
# initialize x vector
X = np.zeros(Nell * Nint)
# get node locations in global coordinates
xe = node_locations_x(Nell, he)
# get the knot vector
S = knot_vector(Nell, xe, p)
# find the Greville Abscissae
ga = greville_abscissae(S, p)
# set up resulting x location vector
x_sample = np.zeros(Nell*Nsamples+1)
# loop over elements
print "start loop"
count = 0
count1 = 0
for e in np.arange(0, Nell):
# loop over samples
# for i in np.arange(0, Nsamples):
for i in np.arange(0, Nsamples):
B, Bdxi, Bdxidxi = local_bernstein(xi_sample[i], p)
N, Nedxi, Nedxidxi = local_bezier_extraction(p, e + 1, Nell, B, Bdxi, Bdxidxi)
Ndx, Ndxdx, dxdxi, x = global_bezier_extraction(ga[e:e + p + 1], N, Nedxi, Nedxidxi)
x_sample[e*Nsamples+i] = x
# print x, xi_sample[i]
u_temp = np.zeros(Ndof)
for a in np.arange(0, p + 1):
for idof in np.arange(0, Ndof):
# idx = int(IEN[a*Ndof+idof, e]) - 1
#TODO correct the indexing
if LM[a * Ndof + idof, e] == 0:
count1 += 1
continue
# u_temp[idof] += N[a] * sol[e*Ndof+idof]
# u_temp[idof] += N[a] * sol[int(LM[a*Ndof+idof, e]+Ndof) - 1]
u_temp[idof] += N[a] * sol[int(LM_full[a*Ndof+idof, e]) - 1]
# u_temp[idof] += N[a] * sol[int(LM[a*Ndof+idof, e]+count1) - 1]
# u_temp[idof] += N[a] * sol[int(IEN[a*Ndof+idof, e]) - 1]
# u_temp[idof] += N[a] * sol[e*Ndof + idof]
# if np.any(u_temp) > 0:
# print "success"
# quit()
# u[int(e * Nint + i)]
u[:, count] = u_temp
count += 1
return u, x_sample
def get_u_of_x_exact(x, q, ffunc_num):
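    # Exact solutions on [0, 1] for constant, linear, and quadratic forcing
    # (ffunc_num = 0, 1, 2) used to check the approximate solution.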
u_ex = 0.
if ffunc_num == 0:
u_ex = q*(1.-x**2)/2.
elif ffunc_num == 1:
u_ex = q*(1.-x**3)/6.
elif ffunc_num == 2:
u_ex = q * (1. - x ** 4) / 12.
return u_ex
def knot_vector(Nell, Xe, p, open=True):
"""
Construct knot vector
:param Nell: number of elements
    :param Xe: array of node (element boundary) locations
:param p: order of basis functions
:return knots: knot vector
"""
# initialize knot vector
knots = np.zeros([Nell+2*p+1])
# populate knot vector
if open:
knots[0:p+1] = Xe[0]
knots[-p-1:] = Xe[-1]
for i in np.arange(1, Nell):
knots[i+p] = Xe[i]
return knots
def greville_abscissae(S, p):
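    # Greville abscissae: control point locations, each the average of p
    # consecutive interior knots of S.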
Nell = len(S) - 2*p - 1
GA = np.zeros(Nell+p)
for i in np.arange(0, Nell+p):
GA[i] = (1./p)*(np.sum(S[i+1:i+p+1]))
# print i, GA[i], S[i+1:i+p+1], np.sum(S[i+1:i+p+1]), p
return GA
def get_id(case, Nell, p, Ndof=6):
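    # Destination (ID) array: global equation number for each (dof, node) pair;
    # zeros mark constrained dofs. The case flag selects the support condition
    # listed in the branches below.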
ID = np.zeros([Ndof, Nell+p])
# cantilever L
if case == 0:
# print 'here in ', case
count = 0
for i in np.arange(1, Nell+p):
for j in np.arange(0, Ndof):
count += 1
ID[j, i] = count
# cantilever R
elif case == 1:
# print 'here in ', case
# print np.arange(1,Nell+p)
ID[2:] = np.arange(1,Nell+p-1)
# coding two part one
elif case == 2:
ID[0:Nell+p-1] = np.arange(1, Nell+p)
# simply supported (pin left, roller right)
elif case == 3:
count = 0
for i in np.arange(0, Nell + p):
for j in np.arange(0, Ndof):
if i == 0 and j != 4:
continue
elif i == Nell + p - 1 and (j == 0 or j == 1 or j == 3 or j == 5):
continue
count += 1
ID[j, i] = count
else:
raise ValueError('invalid support case')
# quit()
return ID
def ien_array(Nell, p, Ndof):
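    # IEN array: maps (local basis function, element) pairs to global function numbers,
    # with each node expanded into Ndof rows.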
Nell = int(Nell)
Ndof = int(Ndof)
p = int(p)
IEN = np.zeros([Ndof*(p+1), Nell])
for i in np.arange(0, p+1):
for j in np.arange(0, Ndof):
IEN[i*Ndof+j,:] = np.arange(i+1, i+1+Nell)
return IEN
def local_bernstein(xi, p):
# check if xi is in the acceptable range
if np.any(xi < -1) or np.any(xi >1):
raise ValueError("the value of xi is $f, but must be in the range [-1, 1]" %xi)
# check if p is in the acceptable range for this code
# if p > 3 or p < 2:
# raise ValueError("the value of p must be 2 or 3, but %i was given" % p)
# initialize Bernstein polynomial vectors
B = np.zeros(p+1)
Bdxi = np.zeros(p+1)
Bdxidxi = np.zeros(p+1)
for a in np.arange(1., p + 2.):
        # compute common factor of B and its derivatives
eta = (1. / (2. ** p)) * (m.factorial(p) / (m.factorial(a - 1.) * m.factorial(p + 1. - a)))
# calculate the value and derivatives of each element of the Bernstein polynomial vector
# print eta*((1.-xi)**(p-(a-1.)))*((1+xi)**(a-1.))
B[int(a - 1)] = eta * ((1. - xi) ** (p - (a - 1.))) * ((1. + xi) ** (a - 1.))
if xi == -1.:
if p == 2:
Bdxi[0] = -1.
Bdxi[1] = 1.
Bdxi[2] = 0.
Bdxidxi[0] = 0.5
Bdxidxi[1] = -1.0
Bdxidxi[2] = 0.5
elif p == 3:
Bdxi[0] = -1.5
Bdxi[1] = 1.5
Bdxi[2] = 0.
Bdxi[3] = 0.
Bdxidxi[0] = 1.5
Bdxidxi[1] = -3.
Bdxidxi[2] = 1.5
Bdxidxi[3] = 0.
elif xi == 1.:
if p == 2:
Bdxi[0] = 0.
Bdxi[1] = -1.
Bdxi[2] = 1.
Bdxidxi[0] = 0.5
Bdxidxi[1] = -1.0
Bdxidxi[2] = 0.5
if p == 3:
Bdxi[0] = 0.
Bdxi[1] = 0.
Bdxi[2] = -1.5
Bdxi[3] = 1.5
Bdxidxi[0] = 0.
Bdxidxi[1] = 1.5
Bdxidxi[2] = -3.
Bdxidxi[3] = 1.5
else:
# solve for the Bernstein polynomial vectors
for a in np.arange(1, p+2):
            # compute common factor of B and its derivatives
eta = (1./(2.**p))*(m.factorial(p)/(m.factorial(a-1.)*m.factorial(p+1.-a)))
# calculate the value and derivatives of each element of the Bernstein polynomial vector
# print eta*((1.-xi)**(p-(a-1.)))*((1+xi)**(a-1.))
# B[a-1] = eta*((1.-xi)**(p-(a-1.)))*((1+xi)**(a-1.))
Bdxi[a-1] = eta*(((1.-xi)**(p-a+1.))*(a-1.)*((1.+xi)**(a-2.))-
((1.+xi)**(a-1.))*(p-a+1.)*((1.-xi)**(p-a)))
# set up terms for second derivative
t1 = ((1.-xi)**(p-a+1))*(a-2.)*((1+xi)**(a-3.))
t2 = -((1.+xi)**(a-2.))*(p-a+1.)*((1.-xi)**(p-a))
t3 = -((1.+xi)**(a-1.))*(p-a)*((1.-xi)**(p-a-1.))
t4 = ((1.-xi)**(p-a))*(a-1.)*((1.+xi)**(a-2.))
Bdxidxi[a-1] = eta*((a-1.)*(t1+t2)-(p-a+1.)*(t3+t4))
return B, Bdxi, Bdxidxi
def local_bezier_extraction(p, e, Nell, B, Bdxi, Bdxidxi):
# if Nell = 1 C = Identity
# determine the appropriate Bezier extraction matrix
if p == 1 or Nell == 1:
C = np.identity(p+1)
elif p == 2:
if e == 1:
C = np.array([[1., 0., 0. ],
[0., 1., 0.5],
[0., 0., 0.5]])
elif e >=2 and e <= Nell-1.:
C = np.array([[0.5, 0., 0. ],
[0.5, 1., 0.5],
[0., 0., 0.5]])
elif e == Nell:
C = np.array([[0.5, 0., 0.],
[0.5, 1., 0.],
[0., 0., 1.]])
else:
raise ValueError('Invalid value of e. Must be in [1, %i], but %i was given' % (Nell,e))
elif p == 3:
if e == 1:
C = np.array([[1., 0., 0., 0. ],
[0., 1., 0.5, 0.25 ],
[0., 0., 0.5, 7./12.],
[0., 0., 0., 1./6. ]])
elif e == 2:
C = np.array([[0.25, 0., 0., 0. ],
[7./12., 2./3., 1./3., 1./6.],
[1./6., 1./3., 2./3., 2./3.],
[0., 0., 0., 1./6.]])
elif e >= 3 and e <= Nell-2:
C = np.array([[1./6., 0., 0., 0. ],
[2./3., 2./3., 1./3., 1./6.],
[1./6., 1./3., 2./3., 2./3.],
[0., 0., 0., 1./6.]])
elif e == Nell-1.:
C = np.array([[1./6., 0., 0., 0. ],
[2./3., 2./3., 1./3., 1./6. ],
[1./6., 1./3., 2./3., 7./12.],
[0., 0., 0., 0.25 ]])
elif e == Nell:
C = np.array([[1./6., 0., 0., 0.],
[7./12., 0.5, 0., 0.],
[0.25, 0.5, 1., 0.],
[0., 0., 0., 1.]])
else:
raise ValueError('Invalid value of e. Must be in [1, %i], but %i was given' % (Nell, e))
else:
raise ValueError('p must be 2 or 3, but p=%f was given' % p)
# solve for the value of the Bezier basis function and derivatives on the element (Ne)
Ne = np.matmul(C, B)
Nedxi = np.matmul(C, Bdxi)
Nedxidxi = np.matmul(C, Bdxidxi)
return Ne, Nedxi, Nedxidxi
def global_bezier_extraction(GA, Ne, Nedxi, Nedxidxi):
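    # Map the element basis from parent coordinates xi to global x using the Greville
    # abscissae GA as control points; returns global derivatives, the Jacobian dx/dxi, and x.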
# solve for xe and derivatives
xe = np.sum(GA*Ne)
# print GA, Nedxi
dxedxi = np.sum(GA*Nedxi)
dxedxedxidxi = np.sum(GA*Nedxidxi)
# derivatives of the basis function in global coordinates
Ndx = Nedxi/dxedxi
Ndxdx = (Nedxidxi - Ndx*dxedxedxidxi)/(dxedxi**2)
# print 'dxidxi', dxedxi
return Ndx, Ndxdx, dxedxi, xe
def error_quadrature(solution, p, Nell, Nint, he):
# get IEN array
    # NOTE: assumes a single (scalar) dof per node for this error computation
    IEN = ien_array(Nell, p, 1)
# get quadrature points and weights in local coordinants
xi_sample, w = quadrature_rule(Nint)
# initialize displacement vector
u = np.zeros(Nell * Nint)
# initialize error vector
error = np.zeros(Nell * Nint)
# initialize x vector
X = np.zeros(Nell * Nint)
# get node locations in global coordinates
xe = node_locations_x(Nell, he)
# get the knot vector
S = knot_vector(Nell, xe, p)
# find the Greville Abscissae
ga = greville_abscissae(S, p)
# loop over elements
print "start loop"
for e in np.arange(0, Nell):
# loop over samples
# for i in np.arange(0, Nsamples):
for i in np.arange(0, xi_sample.size):
B, Bdxi, Bdxidxi = local_bernstein(xi_sample[i], p)
N, Nedxi, Nedxidxi = local_bezier_extraction(p, e + 1, Nell, B, Bdxi, Bdxidxi)
Ndx, Ndxdx, dxdxi, x = global_bezier_extraction(ga[e:e+p+1],N, Nedxi, Nedxidxi)
# print x, xi_sample[i]
for a in np.arange(0, p + 1):
u[int(e * Nint + i)] += N[a] * solution[int(IEN[a, e]) - 1]
u_ex = get_u_of_x_exact(x, 1, 2)
error[e * Nint + i] += ((u_ex - u[int(e * Nint + i)])**2)*dxdxi*w[i]
# print error, e, i, e * Nint + i, u, X
# print "end loop", error
error = np.sqrt(abs(np.sum(error)))
# initialize location array
# x = np.linspace(0., 1., Nell * Nsamples)
# x_ex = np.linspace(0., 1., 500)
# print x
# print u
# q = 1
# u_ex = get_u_of_x_exact(x_ex, q, 2)
# print error
# quit()
return error, X
def lm_array(Nell, p, ID, IEN, Ndof):
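    # LM (location matrix): composes ID with IEN so each (local dof, element) pair maps
    # directly to its global equation number (0 for constrained dofs).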
Nell = int(Nell)
p = int(p)
Ndof = int(Ndof)
LM = np.zeros([Ndof*(p+1), Nell])
for a in range(0, p+1):
for i in np.arange(0, Ndof):
for e in np.arange(0, Nell):
LM[a*Ndof+i, e] = ID[i, int(int(IEN[a*Ndof+i, e])-1)]
return LM
def plot_error():
E = I = 1.
Nint = 3
n = np.array([1, 10, 100])
theoretical_error = np.zeros([2, n.size])
real_error = np.zeros([2, n.size])
# slope = np.zeros([2, n.size-1])
q = 1
h = np.zeros([2, n.size])
nodes = np.zeros([2, n.size])
r_slope = np.zeros(2)
t_slope = np.zeros(2)
# print h, n
for p, i in zip(np.array([2, 3]), np.arange(0, 2)):
for Nell, j in zip(n, np.arange(n.size)):
print 'p = %i, N = %i' % (p, Nell)
# run_quadratic(Nell, Nint, p)
nodes[i,j] = Nell + p
he = np.ones(Nell) / Nell
h[i, j] = he[0]
ID = get_id(2, Nell, p)
K, F, d, sol, da = fem_solver(Nell, he, Nint, p, ID, E, I)
# u = solve_for_displacements(d, Nell, he, g=0)
real_error[i, j], x = error_quadrature(sol, p, Nell, Nint, he)
# u_ap = get_u_of_x_approx(x, u, he)
u_ex = get_u_of_x_exact(x, q, 2)
# print u_ap, u_ex
# error[i, j] = np.sum(n(u_ap - u_ex)**2)
theoretical_error[i, j] = (abs(u_ex[0])*he[0]**(p+1))
# print theoretical_error
# print "ffunc: %i, Nell: %i, Error: %f" % (ffunc_num, Nell, error[i, j])
r_slope[i] = -np.log(real_error[i, 2]/real_error[i, 0])/np.log(n[2]/n[0])
t_slope[i] = -np.log(theoretical_error[i, -1]/theoretical_error[i, 0])/np.log(n[-1]/n[0])
# print (np.log(error[1])-np.log(error[0]))/(x[1]-x[0])
# print real_error.shape
# quit()
# np.savetxt('error.txt', np.c_[n, he, np.transpose(error)], header="Nell, h, E(f(x)=c), E(f(x)=x), E(f(x)=x^2)")
# print h.shape, real_error.shape
plt.loglog(h[0, :], theoretical_error[0,:], '--or', label='A priori, $p=2, slope=%.3f$' % t_slope[0])
plt.loglog(h[0, :], real_error[0,:], '-or', label='Real, $p=2$, $slope=%.3f$' % r_slope[0])
plt.loglog(h[1, :], theoretical_error[1,:], '--ob', label='A priori, $p=3, slope=%.3f$' % t_slope[1])
plt.loglog(h[1, :], real_error[1,:], '-ob', label='Real, $p=3$, $slope=%.3f$' % r_slope[1])
# plt.loglog(he, error[2,:], '-o', label='$f(x)=x^2$')
leg = plt.legend(loc=4, frameon=False)
leg.get_frame().set_alpha(0.0)
plt.xlabel('$h$')
plt.ylabel('$Error$')
    plt.savefig('error_he.pdf', transparent=True)
plt.show()
plt.loglog(nodes[0, :], theoretical_error[0, :], '--or', label='A priori, $p=2$' % t_slope[0])
plt.loglog(nodes[0, :], real_error[0, :], '-or', label='Real, $p=2$' % r_slope[0])
plt.loglog(nodes[1, :], theoretical_error[1, :], '--ob', label='A priori, $p=3$' % t_slope[1])
plt.loglog(nodes[1, :], real_error[1, :], '-ob', label='Real, $p=3$' % r_slope[1])
# plt.loglog(he, error[2,:], '-o', label='$f(x)=x^2$')
leg=plt.legend(loc=1, frameon=False)
leg.get_frame().set_alpha(0.0)
plt.xlabel('$Nodes$')
plt.ylabel('$Error$')
plt.savefig('error_nodes.pdf', transparent=True)
plt.show()
return
def plot_displacements(u, x, he, Nell, q=1, ffunc=ffunc_constant, ffunc_args=np.array([1])):
plt.rcParams.update({'font.size': 22})
x_ex = np.linspace(0, 1., 100)
x_el = node_locations_x(Nell, he)
u_ex = get_u_of_x_exact(x_ex, q, ffunc_num=len(ffunc_args)-1)
u_a = get_u_of_x_approx(x, u, he)
plt.figure()
plt.plot(x_ex, u_ex, label="Exact sol.", linewidth=3)
# plt.plot(x_el, u, '-s', label="Approx. sol. (nodes)")
plt.plot(x, u_a, '--r', markerfacecolor='none', label="Approx. sol.", linewidth=3)
plt.xlabel('X Position')
plt.ylabel("Displacement")
functions = ["$f(x)=c$", "$f(x)=x$", "$f(x)=x^2$"]
# plt.title(functions[ffunc]+", $n=%i$" %Nell, y=1.02)
plt.legend(loc=3, frameon=False)
plt.tight_layout()
# plt.savefig("displacement_func%i_Nell%i.pdf" %(ffunc, Nell))
plt.show()
plt.close()
return
def beam_solution_1():
# problem parameters
    E = 200E9  # (Pa) modulus of elasticity for steel
    nu = 0.3  # Poisson's ratio for steel
b = 0.005
h = 0.005
d = 0.02
A = b*h
l = 1.
Nint = 3
Ndof = 6
Px = 10.
# get the moment of inertia of the cross section
Ix, Iy, _ = moment_of_inertia_rectangle(b, h)
# Ix, Iy, _ = moment_of_inertia_rod(d)
J = Ix + Iy # polar moment of inertia
# print Ix*E
# quit()
# set cases to use
coding_3_problem = 3
# support_case = 0 # 0: cantilever fixed on the left
# # 1: cantilever fixed on the right
# # 2: coding 2 part 1
# # 3: simply supported (pin left, roller right)
#
# load_case = 0 # 0: constant axial load
# # 1: constant transverse load
# # 2: Linearly distributed transverse load (0 at left, N at right)
if coding_3_problem == 2:
support_case = 0
load_case = 0
# number of elements
n = np.array([1, 10])
# order of basis
p_vector = np.array([1])
plotdofs = np.array([2])
leg_loc = 4
elif coding_3_problem == 3:
support_case = 0
load_case = 1
# number of elements
n = np.array([10, 100])
# order of basis
p_vector = np.array([1, 2, 3])
plotdofs = np.array([0, 4])
leg_loc = 3
elif coding_3_problem == 4:
support_case = 3
load_case = 2
# number of elements
n = np.array([10, 100])
# order of basis
p_vector = np.array([1, 2, 3])
plotdofs = np.array([0, 4])
leg_loc = 9
else:
raise ValueError('Invalid problem number')
# forcing function
ffunc = ffunc_beam
# forcing function arguments
ffunc_args = np.array([Px, 1., load_case])
max_deflection_fem = np.zeros([p_vector.size, n.size, Ndof])
max_deflection_theoretical = np.zeros([p_vector.size, n.size, Ndof])
nodes = np.zeros([p_vector.size, int(n.size)])
num = 50
x_exact = np.linspace(0, l + l / num, num)
u_exact = np.zeros((Ndof, x_exact.size))
if coding_3_problem == 2:
u_exact[2, :] = (-Px * (1. - (x_exact - 1.) ** 2) / (2. * E * A))
elif coding_3_problem == 3:
u_exact[0, :] = (-Px * x_exact ** 2) * (
x_exact ** 2 + 6. * l ** 2 - 4. * l * x_exact) / (24. * E * Ix)
u_exact[4, :] = (-Px * x_exact) * (
3. * l ** 2 - 3. * l * x_exact + x_exact ** 2) / (6. * E * Ix)
elif coding_3_problem == 4:
u_exact[0, :] = (-Px * x_exact / (360. * l * E * Ix)) * (
7. * l ** 4 - 10. * (l ** 2) * (x_exact ** 2) + 3. * x_exact ** 4)
u_exact[4, :] = (-Px / (360. * l * E * Ix)) * (
7. * l ** 4 - 30. * (l ** 2) * (x_exact ** 2) + 15. * x_exact ** 4)
for plotdof in plotdofs:
figure, axes = plt.subplots(p_vector.size, n.size, sharex=True, sharey=True)
for p, i in zip(p_vector, np.arange(0, p_vector.size)):
for Nell, j in zip(n, np.arange(0, n.size)):
# vector of element lengths
he = np.ones(Nell) / Nell
# vector of element locations
x = np.linspace(0, 1, 4 * Nell + 1)
# ID array
ID = get_id(support_case, Nell, p, Ndof)
# if Nell == 10:
#
# print ID
# quit()
nodes[i, j] = Nell+p
tic = time.time()
K, F, d, sol = fem_solver(Nell, he, Nint, p, ID, E, Ix, Iy, J, A, nu, ffunc=ffunc, ffunc_args=ffunc_args, case=support_case, Ndof=Ndof)
toc = time.time()
# print he, Nell, K
print "Time to run fem solver: %.3f (s)" % (toc - tic)
Nsamples = int(100./Nell)
u, x = get_u_of_x_approx(sol, he, Nell, Nint, p, ID, Nsamples)
# print np.array([1. / 6., 21. / 128., 7. / 48., 37. / 384., 0])
print "Time to solve for u(x): %.3f (s)" % (toc - tic)
print "Finished"
# print d, u, x[::-1]
# if Nell > 1:
max_deflection_fem[i, j, plotdof] = np.amax(np.abs(u[plotdof, :]))
max_deflection_theoretical[i, j, plotdof] = np.amax(np.abs(u_exact[plotdof, :]))
print ""
print "problem %i" % coding_3_problem
print "p=%i, Nell=%i, DOF=%i" %(p, Nell, plotdof)
print "Analytical max def: %s" %(max_deflection_theoretical[i, j, plotdof])
print "FEM max def: %s" %(max_deflection_fem[i, j, plotdof])
print ""
if p_vector.size == 1 and n.size == 1:
axes.plot(x_exact[:-1], u_exact[plotdof, :-1], '-r', linewidth=1.5, label='Analytic')
axes.plot(x[:-1], u[plotdof, :-1], '--b', label='FEM')
elif p_vector.size == 1:
axes[j].plot(x_exact[:-1], u_exact[plotdof, :-1], '-r', linewidth=1.5, label='Analytic')
axes[j].plot(x[:-1], u[plotdof, :-1], '--b', label='FEM')
elif n.size == 1:
axes[i].plot(x_exact[:-1], u_exact[plotdof, :-1], '-r', linewidth=1.5, label='Analytic')
axes[i].plot(x[:-1], u[plotdof, :-1], '--b', label='FEM')
else:
axes[i,j].plot(x_exact[:-1], u_exact[plotdof, :-1], '-r', linewidth=1.5, label='Analytic')
axes[i,j].plot(x[:-1], u[plotdof, :-1], '--b', label='FEM')
for i in np.arange(0, p_vector.size):
if p_vector.size == 1 and n.size == 1:
axes.set_ylabel('Deflection, $p=%i$' % (p_vector[i]))
axes.legend(loc=leg_loc, frameon=False)
elif p_vector.size == 1:
axes[0].set_ylabel('Deflection, $p=%i$' % (p_vector[i]))
axes[0].legend(loc=leg_loc, frameon=False)
elif n.size == 1:
axes[i].set_ylabel('Deflection, $p=%i$' % (p_vector[i]))
axes[i].legend(loc=leg_loc, frameon=False)
else:
axes[i, 0].set_ylabel('Deflection, $p=%i$' % (p_vector[i]))
axes[0, -1].legend(loc=leg_loc, frameon=False)
for j in np.arange(0, n.size):
if p_vector.size == 1 and n.size == 1:
axes.set_xlabel('X Position')
axes.set_title('$N_{ell}=%i$' % (n[j]))
elif p_vector.size == 1:
axes[j].set_xlabel('X Position')
axes[j].set_title('$N_{ell}=%i$' % (n[j]))
elif n.size == 1:
axes[-1].set_xlabel('X Position')
axes[0].set_title('$N_{ell}=%i$' % (n[j]))
else:
axes[-1, j].set_xlabel('X Position')
axes[0, j].set_title('$N_{ell}=%i$' % (n[j]))
plt.tight_layout()
# axes[0,0].legend('Exact', 'FEM')
plt.savefig('beam1_deflection_prob%i_dof%i.pdf' % (coding_3_problem, plotdof))
plt.show()
# for plotdof in plotdofs:
# fig = plt.figure()
#
# plt.plot(nodes[0,:], max_deflection_thoeretical[plotdof,:], 'r', label='theoretical')
# plt.plot(nodes[0,:], max_deflection_fem[plotdof,:],'--ob', label='fem, p=2')
# # plt.plot(nodes[1,:], max_deflection_thoeretical[1,:], label='theoretical, p=3')
# plt.plot(nodes[1,:], max_deflection_fem[plotdof,:], '--og', label='fem, p=3')
# plt.xlabel('Nodes')
# plt.ylabel('Max Deflection')
# # plt.ylim([0.0028, 0.0032])
# plt.legend(loc = 0)
#
# plt.tight_layout()
# plt.savefig('max_deflection_vs_n.pdf', transparent=True)
# plt.show()
return
if __name__ == "__main__":
beam_solution_1() | mit | 4,212,410,585,531,477,000 | 30.11553 | 151 | 0.474754 | false | 2.815751 | false | false | false |
librelab/qtmoko-test | qtopiacore/qt/util/local_database/qlocalexml2cpp.py | 1 | 18278 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation ([email protected])
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial Usage
## Licensees holding valid Qt Commercial licenses may use this file in
## accordance with the Qt Commercial License Agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Nokia.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
## If you have questions regarding the use of this file, please contact
## Nokia at [email protected].
## $QT_END_LICENSE$
##
#############################################################################
import sys
import xml.dom.minidom
def check_static_char_array_length(name, array):
    # some compilers, like VC6, don't allow static arrays larger than 64K bytes in size.
size = reduce(lambda x, y: x+len(escapedString(y)), array, 0)
if size > 65535:
print "\n\n\n#error Array %s is too long! " % name
sys.stderr.write("\n\n\nERROR: the content of the array '%s' is too long: %d > 65535 " % (name, size))
sys.exit(1)
def wrap_list(lst):
def split(lst, size):
for i in range(len(lst)/size+1):
yield lst[i*size:(i+1)*size]
return ",\n".join(map(lambda x: ", ".join(x), split(lst, 20)))
def firstChildElt(parent, name):
child = parent.firstChild
while child:
if child.nodeType == parent.ELEMENT_NODE \
and (not name or child.nodeName == name):
return child
child = child.nextSibling
return False
def nextSiblingElt(sibling, name):
sib = sibling.nextSibling
while sib:
if sib.nodeType == sibling.ELEMENT_NODE \
and (not name or sib.nodeName == name):
return sib
sib = sib.nextSibling
return False
def eltText(elt):
result = ""
child = elt.firstChild
while child:
if child.nodeType == elt.TEXT_NODE:
if result:
result += " "
result += child.nodeValue
child = child.nextSibling
return result
def loadLanguageMap(doc):
result = {}
language_list_elt = firstChildElt(doc.documentElement, "languageList")
language_elt = firstChildElt(language_list_elt, "language")
while language_elt:
language_id = int(eltText(firstChildElt(language_elt, "id")))
language_name = eltText(firstChildElt(language_elt, "name"))
language_code = eltText(firstChildElt(language_elt, "code"))
result[language_id] = (language_name, language_code)
language_elt = nextSiblingElt(language_elt, "language")
return result
def loadCountryMap(doc):
result = {}
country_list_elt = firstChildElt(doc.documentElement, "countryList")
country_elt = firstChildElt(country_list_elt, "country")
while country_elt:
country_id = int(eltText(firstChildElt(country_elt, "id")))
country_name = eltText(firstChildElt(country_elt, "name"))
country_code = eltText(firstChildElt(country_elt, "code"))
result[country_id] = (country_name, country_code)
country_elt = nextSiblingElt(country_elt, "country")
return result
def loadDefaultMap(doc):
result = {}
list_elt = firstChildElt(doc.documentElement, "defaultCountryList")
elt = firstChildElt(list_elt, "defaultCountry")
while elt:
country = eltText(firstChildElt(elt, "country"));
language = eltText(firstChildElt(elt, "language"));
result[language] = country;
elt = nextSiblingElt(elt, "defaultCountry");
return result
def fixedCountryName(name, dupes):
if name in dupes:
return name + "Country"
return name
def fixedLanguageName(name, dupes):
if name in dupes:
return name + "Language"
return name
def findDupes(country_map, language_map):
country_set = set([ v[0] for a, v in country_map.iteritems() ])
language_set = set([ v[0] for a, v in language_map.iteritems() ])
return country_set & language_set
def languageNameToId(name, language_map):
for key in language_map.keys():
if language_map[key][0] == name:
return key
return -1
def countryNameToId(name, country_map):
for key in country_map.keys():
if country_map[key][0] == name:
return key
return -1
def convertFormat(format):
result = ""
i = 0
while i < len(format):
if format[i] == "'":
result += "'"
i += 1
while i < len(format) and format[i] != "'":
result += format[i]
i += 1
if i < len(format):
result += "'"
i += 1
else:
s = format[i:]
if s.startswith("EEEE"):
result += "dddd"
i += 4
elif s.startswith("EEE"):
result += "ddd"
i += 3
elif s.startswith("a"):
result += "AP"
i += 1
elif s.startswith("z"):
result += "t"
i += 1
elif s.startswith("v"):
i += 1
else:
result += format[i]
i += 1
return result
class Locale:
def __init__(self, elt):
self.language = eltText(firstChildElt(elt, "language"))
self.country = eltText(firstChildElt(elt, "country"))
self.decimal = int(eltText(firstChildElt(elt, "decimal")))
self.group = int(eltText(firstChildElt(elt, "group")))
self.listDelim = int(eltText(firstChildElt(elt, "list")))
self.percent = int(eltText(firstChildElt(elt, "percent")))
self.zero = int(eltText(firstChildElt(elt, "zero")))
self.minus = int(eltText(firstChildElt(elt, "minus")))
self.plus = int(eltText(firstChildElt(elt, "plus")))
self.exp = int(eltText(firstChildElt(elt, "exp")))
self.am = eltText(firstChildElt(elt, "am"))
self.pm = eltText(firstChildElt(elt, "pm"))
self.longDateFormat = convertFormat(eltText(firstChildElt(elt, "longDateFormat")))
self.shortDateFormat = convertFormat(eltText(firstChildElt(elt, "shortDateFormat")))
self.longTimeFormat = convertFormat(eltText(firstChildElt(elt, "longTimeFormat")))
self.shortTimeFormat = convertFormat(eltText(firstChildElt(elt, "shortTimeFormat")))
self.standaloneLongMonths = eltText(firstChildElt(elt, "standaloneLongMonths"))
self.standaloneShortMonths = eltText(firstChildElt(elt, "standaloneShortMonths"))
self.standaloneNarrowMonths = eltText(firstChildElt(elt, "standaloneNarrowMonths"))
self.longMonths = eltText(firstChildElt(elt, "longMonths"))
self.shortMonths = eltText(firstChildElt(elt, "shortMonths"))
self.narrowMonths = eltText(firstChildElt(elt, "narrowMonths"))
self.standaloneLongDays = eltText(firstChildElt(elt, "standaloneLongDays"))
self.standaloneShortDays = eltText(firstChildElt(elt, "standaloneShortDays"))
self.standaloneNarrowDays = eltText(firstChildElt(elt, "standaloneNarrowDays"))
self.longDays = eltText(firstChildElt(elt, "longDays"))
self.shortDays = eltText(firstChildElt(elt, "shortDays"))
self.narrowDays = eltText(firstChildElt(elt, "narrowDays"))
def loadLocaleMap(doc, language_map, country_map):
result = {}
locale_list_elt = firstChildElt(doc.documentElement, "localeList")
locale_elt = firstChildElt(locale_list_elt, "locale")
while locale_elt:
locale = Locale(locale_elt)
language_id = languageNameToId(locale.language, language_map)
country_id = countryNameToId(locale.country, country_map)
result[(language_id, country_id)] = locale
locale_elt = nextSiblingElt(locale_elt, "locale")
return result
def compareLocaleKeys(key1, key2):
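    # NOTE: relies on compareLocaleKeys.default_map and .locale_map being attached to the
    # function by the caller before sorting (see main()).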
if key1 == key2:
return 0
if key1[0] == key2[0]:
l1 = compareLocaleKeys.locale_map[key1]
l2 = compareLocaleKeys.locale_map[key2]
if l1.language in compareLocaleKeys.default_map:
default = compareLocaleKeys.default_map[l1.language]
if l1.country == default:
return -1
if l2.country == default:
return 1
else:
return key1[0] - key2[0]
return key1[1] - key2[1]
def languageCount(language_id, locale_map):
result = 0
for key in locale_map.keys():
if key[0] == language_id:
result += 1
return result
class StringDataToken:
def __init__(self, index, length):
self.index = index
self.length = length
def __str__(self):
return " %d,%d " % (self.index, self.length)
class StringData:
def __init__(self):
self.data = []
self.hash = {}
def append(self, s):
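        # deduplicate: identical strings share a single slot in the packed data and reuse the same token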
if s in self.hash:
return self.hash[s]
lst = map(lambda x: hex(ord(x)), s)
token = StringDataToken(len(self.data), len(lst))
self.hash[s] = token
self.data += lst
return token
def escapedString(s):
result = ""
i = 0
while i < len(s):
if s[i] == '"':
result += '\\"'
i += 1
else:
result += s[i]
i += 1
s = result
line = ""
need_escape = False
result = ""
for c in s:
if ord(c) < 128 and (not need_escape or ord(c.lower()) < ord('a') or ord(c.lower()) > ord('f')):
line += c
need_escape = False
else:
line += "\\x%02x" % (ord(c))
need_escape = True
if len(line) > 80:
result = result + "\n" + "\"" + line + "\""
line = ""
line += "\\0"
result = result + "\n" + "\"" + line + "\""
if result[0] == "\n":
result = result[1:]
return result
def printEscapedString(s):
print escapedString(s);
def main():
doc = xml.dom.minidom.parse("locale.xml")
language_map = loadLanguageMap(doc)
country_map = loadCountryMap(doc)
default_map = loadDefaultMap(doc)
locale_map = loadLocaleMap(doc, language_map, country_map)
dupes = findDupes(language_map, country_map)
# Language enum
print "enum Language {"
language = ""
for key in language_map.keys():
language = fixedLanguageName(language_map[key][0], dupes)
print " " + language + " = " + str(key) + ","
print " LastLanguage = " + language
print "};"
print
# Country enum
print "enum Country {"
country = ""
for key in country_map.keys():
country = fixedCountryName(country_map[key][0], dupes)
print " " + country + " = " + str(key) + ","
print " LastCountry = " + country
print "};"
print
# Locale index
print "static const uint locale_index[] = {"
print " 0, // unused"
index = 0
for key in language_map.keys():
i = 0
count = languageCount(key, locale_map)
if count > 0:
i = index
index += count
print "%6d, // %s" % (i, language_map[key][0])
print " 0 // trailing 0"
print "};"
print
date_format_data = StringData()
time_format_data = StringData()
months_data = StringData()
standalone_months_data = StringData()
days_data = StringData()
am_data = StringData()
pm_data = StringData()
# Locale data
print "static const QLocalePrivate locale_data[] = {"
print "// lang terr dec group list prcnt zero minus plus exp sDtFmt lDtFmt sTmFmt lTmFmt ssMonth slMonth sMonth lMonth sDays lDays am,len pm,len"
locale_keys = locale_map.keys()
compareLocaleKeys.default_map = default_map
compareLocaleKeys.locale_map = locale_map
locale_keys.sort(compareLocaleKeys)
for key in locale_keys:
l = locale_map[key]
print " { %6d,%6d,%6d,%6d,%6d,%6d,%6d,%6d,%6d,%6d,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s }, // %s/%s" \
% (key[0], key[1],
l.decimal,
l.group,
l.listDelim,
l.percent,
l.zero,
l.minus,
l.plus,
l.exp,
date_format_data.append(l.shortDateFormat),
date_format_data.append(l.longDateFormat),
time_format_data.append(l.shortTimeFormat),
time_format_data.append(l.longTimeFormat),
standalone_months_data.append(l.standaloneShortMonths),
standalone_months_data.append(l.standaloneLongMonths),
standalone_months_data.append(l.standaloneNarrowMonths),
months_data.append(l.shortMonths),
months_data.append(l.longMonths),
months_data.append(l.narrowMonths),
days_data.append(l.standaloneShortDays),
days_data.append(l.standaloneLongDays),
days_data.append(l.standaloneNarrowDays),
days_data.append(l.shortDays),
days_data.append(l.longDays),
days_data.append(l.narrowDays),
am_data.append(l.am),
pm_data.append(l.pm),
l.language,
l.country)
print " { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 } // trailing 0s"
print "};"
print
# Date format data
#check_static_char_array_length("date_format", date_format_data.data)
print "static const ushort date_format_data[] = {"
print wrap_list(date_format_data.data)
print "};"
print
# Time format data
#check_static_char_array_length("time_format", time_format_data.data)
print "static const ushort time_format_data[] = {"
print wrap_list(time_format_data.data)
print "};"
print
# Months data
#check_static_char_array_length("months", months_data.data)
print "static const ushort months_data[] = {"
print wrap_list(months_data.data)
print "};"
print
# Standalone months data
#check_static_char_array_length("standalone_months", standalone_months_data.data)
print "static const ushort standalone_months_data[] = {"
print wrap_list(standalone_months_data.data)
print "};"
print
# Days data
#check_static_char_array_length("days", days_data.data)
print "static const ushort days_data[] = {"
print wrap_list(days_data.data)
print "};"
print
# AM data
#check_static_char_array_length("am", am_data.data)
print "static const ushort am_data[] = {"
print wrap_list(am_data.data)
print "};"
print
# PM data
#check_static_char_array_length("pm", am_data.data)
print "static const ushort pm_data[] = {"
print wrap_list(pm_data.data)
print "};"
print
# Language name list
print "static const char language_name_list[] ="
print "\"Default\\0\""
for key in language_map.keys():
print "\"" + language_map[key][0] + "\\0\""
print ";"
print
# Language name index
print "static const uint language_name_index[] = {"
print " 0, // Unused"
index = 8
for key in language_map.keys():
language = language_map[key][0]
print "%6d, // %s" % (index, language)
index += len(language) + 1
print "};"
print
# Country name list
print "static const char country_name_list[] ="
print "\"Default\\0\""
for key in country_map.keys():
if key == 0:
continue
print "\"" + country_map[key][0] + "\\0\""
print ";"
print
# Country name index
print "static const uint country_name_index[] = {"
print " 0, // AnyCountry"
index = 8
for key in country_map.keys():
if key == 0:
continue
country = country_map[key][0]
print "%6d, // %s" % (index, country)
index += len(country) + 1
print "};"
print
# Language code list
print "static const unsigned char language_code_list[] ="
print "\" \\0\" // Unused"
for key in language_map.keys():
code = language_map[key][1]
if len(code) == 2:
code += r"\0"
print "\"%2s\" // %s" % (code, language_map[key][0])
print ";"
print
# Country code list
print "static const unsigned char country_code_list[] ="
for key in country_map.keys():
print "\"%2s\" // %s" % (country_map[key][1], country_map[key][0])
print ";"
if __name__ == "__main__":
main()
| gpl-2.0 | 4,154,200,599,349,329,400 | 32.661142 | 248 | 0.57747 | false | 3.660725 | false | false | false |
turdusmerula/kipartman | kipartbase/swagger_server/models/part_offer_data.py | 1 | 7328 | # coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class PartOfferData(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, packaging_unit=None, quantity=None, min_order_quantity=None, unit_price=None, available_stock=None, packaging=None, currency=None, sku=None, updated=None):
"""
PartOfferData - a model defined in Swagger
:param packaging_unit: The packaging_unit of this PartOfferData.
:type packaging_unit: int
:param quantity: The quantity of this PartOfferData.
:type quantity: int
:param min_order_quantity: The min_order_quantity of this PartOfferData.
:type min_order_quantity: int
:param unit_price: The unit_price of this PartOfferData.
:type unit_price: float
:param available_stock: The available_stock of this PartOfferData.
:type available_stock: int
:param packaging: The packaging of this PartOfferData.
:type packaging: str
:param currency: The currency of this PartOfferData.
:type currency: str
:param sku: The sku of this PartOfferData.
:type sku: str
:param updated: The updated of this PartOfferData.
:type updated: str
"""
self.swagger_types = {
'packaging_unit': int,
'quantity': int,
'min_order_quantity': int,
'unit_price': float,
'available_stock': int,
'packaging': str,
'currency': str,
'sku': str,
'updated': str
}
self.attribute_map = {
'packaging_unit': 'packaging_unit',
'quantity': 'quantity',
'min_order_quantity': 'min_order_quantity',
'unit_price': 'unit_price',
'available_stock': 'available_stock',
'packaging': 'packaging',
'currency': 'currency',
'sku': 'sku',
'updated': 'updated'
}
self._packaging_unit = packaging_unit
self._quantity = quantity
self._min_order_quantity = min_order_quantity
self._unit_price = unit_price
self._available_stock = available_stock
self._packaging = packaging
self._currency = currency
self._sku = sku
self._updated = updated
@classmethod
def from_dict(cls, dikt):
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The PartOfferData of this PartOfferData.
:rtype: PartOfferData
"""
return deserialize_model(dikt, cls)
@property
def packaging_unit(self):
"""
Gets the packaging_unit of this PartOfferData.
:return: The packaging_unit of this PartOfferData.
:rtype: int
"""
return self._packaging_unit
@packaging_unit.setter
def packaging_unit(self, packaging_unit):
"""
Sets the packaging_unit of this PartOfferData.
:param packaging_unit: The packaging_unit of this PartOfferData.
:type packaging_unit: int
"""
self._packaging_unit = packaging_unit
@property
def quantity(self):
"""
Gets the quantity of this PartOfferData.
:return: The quantity of this PartOfferData.
:rtype: int
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""
Sets the quantity of this PartOfferData.
:param quantity: The quantity of this PartOfferData.
:type quantity: int
"""
self._quantity = quantity
@property
def min_order_quantity(self):
"""
Gets the min_order_quantity of this PartOfferData.
:return: The min_order_quantity of this PartOfferData.
:rtype: int
"""
return self._min_order_quantity
@min_order_quantity.setter
def min_order_quantity(self, min_order_quantity):
"""
Sets the min_order_quantity of this PartOfferData.
:param min_order_quantity: The min_order_quantity of this PartOfferData.
:type min_order_quantity: int
"""
self._min_order_quantity = min_order_quantity
@property
def unit_price(self):
"""
Gets the unit_price of this PartOfferData.
:return: The unit_price of this PartOfferData.
:rtype: float
"""
return self._unit_price
@unit_price.setter
def unit_price(self, unit_price):
"""
Sets the unit_price of this PartOfferData.
:param unit_price: The unit_price of this PartOfferData.
:type unit_price: float
"""
self._unit_price = unit_price
@property
def available_stock(self):
"""
Gets the available_stock of this PartOfferData.
:return: The available_stock of this PartOfferData.
:rtype: int
"""
return self._available_stock
@available_stock.setter
def available_stock(self, available_stock):
"""
Sets the available_stock of this PartOfferData.
:param available_stock: The available_stock of this PartOfferData.
:type available_stock: int
"""
self._available_stock = available_stock
@property
def packaging(self):
"""
Gets the packaging of this PartOfferData.
:return: The packaging of this PartOfferData.
:rtype: str
"""
return self._packaging
@packaging.setter
def packaging(self, packaging):
"""
Sets the packaging of this PartOfferData.
:param packaging: The packaging of this PartOfferData.
:type packaging: str
"""
self._packaging = packaging
@property
def currency(self):
"""
Gets the currency of this PartOfferData.
:return: The currency of this PartOfferData.
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""
Sets the currency of this PartOfferData.
:param currency: The currency of this PartOfferData.
:type currency: str
"""
self._currency = currency
@property
def sku(self):
"""
Gets the sku of this PartOfferData.
:return: The sku of this PartOfferData.
:rtype: str
"""
return self._sku
@sku.setter
def sku(self, sku):
"""
Sets the sku of this PartOfferData.
:param sku: The sku of this PartOfferData.
:type sku: str
"""
self._sku = sku
@property
def updated(self):
"""
Gets the updated of this PartOfferData.
:return: The updated of this PartOfferData.
:rtype: str
"""
return self._updated
@updated.setter
def updated(self, updated):
"""
Sets the updated of this PartOfferData.
:param updated: The updated of this PartOfferData.
:type updated: str
"""
self._updated = updated
| gpl-3.0 | 7,979,520,396,145,676,000 | 25.941176 | 178 | 0.586517 | false | 4.35413 | false | false | false |
sc3/cookcountyjail | scraper/inmates_scraper.py | 1 | 1938 | from monitor import MONITOR_VERBOSE_DMSG_LEVEL
from concurrent_base import ConcurrentBase
WORKERS_TO_START = 25
CCJ_INMATE_DETAILS_URL = 'http://www2.cookcountysheriff.org/search2/details.asp?jailnumber='
class InmatesScraper(ConcurrentBase):
def __init__(self, http, inmates, inmate_details_class, monitor, workers_to_start=WORKERS_TO_START):
super(InmatesScraper, self).__init__(monitor, workers_to_start)
self._http = http
self._inmates = inmates
self._inmate_details_class = inmate_details_class
def create_if_exists(self, arg):
self._put(self._create_if_exists, arg)
def _create_if_exists(self, inmate_id):
self._debug('check for inmate - %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
worked, inmate_details_in_html = self._http.get(CCJ_INMATE_DETAILS_URL + inmate_id)
if worked:
self._inmates.add(inmate_id, self._inmate_details_class(inmate_details_in_html))
def resurrect_if_found(self, inmate_id):
self._put(self._resurrect_if_found, inmate_id)
def _resurrect_if_found(self, inmate_id):
self._debug('check if really discharged inmate %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
worked, inmate_details_in_html = self._http.get(CCJ_INMATE_DETAILS_URL + inmate_id)
if worked:
self._debug('resurrected discharged inmate %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
self._inmates.update(inmate_id, self._inmate_details_class(inmate_details_in_html))
def update_inmate_status(self, inmate_id):
self._put(self._update_inmate_status, inmate_id)
def _update_inmate_status(self, inmate_id):
worked, inmate_details_in_html = self._http.get(CCJ_INMATE_DETAILS_URL + inmate_id)
if worked:
self._inmates.update(inmate_id, self._inmate_details_class(inmate_details_in_html))
else:
self._inmates.discharge(inmate_id)
| gpl-3.0 | -4,363,237,560,352,292,400 | 42.066667 | 104 | 0.674923 | false | 2.945289 | false | false | false |
jcchoiling/learningPython | s13/Day12/rabbit_rpc_server.py | 1 | 1296 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Janice Cheng
"""
Change this server-side code so that it lives inside a single class.
"""
import pika
import subprocess
credentials = pika.PlainCredentials('janice', 'janice123')
parameters = pika.ConnectionParameters('172.16.201.134', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
# use subprocess here to execute the command
def cmd_func(cmd):
cmd_data = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return cmd_data.stdout.read()
def on_request(ch, method, props, body):
n = body.decode()
print(" [.] Calling (%s)" % n)
response = cmd_func(n)
response = str(response,encoding='utf-8')
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=response)
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')  # fetch messages from the queue
print(" [x] Awaiting RPC requests")
channel.start_consuming() | gpl-3.0 | 7,917,972,075,697,951,000 | 25.717391 | 95 | 0.648208 | false | 3.164948 | false | false | false |
Roel/Gyrid | gyrid/core.py | 1 | 1416 | #-*- coding: utf-8 -*-
#
# This file belongs to Gyrid.
#
# Gyrid is a mobile device scanner.
# Copyright (C) 2013 Roel Huybrechts
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
def threaded(f):
"""
Wrapper to start a function within a new thread.
@param f The function to run inside the thread.
"""
def wrapper(*args):
t = threading.Thread(target=f, args=args)
t.start()
return wrapper
class ScanProtocol(object):
def __init__(self, mgr):
self.mgr = mgr
def hardware_added(self):
pass
def hardware_removed(self):
pass
class Scanner(object):
def __init__(self, mgr, protocol):
self.mgr = mgr
self.protocol = protocol
def start_scanning(self):
pass
def stop_scanning(self):
pass
| gpl-3.0 | -5,951,183,939,177,816,000 | 25.716981 | 71 | 0.673729 | false | 3.879452 | false | false | false |
Hossein-Noroozpour/PyHGEE | core/HGEMesh.py | 1 | 1774 | # coding=utf-8
"""
Module for handling OpenGL buffers.
"""
__author__ = "Hossein Noroozpour"
from OpenGL import GL
import ctypes
class Mesh():
"""
A class that hold mesh information about an actor
"""
def __init__(self, elements, indices):
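        # upload the vertex data (elements) and index data (indices) into GPU buffer objects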
temp_list = [0]
# noinspection PyCallingNonCallable
self.vbo = (ctypes.c_uint32 * 1)(*temp_list)
# noinspection PyCallingNonCallable
self.ibo = (ctypes.c_uint32 * 1)(*temp_list)
GL.glGenBuffers(1, self.vbo)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
# noinspection PyCallingNonCallable
GL.glBufferData(
GL.GL_ARRAY_BUFFER,
len(elements) * 4,
(ctypes.c_float * len(elements))(*elements),
GL.GL_STATIC_DRAW
)
GL.glGenBuffers(1, self.ibo)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo)
# noinspection PyCallingNonCallable
GL.glBufferData(
GL.GL_ELEMENT_ARRAY_BUFFER,
len(indices) * 4,
(ctypes.c_uint32 * len(indices))(*indices),
GL.GL_STATIC_DRAW
)
self.indices_number = ctypes.c_uint32(len(indices))
def __del__(self):
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, ctypes.c_uint32(0))
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, ctypes.c_uint32(0))
GL.glDeleteBuffers(1, self.vbo)
GL.glDeleteBuffers(1, self.ibo)
def bind(self):
"""
Bind itself.
"""
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo)
def draw(self):
"""
Draw.
"""
GL.glDrawElements(GL.GL_TRIANGLES, self.indices_number, GL.GL_UNSIGNED_INT, ctypes.c_uint32(0)) | mit | -570,652,836,376,594,700 | 30.140351 | 103 | 0.590192 | false | 3.340866 | false | false | false |
ajbouh/tfi | src/tfi/driver/tf/doc.py | 1 | 12857 | import tfi.json
import tensorflow as tf
import os.path
import tfi.data
import tfi.doc
from google.protobuf.json_format import ParseDict
from tfi.parse.docstring import GoogleDocstring
def _detect_method_documentation(*, bibliographer, model, method_name, method, signature_def):
# NOTE(adamb) Since we don't want to be parsing rst here, we'll just rewrite
# it to include detected citations. Expect that this rst will be parsed
# for real when rendering HTML.
docstr = GoogleDocstring(obj=method).result()
docstr_sections = docstr['sections']
text_sections = [v for k, v in docstr_sections if k == 'text']
overview = "\n".join([l for t in text_sections for l in t])
docstr['args'] = _enrich_docs_with_tensor_info(docstr['args'], signature_def.inputs)
docstr['returns'] = _enrich_docs_with_tensor_info(docstr['returns'], signature_def.outputs)
return tfi.doc.MethodDocumentation(
name=method_name,
overview=bibliographer.rewrite(overview),
inputs=docstr['args'],
outputs=docstr['returns'],
examples=[
tfi.doc.MethodDataDocumentation.generate(
method=getattr(model, method_name),
inputs={
input_name: eval("\n".join(input_val_lines), {}, {'m': model, 'tfi': tfi})
for input_name, _, input_val_lines in docstr['example args']
},
),
],
)
def detect_model_documentation(model):
source = tfi.doc.ModelSource.detect(model)
bibliographer = tfi.doc.Bibliographer()
def maybeattr(o, attr, default=None):
return getattr(o, attr) if o and hasattr(o, attr) else default
# NOTE(adamb) Since we don't want to be parsing rst here, we'll just rewrite
# it to include detected citations. Expect that this rst will be parsed
# for real when rendering HTML.
model_docstr = GoogleDocstring(obj=model).result()
model_docstr_sections = model_docstr['sections']
text_sections = [v for k, v in model_docstr_sections if k == 'text']
overview = "\n".join([l for t in text_sections for l in t])
return tfi.doc.ModelDocumentation(
name=maybeattr(model, '__name__', type(model).__name__),
hyperparameters=maybeattr(model, '__tfi_hyperparameters__', []),
overview=bibliographer.rewrite(overview),
implementation_notes=[],
authors=[
*[
{
"name": author['name'],
"url": author['url'],
"role_noun": "Commits",
"role_url": author['commits_url'],
}
for author in maybeattr(source, 'authors', [])
],
],
source=source,
facets_overview_proto=maybeattr(model, '__tfi_facets_overview_proto__'),
methods=[
_detect_method_documentation(
model=model,
bibliographer=bibliographer,
method_name=method_name,
method=getattr(model, method_name),
signature_def=signature_def,
)
for method_name, signature_def in maybeattr(model, '__tfi_signature_defs__').items()
],
references=bibliographer.references(),
)
def _tensor_info_str(tensor):
if tensor.shape.ndims is None:
return '%s ?' % tensor.dtype.name
return '%s <%s>' % (
tensor.dtype.name,
', '.join(['?' if n is None else str(n) for n in tensor.shape.as_list()]),
)
def _enrich_docs_with_tensor_info(doc_fields, tensor_dict):
existing = {k: v for k, _, v in doc_fields}
return [
(name, _tensor_info_str(tensor), existing.get(name, ''))
for name, tensor in tensor_dict.items()
]
class MethodDocumentationLayout(object):
def __init__(self, base_path, assets_extra_path):
self.assets_extra_path = assets_extra_path
self.metadata_path = os.path.join(base_path, 'metadata.json')
self._base_path = base_path
def file(self, subpath):
return os.path.join(self._base_path, subpath)
class ModelDocumentationLayout(object):
def __init__(self, model_dir):
self.basename = os.path.basename(model_dir)
self.assets_extra_path = os.path.join(model_dir, 'assets.extra')
self.doc_path = os.path.join(self.assets_extra_path, 'doc')
self.metadata_path = os.path.join(self.doc_path, 'metadata.json')
self.methods_path = os.path.join(self.doc_path, 'methods')
def method(self, method_name):
return MethodDocumentationLayout(
os.path.join(self.methods_path, method_name),
self.assets_extra_path,
)
def _read_json_else(path, default):
if not os.path.exists(path):
return default
with open(path) as f:
return tfi.json.load(f)
def _write_json(path, obj):
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w') as f:
tfi.json.dump(obj, f)
class MethodDataDocumentationCodec(object):
def __init__(self, method_layout):
self._layout = method_layout
def write(self, method_example):
_write_json(
self._layout.file('inputs.json'),
{
name: value
for name, value in method_example.inputs().items()
}
)
if method_example.outputs() is not None:
_write_json(
self._layout.file('outputs.json'),
{
name: value
for name, value in method_example.outputs().items()
}
)
def read(self, signature_def):
return tfi.doc.MethodDataDocumentation(
inputs=self._detect(
lambda: self._read_json_tf_example_from(
signature_def.inputs,
'inputs.pb.json',
),
lambda: self._read_json_example_from(
signature_def.inputs,
'inputs.json',
),
),
outputs=self._detect(
lambda: self._read_json_tf_example_from(
signature_def.outputs,
'outputs.pb.json',
),
lambda: self._read_json_example_from(
signature_def.outputs,
'outputs.json',
),
),
# 'example result': _detect(
# lambda: _read_json_tf_example_from(
# signature_def.outputs,
# 'outputs.pb.json',
# ),
# lambda: _read_json_example_from(
# signature_def.outputs,
# 'outputs.json',
# ),
# ),
)
def _detect(self, *fns):
for fn in fns:
result = fn()
if result is not None:
return result
def _feature_for_tensor_info(self, tensor_info):
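        # drop the leading (batch) dimension and build a fixed- or variable-length feature spec
        # depending on whether the trailing dimension size is known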
tensor_shape = tensor_info.tensor_shape.dim[1:]
dtype = tf.DType(tensor_info.dtype)
if tensor_shape[-1].size != -1:
return tf.FixedLenFeature(dtype=dtype, shape=[dim.size for dim in tensor_shape])
return tf.VarLenFeature(dtype=dtype)
def _read_json_tf_example_from(self, tensor_infos, subpath):
path = self._layout.file(subpath)
if not os.path.exists(path):
return None
with open(path) as f:
example_dict = tfi.json.load(f)
with tf.Session(graph=tf.Graph()) as session:
example_features = {
name: self._feature_for_tensor_info(tensor_info)
for name, tensor_info in tensor_infos.items()
}
return session.run(
tf.parse_single_example(
ParseDict(example_dict, tf.train.Example()).SerializeToString(),
features=example_features))
def _read_json_example_from(self, tensor_infos, subpath):
path = self._layout.file(subpath)
if not os.path.exists(path):
return None
with open(path) as f:
return tfi.data.json(
f.read(),
assets_extra_root=self._layout.assets_extra_path)
class MethodDocumentationCodec(object):
def __init__(self, method_name, method_layout):
self._name = method_name
self._layout = method_layout
def write(self, method_doc):
metadata = {
'documentation': {
'inputs': {
name: doc
for name, tensor_info, doc in method_doc.inputs()
},
'outputs': {
name: doc
for name, tensor_info, doc in method_doc.outputs()
},
},
}
        MethodDataDocumentationCodec(self._layout).write(method_doc.examples()[0])
_write_json(self._layout.metadata_path, metadata)
def read(self, signature_def):
metadata = _read_json_else(self._layout.metadata_path, {})
doc = metadata.get('documentation', {})
doc_inputs = doc.get('inputs', {})
doc_outputs = doc.get('outputs', {})
return tfi.doc.MethodDocumentation(
name=self._name,
overview=metadata.get('overview', None),
inputs=[
(name, self._tensor_info_str(ti), doc_inputs.get(name, ''))
for name, ti in signature_def.inputs.items()
],
outputs=[
(name, self._tensor_info_str(ti), doc_outputs.get(name, ''))
for name, ti in signature_def.outputs.items()
],
examples=[
MethodDataDocumentationCodec(self._layout).read(signature_def),
],
)
def _tensor_info_str(self, tensor_info):
if tensor_info.tensor_shape.unknown_rank:
return '%s ?' % tf.as_dtype(tensor_info.dtype).name
return '%s <%s>' % (
tf.as_dtype(tensor_info.dtype).name,
', '.join([
'?' if dim.size == -1 else str(dim.size)
for dim in tensor_info.tensor_shape.dim
]),
)
class ModelDocumentationCodec(object):
def __init__(self, path):
self._layout = ModelDocumentationLayout(path)
def _method_codecs(self, method_names):
return [
(
method_name,
MethodDocumentationCodec(
method_name,
self._layout.method(method_name),
)
)
for method_name in method_names
]
def write(self, model_doc):
metadata = {
'name': model_doc.name(),
'overview': model_doc.overview(),
'hyperparameters': [
(name, str(val_type), val, docs)
for name, val_type, val, docs in model_doc.hyperparameters()
],
'authors': model_doc.authors(),
'references': model_doc.references(),
'implementation_notes': model_doc.implementation_notes(),
'source': model_doc.source(),
'facets_overview_proto': None, # model_doc.facets_overview_proto(),
}
methods = model_doc.methods()
for method_name, method_codec in self._method_codecs(methods.keys()):
method_codec.write(methods[method_name])
_write_json(self._layout.metadata_path, metadata)
def read(self, signature_defs):
metadata = _read_json_else(self._layout.metadata_path, {})
return tfi.doc.ModelDocumentation(
# TODO(adamb) Should be transformed to the below structure, with val_type_str -> val_type
# (name, val_type, val, docs)
hyperparameters=metadata.get('hyperparameters', []),
name=metadata.get('name', self._layout.basename),
overview=metadata.get('overview', None),
methods=[
method_codec.read(signature_defs[method_name])
for method_name, method_codec in self._method_codecs(signature_defs.keys())
],
authors=metadata.get('authors', []),
references=metadata.get('references', {}),
implementation_notes=metadata.get('implementation_notes', []),
source=metadata.get('source', []),
facets_overview_proto=None,
)
def read(path, signature_defs):
return ModelDocumentationCodec(path).read(signature_defs)
def write(path, model_doc):
return ModelDocumentationCodec(path).write(model_doc) | mit | 4,932,963,218,364,086,000 | 35.842407 | 101 | 0.542584 | false | 4.09589 | false | false | false |
commaai/openpilot | selfdrive/manager/helpers.py | 1 | 1058 | import os
import sys
import fcntl
import errno
import signal
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
  # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
| mit | -6,387,018,691,051,045,000 | 26.842105 | 98 | 0.648393 | false | 3.574324 | false | false | false |
ONSdigital/ras-frontstage | frontstage/error_handlers.py | 1 | 2963 | import logging
from flask import render_template, request, url_for
from flask_wtf.csrf import CSRFError
from requests.exceptions import ConnectionError
from structlog import wrap_logger
from werkzeug.utils import redirect
from frontstage import app
from frontstage.common.session import Session
from frontstage.exceptions.exceptions import (
ApiError,
IncorrectAccountAccessError,
InvalidEqPayLoad,
JWTValidationError,
)
logger = wrap_logger(logging.getLogger(__name__))
@app.errorhandler(400)
def client_error(error):
logger.info("Client error", url=request.url, status_code=error.code)
return render_template("errors/400-error.html"), 400
@app.errorhandler(404)
def not_found_error(error):
logger.info("Not found error", url=request.url, status_code=error.code)
return render_template("errors/404-error.html"), 404
@app.errorhandler(CSRFError)
def handle_csrf_error(error):
logger.warning("CSRF token has expired", error_message=error.description, status_code=error.code)
session_key = request.cookies.get("authorization")
session_handler = Session.from_session_key(session_key)
encoded_jwt = session_handler.get_encoded_jwt()
if not encoded_jwt:
return render_template("errors/400-error.html"), 400
else:
return redirect(url_for("sign_in_bp.logout", csrf_error=True, next=request.url))
@app.errorhandler(ApiError)
def api_error(error):
logger.error(
error.message or "Api failed to retrieve required data",
url=request.url,
status_code=500,
api_url=error.url,
api_status_code=error.status_code,
**error.kwargs
)
return render_template("errors/500-error.html"), 500
@app.errorhandler(ConnectionError)
def connection_error(error):
logger.error("Failed to connect to external service", url=request.url, status_code=500, api_url=error.request.url)
return render_template("errors/500-error.html"), 500
@app.errorhandler(JWTValidationError)
def jwt_validation_error(error):
logger.error("JWT validation error", url=request.url, status_code=403)
return render_template("errors/403-error.html"), 403
@app.errorhandler(Exception)
def server_error(error):
logger.error("Generic exception generated", exc_info=error, url=request.url, status_code=500)
return render_template("errors/500-error.html"), getattr(error, "code", 500)
@app.errorhandler(InvalidEqPayLoad)
def eq_error(error):
logger.error("Failed to generate EQ URL", error=error.message, url=request.url, status_code=500)
return render_template("errors/500-error.html"), 500
@app.errorhandler(IncorrectAccountAccessError)
def secure_message_forbidden_error(error):
logger.info(
"Attempt to access secure message without correct session permission",
url=request.url,
message=error.message,
thread_id=error.thread,
)
return render_template("errors/403-incorrect-account-error.html")
| mit | 3,384,516,966,222,632,400 | 31.56044 | 118 | 0.733378 | false | 3.640049 | false | false | false |
manmedia/PythonToggleCharacterCases | switchLetterCase.py | 1 | 4097 | #
# A module capable of changing alphabet letter cases.
#
# It uses very generic Python functionality to ensure
# backward compatibility.
#
#
# The programme processes a set of characters by default
# If no character is entered for processing, the programme
# simply exits. This can be turned off by setting 'a' to 1
# (for all vowels) or 2 (for all consonants).
#
#
#
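# Example invocations (file names are illustrative only):
#   python switchLetterCase.py -i input.txt -o output.txt -c a,b,c   # toggle only a, b and c
#   python switchLetterCase.py -i input.txt -o output.txt -a 1       # toggle all vowels
#   python switchLetterCase.py -i input.txt -o output.txt -a 2       # toggle all consonants
#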
import os;
import sys;
import re;
import string;
from re import sub;
#
#! Get parsed arguments
#
def get_parsed_args():
# Pre-allocate
parser = "";
args = "";
if sys.version_info < (2,7):
from optparse import OptionParser
parser = OptionParser();
parser.add_option("-i", "--input_path", type=str, help="Input file path with extension");
parser.add_option("-o", "--output_path", type=str, help="Output file path with extension");
parser.add_option("-a", "--all_chars", type=int, help="Switch a type of characters (all vowels or cons.), disable=0, vowel=1, cons=2", default=0);
parser.add_option("-c", "--c", type=str, help="Characters to process (comma-separated list, no whitespace)", default="");
else:
from argparse import ArgumentParser
parser = ArgumentParser();
parser.add_argument("-i", "--input_path", type=str, help="Input file path with extension");
parser.add_argument("-o", "--output_path", type=str, help="Output file path with extension");
parser.add_argument("-a", "--all_chars", type=int, help="Switch a type of characters (all vowels or cons.), disable=0, vowel=1, cons=2", default=0);
parser.add_argument("-c", "--c", type=str, help="Characters to process (comma-separated list, no whitespace)", default="");
    # optparse (pre-2.7) returns an (options, args) tuple, argparse a Namespace
    args = parser.parse_args()[0] if sys.version_info < (2, 7) else parser.parse_args();
    args = vars(args);
##print(option)
##print(args)
##print(type(option))
##print(option.c)
##print(option.all_chars)
##print(option.input_path)
##print(option.output_path)
# Safety assertions
assert (args['all_chars'] >= 0 and args['all_chars'] <= 2), \
"Invalid value! programme exiting!\n type python switchLetterCase.py -h for information on arguments"
# If nothing to process, programme will exit
if (args['all_chars'] == 0) and \
((args['c'] == "") or \
(args['c'] == " ") or \
args['all_chars'] is None or \
all([x is ',' for x in args['c']])):
print(".....Nothing to process, programme exiting.\n\n");
sys.exit(0);
return args;
#
#! Main processor function
#
def process_files(args):
try:
# Get handlers
f1 = open(args['input_path'], 'r')
f2 = open(args['output_path'], 'w');
# Initial setup
line_to_write = ""
if (args['all_chars'] == 0): # process characters in the list
gg = "".join(args['c'])
for line in f1:
g = [y.upper() if y in gg else y.lower() if y.upper() in gg else y for y in line];
line_to_write = "".join(g);
f2.write(line_to_write);
elif (args['all_chars'] == 1): # process vowels only
vowels = sub('[^aeiou]+','',string.ascii_lowercase)
for line in f1:
g = [y.upper() if y in vowels else y.lower() if y.upper() in vowels else y for y in line];
line_to_write = "".join(g);
f2.write(line_to_write);
        elif (args['all_chars'] == 2): # process consonants only
            consonants = sub('[aeiou]+','',string.ascii_lowercase)
            for line in f1:
                g = [y.upper() if y in consonants else y.lower() if y.upper() in consonants else y for y in line];
                line_to_write = "".join(g);
                f2.write(line_to_write);
# Print some INFO
print("All characters toggled! Terminating programme......\n\n");
f1.close();
f2.close();
    except Exception as e:  # IOError, ValueError and WindowsError are all subclasses of Exception
print(e);
finally:
del f1, f2
| apache-2.0 | -4,100,419,698,377,441,000 | 31.259843 | 156 | 0.569685 | false | 3.632092 | false | false | false |
Azure/azure-storage-python | azure-storage-common/azure/storage/common/_error.py | 1 | 9025 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from sys import version_info
if version_info < (3,):
def _str(value):
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
else:
_str = str
def _to_str(value):
return _str(value) if value is not None else None
from azure.common import (
AzureHttpError,
AzureConflictHttpError,
AzureMissingResourceHttpError,
AzureException,
)
from ._constants import (
_ENCRYPTION_PROTOCOL_V1,
)
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_STORAGE_MISSING_INFO = \
'You need to provide an account name and either an account_key or sas_token when creating a storage service.'
_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \
'The emulator does not support the file service.'
_ERROR_ACCESS_POLICY = \
'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
'instance'
_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.'
_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.'
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.'
_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
_ERROR_START_END_NEEDED_FOR_MD5 = \
'Both end_range and start_range need to be specified ' + \
'for getting content MD5.'
_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \
'Getting content MD5 for a range greater than 4MB ' + \
'is not supported.'
_ERROR_MD5_MISMATCH = \
'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'
_ERROR_TOO_MANY_ACCESS_POLICIES = \
'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
_ERROR_OBJECT_INVALID = \
'{0} does not define a complete interface. Value of {1} is either missing or invalid.'
_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \
'Encryption version is not supported.'
_ERROR_DECRYPTION_FAILURE = \
'Decryption failed'
_ERROR_ENCRYPTION_REQUIRED = \
'Encryption required but no key was provided.'
_ERROR_DECRYPTION_REQUIRED = \
'Decryption required but neither key nor resolver was provided.' + \
    ' If you do not want to decrypt, please do not set the require encryption flag.'
_ERROR_INVALID_KID = \
'Provided or resolved key-encryption-key does not match the id of key used to encrypt.'
_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \
'Specified encryption algorithm is not supported.'
_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \
' for this method.'
_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metadata.' + \
'Data was either not encrypted or metadata has been lost.'
def _dont_fail_on_exist(error):
''' don't throw exception if the resource exists.
This is called by create_* APIs with fail_on_exist=False'''
if isinstance(error, AzureConflictHttpError):
return False
else:
raise error
def _dont_fail_not_exist(error):
''' don't throw exception if the resource doesn't exist.
This is called by create_* APIs with fail_on_exist=False'''
if isinstance(error, AzureMissingResourceHttpError):
return False
else:
raise error
def _http_error_handler(http_error):
''' Simple error handler for azure.'''
message = str(http_error)
error_code = None
if 'x-ms-error-code' in http_error.respheader:
error_code = http_error.respheader['x-ms-error-code']
message += ' ErrorCode: ' + error_code
if http_error.respbody is not None:
message += '\n' + http_error.respbody.decode('utf-8-sig')
ex = AzureHttpError(message, http_error.status)
ex.error_code = error_code
raise ex
def _validate_type_bytes(param_name, param):
if not isinstance(param, bytes):
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _validate_type_bytes_or_stream(param_name, param):
if not (isinstance(param, bytes) or hasattr(param, 'read')):
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
def _validate_not_none(param_name, param):
if param is None:
raise ValueError(_ERROR_VALUE_NONE.format(param_name))
def _validate_content_match(server_md5, computed_md5):
if server_md5 != computed_md5:
raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5))
def _validate_access_policies(identifiers):
if identifiers and len(identifiers) > 5:
raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES)
def _validate_key_encryption_key_wrap(kek):
# Note that None is not callable and so will fail the second clause of each check.
if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
def _validate_key_encryption_key_unwrap(kek):
if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
def _validate_encryption_required(require_encryption, kek):
if require_encryption and (kek is None):
raise ValueError(_ERROR_ENCRYPTION_REQUIRED)
def _validate_decryption_required(require_encryption, kek, resolver):
if (require_encryption and (kek is None) and
(resolver is None)):
raise ValueError(_ERROR_DECRYPTION_REQUIRED)
def _validate_encryption_protocol_version(encryption_protocol):
if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol):
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
def _validate_kek_id(kid, resolved_id):
if not (kid == resolved_id):
raise ValueError(_ERROR_INVALID_KID)
def _validate_encryption_unsupported(require_encryption, key_encryption_key):
if require_encryption or (key_encryption_key is not None):
raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
def _validate_user_delegation_key(user_delegation_key):
_validate_not_none('user_delegation_key.signed_oid', user_delegation_key.signed_oid)
_validate_not_none('user_delegation_key.signed_tid', user_delegation_key.signed_tid)
_validate_not_none('user_delegation_key.signed_start', user_delegation_key.signed_start)
_validate_not_none('user_delegation_key.signed_expiry', user_delegation_key.signed_expiry)
_validate_not_none('user_delegation_key.signed_version', user_delegation_key.signed_version)
_validate_not_none('user_delegation_key.signed_service', user_delegation_key.signed_service)
_validate_not_none('user_delegation_key.value', user_delegation_key.value)
# wraps a given exception with the desired exception type
def _wrap_exception(ex, desired_type):
msg = ""
if len(ex.args) > 0:
msg = ex.args[0]
if version_info >= (3,):
# Automatic chaining in Python 3 means we keep the trace
return desired_type(msg)
else:
# There isn't a good solution in 2 for keeping the stack trace
# in general, or that will not result in an error in 3
# However, we can keep the previous error type and message
# TODO: In the future we will log the trace
return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
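# Usage sketch (illustrative): _wrap_exception(ValueError("bad key"), AzureSigningError)
# returns an AzureSigningError carrying the original message.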
class AzureSigningError(AzureException):
"""
Represents a fatal error when attempting to sign a request.
In general, the cause of this exception is user error. For example, the given account key is not valid.
Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
"""
pass
| mit | 5,352,687,308,642,604,000 | 40.399083 | 126 | 0.688753 | false | 3.62887 | false | false | false |
Nonse/Feel_Like | reservations/models.py | 1 | 3171 | from django.db import models
class Reservation(models.Model):
start_time = models.DateTimeField()
end_time = models.DateTimeField()
customer = models.ForeignKey('Customer')
coach = models.ForeignKey('Coach')
product = models.ForeignKey('Product')
location = models.CharField(max_length=200)
location_price = models.DecimalField(max_digits=10, decimal_places=2)
participants = models.IntegerField()
amount = models.DecimalField(max_digits=10, decimal_places=2)
invoice = models.ForeignKey('Invoice', null=True, blank=True, on_delete=models.SET_NULL)
def __unicode__(self):
return u'%s-%s' % (self.start_time.strftime('%Y-%m-%d, %H:%M'), self.end_time.strftime('%H:%M'))
class Customer(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.EmailField(max_length=254)
street_address = models.CharField(max_length=200)
postcode = models.CharField(max_length=5) #Finnish postal code length
city = models.CharField(max_length=100)
phone = models.CharField(max_length=100, null=True, blank=True)
discount = models.DecimalField(max_digits=5, decimal_places=2) #999,99 max
class Meta:
ordering = ['last_name', 'first_name']
def __unicode__(self): #unicode for (finnish) letters
return u'%s, %s' % (self.last_name, self.first_name)
class Coach(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
phone = models.CharField(max_length=100)
class Meta:
verbose_name_plural = 'Coaches'
def __unicode__(self):
return u'%s, %s' % (self.last_name, self.first_name)
class Product(models.Model):
name = models.CharField(max_length=100)
price = models.DecimalField(max_digits=10, decimal_places=2)
vat = models.DecimalField(max_digits=10, decimal_places=2)
def __unicode__(self):
return u'%s' % (self.name)
class Invoice(models.Model):
date = models.DateField()
total = models.DecimalField(max_digits=10, decimal_places=2)
customer = models.ForeignKey('Customer', null=True, blank=True)
ref_number = models.CharField(max_length=20, null=True, blank=True)
due_date = models.DateField(null=True, blank=True)
company = models.ForeignKey('Company')
def __unicode__(self):
return u'{}: {}'.format(
self.date.strftime('%Y-%m-%d'), self.customer
)
class Company(models.Model):
name = models.CharField(max_length=100)
street_address = models.CharField(max_length=200)
postcode = models.CharField(max_length=5) #Finnish postal code length
city = models.CharField(max_length=100)
contact_person = models.CharField(max_length=100)
phone = models.CharField(max_length=100)
business_id = models.CharField(max_length=100, null=True, blank=True)
iban = models.CharField(max_length=100, null=True, blank=True)
location_vat = models.DecimalField(max_digits=10, decimal_places=2)
class Meta:
verbose_name_plural = 'Company'
def __unicode__(self):
return u'%s' % (self.name)
| apache-2.0 | 4,771,702,382,574,460,000 | 35.448276 | 104 | 0.675497 | false | 3.454248 | false | false | false |
shoaibkamil/asp | asp/jit/asp_module.py | 1 | 19070 | import codepy, codepy.jit, codepy.toolchain, codepy.bpl, codepy.cuda
from asp.util import *
import asp.codegen.cpp_ast as cpp_ast
import pickle
from variant_history import *
import sqlite3
import asp
import scala_module
class ASPDB(object):
def __init__(self, specializer, persistent=False):
"""
specializer must be specified so we avoid namespace collisions.
"""
self.specializer = specializer
if persistent:
# create db file or load db
# create a per-user cache directory
import tempfile, os
if os.name == 'nt':
username = os.environ['USERNAME']
else:
username = os.environ['LOGNAME']
self.cache_dir = tempfile.gettempdir() + "/asp_cache_" + username
if not os.access(self.cache_dir, os.F_OK):
os.mkdir(self.cache_dir)
self.db_file = self.cache_dir + "/aspdb.sqlite3"
self.connection = sqlite3.connect(self.db_file)
self.connection.execute("PRAGMA temp_store = MEMORY;")
self.connection.execute("PRAGMA synchronous = OFF;")
else:
self.db_file = None
self.connection = sqlite3.connect(":memory:")
def create_specializer_table(self):
self.connection.execute('create table '+self.specializer+' (fname text, variant text, key text, perf real)')
self.connection.commit()
def close(self):
self.connection.close()
def table_exists(self):
"""
Test if a table corresponding to this specializer exists.
"""
cursor = self.connection.cursor()
cursor.execute('select name from sqlite_master where name="%s"' % self.specializer)
result = cursor.fetchall()
return len(result) > 0
def insert(self, fname, variant, key, value):
if (not self.table_exists()):
self.create_specializer_table()
self.connection.execute('insert into '+self.specializer+' values (?,?,?,?)',
(fname, variant, key, value))
self.connection.commit()
def get(self, fname, variant=None, key=None):
"""
Return a list of entries. If key and variant not specified, all entries from
fname are returned.
"""
if (not self.table_exists()):
self.create_specializer_table()
return []
cursor = self.connection.cursor()
query = "select * from %s where fname=?" % (self.specializer,)
params = (fname,)
if variant:
query += " and variant=?"
params += (variant,)
if key:
query += " and key=?"
params += (key,)
cursor.execute(query, params)
return cursor.fetchall()
def update(self, fname, variant, key, value):
"""
Updates an entry in the db. Overwrites the timing information with value.
If the entry does not exist, does an insert.
"""
if (not self.table_exists()):
self.create_specializer_table()
self.insert(fname, variant, key, value)
return
# check if the entry exists
query = "select count(*) from "+self.specializer+" where fname=? and variant=? and key=?;"
cursor = self.connection.cursor()
cursor.execute(query, (fname, variant, key))
count = cursor.fetchone()[0]
# if it exists, do an update, otherwise do an insert
if count > 0:
query = "update "+self.specializer+" set perf=? where fname=? and variant=? and key=?"
self.connection.execute(query, (value, fname, variant, key))
self.connection.commit()
else:
self.insert(fname, variant, key, value)
def delete(self, fname, variant, key):
"""
Deletes an entry from the db.
"""
if (not self.table_exists()):
return
query = "delete from "+self.specializer+" where fname=? and variant=? and key=?"
self.connection.execute(query, (fname, variant, key))
self.connection.commit()
def destroy_db(self):
"""
Delete the database.
"""
if not self.db_file:
return True
import os
try:
self.close()
os.remove(self.db_file)
except:
return False
else:
return True
class SpecializedFunction(object):
"""
Class that encapsulates a function that is specialized. It keeps track of variants,
their timing information, which backend, functions to determine if a variant
can run, as well as a function to generate keys from parameters.
The signature for any run_check function is run(*args, **kwargs).
The signature for the key function is key(self, *args, **kwargs), where the args/kwargs are
what are passed to the specialized function.
"""
def __init__(self, name, backend, db, variant_names=[], variant_funcs=[], run_check_funcs=[],
key_function=None, call_policy=None):
self.name = name
self.backend = backend
self.db = db
self.variant_names = []
self.variant_funcs = []
self.run_check_funcs = []
self.call_policy = call_policy
if variant_names != [] and run_check_funcs == []:
run_check_funcs = [lambda *args,**kwargs: True]*len(variant_names)
for x in xrange(len(variant_names)):
self.add_variant(variant_names[x], variant_funcs[x], run_check_funcs[x])
if key_function:
self.key = key_function
def key(self, *args, **kwargs):
"""
Function to generate keys. This should almost always be overridden by a specializer, to make
sure the information stored in the key is actually useful.
"""
import hashlib
return hashlib.md5(str(args)+str(kwargs)).hexdigest()
def add_variant(self, variant_name, variant_func, run_check_func=lambda *args,**kwargs: True):
"""
Add a variant of this function. Must have same call signature. Variant names must be unique.
The variant_func parameter should be a CodePy Function object or a string defining the function.
The run_check_func parameter should be a lambda function with signature run(*args,**kwargs).
"""
if variant_name in self.variant_names:
raise Exception("Attempting to add a variant with an already existing name %s to %s" %
(variant_name, self.name))
self.variant_names.append(variant_name)
self.variant_funcs.append(variant_func)
self.run_check_funcs.append(run_check_func)
if isinstance(self.backend.module, scala_module.ScalaModule):
self.backend.module.add_to_module(variant_func)
self.backend.module.add_to_init(variant_name)
elif isinstance(variant_func, basestring):
if isinstance(self.backend.module, codepy.cuda.CudaModule):#HACK because codepy's CudaModule doesn't have add_to_init()
self.backend.module.boost_module.add_to_module([cpp_ast.Line(variant_func)])
self.backend.module.boost_module.add_to_init([cpp_ast.Statement("boost::python::def(\"%s\", &%s)" % (variant_name, variant_name))])
else:
self.backend.module.add_to_module([cpp_ast.Line(variant_func)])
if self.call_policy == "python_gc":
self.backend.module.add_to_init([cpp_ast.Statement("boost::python::def(\"%s\", &%s, boost::python::return_value_policy<boost::python::manage_new_object>())" % (variant_name, variant_name))])
else:
self.backend.module.add_to_init([cpp_ast.Statement("boost::python::def(\"%s\", &%s)" % (variant_name, variant_name))])
else:
self.backend.module.add_function(variant_func)
self.backend.dirty = True
def pick_next_variant(self, *args, **kwargs):
"""
Logic to pick the next variant to run. If all variants have been run, then this should return the
fastest variant.
"""
# get variants that have run
already_run = self.db.get(self.name, key=self.key(*args, **kwargs))
if already_run == []:
already_run_variant_names = []
else:
already_run_variant_names = map(lambda x: x[1], already_run)
# which variants haven't yet run
candidates = set(self.variant_names) - set(already_run_variant_names)
# of these candidates, which variants *can* run
for x in candidates:
if self.run_check_funcs[self.variant_names.index(x)](*args, **kwargs):
return x
# if none left, pick fastest from those that have already run
return sorted(already_run, lambda x,y: cmp(x[3],y[3]))[0][1]
def __call__(self, *args, **kwargs):
"""
Calling an instance of SpecializedFunction will actually call either the next variant to test,
or the already-determined best variant.
"""
if self.backend.dirty:
self.backend.compile()
which = self.pick_next_variant(*args, **kwargs)
import time
start = time.time()
ret_val = self.backend.get_compiled_function(which).__call__(*args, **kwargs)
elapsed = time.time() - start
#FIXME: where should key function live?
#print "doing update with %s, %s, %s, %s" % (self.name, which, self.key(args, kwargs), elapsed)
self.db.update(self.name, which, self.key(*args, **kwargs), elapsed)
#TODO: Should we use db.update instead of db.insert to avoid O(N) ops on already_run_variant_names = map(lambda x: x[1], already_run)?
return ret_val
class HelperFunction(SpecializedFunction):
"""
HelperFunction defines a SpecializedFunction that is not timed, and usually not called directly
(although it can be).
"""
def __init__(self, name, func, backend):
self.name = name
self.backend = backend
self.variant_names, self.variant_funcs, self.run_check_funcs = [], [], []
self.call_policy = None
self.add_variant(name, func)
def __call__(self, *args, **kwargs):
if self.backend.dirty:
self.backend.compile()
return self.backend.get_compiled_function(self.name).__call__(*args, **kwargs)
class ASPBackend(object):
"""
Class to encapsulate a backend for Asp. A backend is the combination of a CodePy module
(which contains the actual functions) and a CodePy compiler toolchain.
"""
def __init__(self, module, toolchain, cache_dir, host_toolchain=None):
self.module = module
self.toolchain = toolchain
self.host_toolchain = host_toolchain
self.compiled_module = None
self.cache_dir = cache_dir
self.dirty = True
self.compilable = True
def compile(self):
"""
Trigger a compile of this backend. Note that CUDA needs to know about the C++
backend as well.
"""
if not self.compilable: return
if isinstance(self.module, codepy.cuda.CudaModule):
self.compiled_module = self.module.compile(self.host_toolchain,
self.toolchain,
debug=True, cache_dir=self.cache_dir)
else:
self.compiled_module = self.module.compile(self.toolchain,
debug=True, cache_dir=self.cache_dir)
self.dirty = False
def get_compiled_function(self, name):
"""
Return a callable for a raw compiled function (that is, this must be a variant name rather than
a function name).
"""
try:
func = getattr(self.compiled_module, name)
except:
raise AttributeError("Function %s not found in compiled module." % (name,))
return func
class ASPModule(object):
"""
ASPModule is the main coordination class for specializers. A specializer creates an ASPModule to contain
all of its specialized functions, and adds functions/libraries/etc to the ASPModule.
ASPModule uses ASPBackend instances for each backend, ASPDB for its backing db for recording timing info,
and instances of SpecializedFunction and HelperFunction for specialized and helper functions, respectively.
"""
#FIXME: specializer should be required.
def __init__(self, specializer="default_specializer", cache_dir=None, use_cuda=False, use_cilk=False, use_tbb=False, use_pthreads=False, use_scala=False):
self.specialized_functions= {}
self.helper_method_names = []
self.db = ASPDB(specializer)
if cache_dir:
self.cache_dir = cache_dir
else:
# create a per-user cache directory
import tempfile, os
if os.name == 'nt':
username = os.environ['USERNAME']
else:
username = os.environ['LOGNAME']
self.cache_dir = tempfile.gettempdir() + "/asp_cache_" + username
if not os.access(self.cache_dir, os.F_OK):
os.mkdir(self.cache_dir)
self.backends = {}
self.backends["c++"] = ASPBackend(codepy.bpl.BoostPythonModule(),
codepy.toolchain.guess_toolchain(),
self.cache_dir)
if use_cuda:
self.backends["cuda"] = ASPBackend(codepy.cuda.CudaModule(self.backends["c++"].module),
codepy.toolchain.guess_nvcc_toolchain(),
self.cache_dir,
self.backends["c++"].toolchain)
self.backends['cuda'].module.add_to_preamble([cpp_ast.Include('cuda.h', True)]) # codepy.CudaModule doesn't do this automatically for some reason
self.backends['cuda'].module.add_to_preamble([cpp_ast.Include('cuda_runtime.h', True)]) # codepy.CudaModule doesn't do this automatically for some reason
self.backends['c++'].module.add_to_preamble([cpp_ast.Include('cuda_runtime.h', True)]) # codepy.CudaModule doesn't do this automatically for some reason
self.backends["cuda"].toolchain.cflags += ["-shared"]
if use_cilk:
self.backends["cilk"] = self.backends["c++"]
self.backends["cilk"].toolchain.cc = "icc"
if use_tbb:
self.backends["tbb"] = self.backends["c++"]
self.backends["tbb"].toolchain.cflags += ["-ltbb"]
if use_pthreads:
self.backends["pthreads"] = self.backends["c++"]
self.backends["pthreads"].toolchain.cflags += ["-pthread"]
if use_scala:
self.backends["scala"] = ASPBackend(scala_module.ScalaModule(),
scala_module.ScalaToolchain(),
self.cache_dir)
def add_library(self, feature, include_dirs, library_dirs=[], libraries=[], backend="c++"):
self.backends[backend].toolchain.add_library(feature, include_dirs, library_dirs, libraries)
def add_cuda_arch_spec(self, arch):
archflag = '-arch='
if 'sm_' not in arch: archflag += 'sm_'
archflag += arch
self.backends["cuda"].toolchain.cflags += [archflag]
def add_header(self, include_file, brackets=False, backend="c++"):
"""
Add a header (e.g. #include "foo.h") to the module source file.
With brackets=True, it will be C++-style #include <foo> instead.
"""
self.backends[backend].module.add_to_preamble([cpp_ast.Include(include_file, brackets)])
def add_to_preamble(self, pa, backend="c++"):
if isinstance(pa, basestring):
pa = [cpp_ast.Line(pa)]
self.backends[backend].module.add_to_preamble(pa)
def add_to_init(self, stmt, backend="c++"):
if isinstance(stmt, str):
stmt = [cpp_ast.Line(stmt)]
if backend == "cuda":
self.backends[backend].module.boost_module.add_to_init(stmt) #HACK because codepy's CudaModule doesn't have add_to_init()
else:
self.backends[backend].module.add_to_init(stmt)
def add_to_module(self, block, backend="c++"):
if isinstance(block, basestring):
block = [cpp_ast.Line(block)]
self.backends[backend].module.add_to_module(block)
def add_function(self, fname, funcs, variant_names=[], run_check_funcs=[], key_function=None,
backend="c++", call_policy=None):
"""
Add a specialized function to the Asp module. funcs can be a list of variants, but then
variant_names is required (also a list). Each item in funcs should be a string function or
a cpp_ast FunctionDef.
"""
if not isinstance(funcs, list):
funcs = [funcs]
variant_names = [fname]
self.specialized_functions[fname] = SpecializedFunction(fname, self.backends[backend], self.db, variant_names,
variant_funcs=funcs,
run_check_funcs=run_check_funcs,
key_function=key_function,
call_policy=call_policy)
def add_helper_function(self, fname, func, backend="c++"):
"""
Add a helper function, which is a specialized function that it not timed and has a single variant.
"""
self.specialized_functions[fname] = HelperFunction(fname, func, self.backends[backend])
def expose_class(self, classname, backend="c++"):
"""
Expose a class or struct from C++ to Python, letting us pass instances back and forth
between Python and C++.
TODO: allow exposing *functions* within the class
"""
self.backends[backend].module.add_to_init([cpp_ast.Line("boost::python::class_<%s>(\"%s\");\n" % (classname, classname))])
def __getattr__(self, name):
if name in self.specialized_functions:
return self.specialized_functions[name]
else:
raise AttributeError("No method %s found; did you add it to this ASPModule?" % name)
def generate(self):
"""
Utility function for, during development, dumping out the generated
source from all the underlying backends.
"""
src = ""
for x in self.backends.keys():
src += "\nSource code for backend '" + x + "':\n"
src += str(self.backends[x].module.generate())
return src
| bsd-3-clause | 484,791,479,455,267,650 | 40.187905 | 210 | 0.581122 | false | 4.176522 | false | false | false |
kogotko/carburetor | openstack_dashboard/dashboards/project/firewalls/views.py | 1 | 16454 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls \
import forms as fw_forms
from openstack_dashboard.dashboards.project.firewalls \
import tabs as fw_tabs
from openstack_dashboard.dashboards.project.firewalls \
import workflows as fw_workflows
AddRouterToFirewall = fw_forms.AddRouterToFirewall
InsertRuleToPolicy = fw_forms.InsertRuleToPolicy
RemoveRouterFromFirewall = fw_forms.RemoveRouterFromFirewall
RemoveRuleFromPolicy = fw_forms.RemoveRuleFromPolicy
UpdateFirewall = fw_forms.UpdateFirewall
UpdatePolicy = fw_forms.UpdatePolicy
UpdateRule = fw_forms.UpdateRule
FirewallDetailsTabs = fw_tabs.FirewallDetailsTabs
FirewallTabs = fw_tabs.FirewallTabs
PolicyDetailsTabs = fw_tabs.PolicyDetailsTabs
RuleDetailsTabs = fw_tabs.RuleDetailsTabs
AddFirewall = fw_workflows.AddFirewall
AddPolicy = fw_workflows.AddPolicy
AddRule = fw_workflows.AddRule
class IndexView(tabs.TabbedTableView):
tab_group_class = FirewallTabs
template_name = 'project/firewalls/details_tabs.html'
page_title = _("Firewalls")
class AddRuleView(workflows.WorkflowView):
workflow_class = AddRule
template_name = "project/firewalls/addrule.html"
page_title = _("Add New Rule")
class AddPolicyView(workflows.WorkflowView):
workflow_class = AddPolicy
template_name = "project/firewalls/addpolicy.html"
page_title = _("Add New Policy")
class AddFirewallView(workflows.WorkflowView):
workflow_class = AddFirewall
template_name = "project/firewalls/addfirewall.html"
page_title = _("Add New Firewall")
def get_workflow(self):
if api.neutron.is_extension_supported(self.request,
'fwaasrouterinsertion'):
AddFirewall.register(fw_workflows.SelectRoutersStep)
workflow = super(AddFirewallView, self).get_workflow()
return workflow
class RuleDetailsView(tabs.TabView):
tab_group_class = (RuleDetailsTabs)
template_name = 'horizon/common/_detail.html'
page_title = "{{ rule.name|default:rule.id }}"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, **kwargs):
context = super(RuleDetailsView, self).get_context_data(**kwargs)
rule = self.get_data()
table = fw_tabs.RulesTable(self.request)
breadcrumb = [
(_("Rules"), reverse_lazy('horizon:project:firewalls:rules'))]
context["custom_breadcrumb"] = breadcrumb
context["rule"] = rule
context["url"] = self.failure_url
context["actions"] = table.render_row_actions(rule)
return context
@memoized.memoized_method
def get_data(self):
try:
rule_id = self.kwargs['rule_id']
rule = api.fwaas.rule_get(self.request, rule_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve rule details.'),
redirect=self.failure_url)
return rule
def get_tabs(self, request, *args, **kwargs):
rule = self.get_data()
return self.tab_group_class(request, rule=rule, **kwargs)
class PolicyDetailsView(tabs.TabView):
tab_group_class = (PolicyDetailsTabs)
template_name = 'horizon/common/_detail.html'
page_title = "{{ policy.name|default:policy.id }}"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, **kwargs):
context = super(PolicyDetailsView, self).get_context_data(**kwargs)
policy = self.get_data()
table = fw_tabs.PoliciesTable(self.request)
breadcrumb = [
(_("Policies"),
reverse_lazy('horizon:project:firewalls:policies'))]
context["custom_breadcrumb"] = breadcrumb
context["policy"] = policy
context["url"] = self.failure_url
context["actions"] = table.render_row_actions(policy)
return context
@memoized.memoized_method
def get_data(self):
try:
policy_id = self.kwargs['policy_id']
policy = api.fwaas.policy_get(self.request, policy_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve policy details.'),
redirect=self.failure_url)
return policy
def get_tabs(self, request, *args, **kwargs):
policy = self.get_data()
return self.tab_group_class(request, policy=policy, **kwargs)
class FirewallDetailsView(tabs.TabView):
tab_group_class = (FirewallDetailsTabs)
template_name = 'horizon/common/_detail.html'
page_title = "{{ firewall.name|default:firewall.id }}"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, **kwargs):
context = super(FirewallDetailsView, self).get_context_data(**kwargs)
firewall = self.get_data()
routers = self.get_routers_data(firewall)
table = fw_tabs.FirewallsTable(self.request)
context["firewall"] = firewall
context["routers"] = routers
context["url"] = self.failure_url
context["actions"] = table.render_row_actions(firewall)
return context
@memoized.memoized_method
def get_data(self):
try:
firewall_id = self.kwargs['firewall_id']
firewall = api.fwaas.firewall_get(self.request, firewall_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve firewall details.'),
redirect=self.failure_url)
return firewall
@memoized.memoized_method
def get_routers_data(self, firewall):
routers = []
try:
if api.neutron.is_extension_supported(self.request,
'fwaasrouterinsertion'):
tenant_id = self.request.user.tenant_id
tenant_routers = api.neutron.router_list(self.request,
tenant_id=tenant_id)
router_ids = firewall.get_dict()['router_ids']
routers = [r for r in tenant_routers
if r['id'] in router_ids]
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve list of routers.'), )
return routers
def get_tabs(self, request, *args, **kwargs):
firewall = self.get_data()
return self.tab_group_class(request, firewall=firewall, **kwargs)
class UpdateRuleView(forms.ModalFormView):
form_class = UpdateRule
form_id = "update_rule_form"
template_name = "project/firewalls/updaterule.html"
context_object_name = 'rule'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updaterule"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Rule {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdateRuleView, self).get_context_data(**kwargs)
context['rule_id'] = self.kwargs['rule_id']
args = (self.kwargs['rule_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
rule_id = self.kwargs['rule_id']
try:
rule = api.fwaas.rule_get(self.request, rule_id)
return rule
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rule details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
rule = self._get_object()
initial = rule.get_dict()
protocol = initial['protocol']
initial['protocol'] = protocol.upper() if protocol else 'ANY'
initial['action'] = initial['action'].upper()
return initial
class UpdatePolicyView(forms.ModalFormView):
form_class = UpdatePolicy
form_id = "update_policy_form"
template_name = "project/firewalls/updatepolicy.html"
context_object_name = 'policy'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updatepolicy"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Policy {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdatePolicyView, self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
return initial
class UpdateFirewallView(forms.ModalFormView):
form_class = UpdateFirewall
form_id = "update_firewall_form"
template_name = "project/firewalls/updatefirewall.html"
context_object_name = 'firewall'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updatefirewall"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Firewall {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdateFirewallView, self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
args = (self.kwargs['firewall_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
firewall_id = self.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(self.request,
firewall_id)
return firewall
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class InsertRuleToPolicyView(forms.ModalFormView):
form_class = InsertRuleToPolicy
form_id = "update_policy_form"
template_name = "project/firewalls/insert_rule_to_policy.html"
context_object_name = 'policy'
submit_url = "horizon:project:firewalls:insertrule"
submit_label = _("Save Changes")
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Insert Rule to Policy")
def get_context_data(self, **kwargs):
context = super(InsertRuleToPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
class RemoveRuleFromPolicyView(forms.ModalFormView):
form_class = RemoveRuleFromPolicy
form_id = "update_policy_form"
template_name = "project/firewalls/remove_rule_from_policy.html"
context_object_name = 'policy'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:removerule"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Remove Rule from Policy")
def get_context_data(self, **kwargs):
context = super(RemoveRuleFromPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
class RouterCommonView(forms.ModalFormView):
form_id = "update_firewall_form"
context_object_name = 'firewall'
submit_label = _("Save Changes")
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(RouterCommonView,
self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
args = (self.kwargs['firewall_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
firewall_id = self.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(self.request, firewall_id)
return firewall
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class AddRouterToFirewallView(RouterCommonView):
form_class = AddRouterToFirewall
template_name = "project/firewalls/add_router_to_firewall.html"
submit_url = "horizon:project:firewalls:addrouter"
page_title = _("Add Router to Firewall")
class RemoveRouterFromFirewallView(RouterCommonView):
form_class = RemoveRouterFromFirewall
template_name = "project/firewalls/remove_router_from_firewall.html"
submit_url = "horizon:project:firewalls:removerouter"
page_title = _("Remove Router from Firewall")
| apache-2.0 | -1,994,469,539,088,781,300 | 36.480638 | 78 | 0.633402 | false | 4.008283 | false | false | false |
stadelmanma/OpenPNM | OpenPNM/Network/__Delaunay__.py | 1 | 16258 | # -*- coding: utf-8 -*-
"""
===============================================================================
Delaunay: Generate random networks based on Delaunay Tessellations
===============================================================================
"""
import sys
import scipy as sp
import numpy as np
import OpenPNM.Utilities.vertexops as vo
import scipy.sparse as sprs
import scipy.spatial as sptl
import scipy.ndimage as spim
from scipy.spatial import Voronoi
from OpenPNM.Network import GenericNetwork
from OpenPNM.Base import logging
logger = logging.getLogger(__name__)
class Delaunay(GenericNetwork):
r"""
This class contains the methods for creating a *Delaunay* network topology
based connecting pores with a Delaunay tessellation.
Parameters
----------
name : string
A unique name for the network
domain_size : list of floats, [Lx,Ly,Lz]
Bounding cube for internal pore positions
num_pores : int
Number of pores to place randomly within domain
prob : 3D float array
        Values should be between 0 and 1, as they determine the probability of a
        point with the corresponding relative domain coordinates being kept. The
        array does not have to be the same size as the domain because positions
        are re-scaled
base_points : [Np,3] float array
coordinates to use instead of random generation
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.Delaunay(num_pores=100,
... domain_size=[0.0001, 0.0001, 0.0001])
>>> pn.num_pores()
100
"""
def __init__(self, num_pores=None, domain_size=None, prob=None,
base_points=None, **kwargs):
"""
        Create Delaunay network object
"""
super().__init__(**kwargs)
self.generate(num_pores, domain_size, prob, base_points)
def generate(self, num_pores, domain_size, prob, base_points):
r"""
Method to trigger the generation of the network
"""
logger.info('Start of network topology generation')
self._generate_setup(num_pores, domain_size, base_points)
if base_points is not None:
try:
dim = sp.shape(base_points)[1]
if dim != 3:
raise Exception('base points must be 3D')
except:
raise Exception('base points must be 3D')
self['pore.coords'] = base_points
else:
self._generate_pores(prob)
self._generate_throats()
logger.debug('Network generation complete')
def _generate_setup(self, num_pores, domain_size, base_points):
r"""
Perform applicable preliminary checks and calculations required for
generation
"""
logger.debug('generate_setup: Perform preliminary calculations and checks')
if domain_size is None:
raise Exception('domain_size must always be specified')
if num_pores is None and base_points is None:
raise Exception('num_pores or base_points must be specified')
elif num_pores is None and base_points is not None:
num_pores = len(base_points)
elif num_pores is not None and base_points is not None:
logger.warning('both num_pores and base_points arguments given' +
' num_pores over-written')
num_pores = len(base_points)
self._Lx = domain_size[0]
self._Ly = domain_size[1]
self._Lz = domain_size[2]
self._Np = num_pores
r"""
TODO: Fix this, btype should be received as an argument
"""
self._btype = [0, 0, 0]
def _generate_pores(self, prob=None):
r"""
Generate the pores with numbering scheme.
"""
logger.info('Place randomly located pores in the domain')
if prob is not None:
coords = []
i = 0
while i < self._Np:
coord = np.random.rand(3)
[indx, indy, indz] = np.floor(coord*np.shape(prob)).astype(int)
p = prob[indx][indy][indz]
if np.random.rand(1) <= p:
coords.append(coord)
i += 1
coords = np.asarray(coords)
else:
coords = np.random.random([self._Np, 3])
coords *= np.array([self._Lx, self._Ly, self._Lz])
self['pore.coords'] = coords
def _generate_throats(self):
r"""
Generate the throats connections
"""
logger.info('Define connections between pores')
pts = self['pore.coords']
Np = len(pts)
        # Generate 6 dummy domains to pad onto each face of the real domain.
        # This prevents surface pores from making long-range connections to
        # each other.
x, y, z = self['pore.coords'].T
if x.max() > self._Lx:
Lx = x.max()*1.05
else:
Lx = self._Lx
if y.max() > self._Ly:
Ly = y.max()*1.05
else:
Ly = self._Ly
if z.max() > self._Lz:
Lz = z.max()*1.05
else:
Lz = self._Lz
# Reflect in X = Lx and 0
Pxp = pts.copy()
Pxp[:, 0] = 2*Lx-Pxp[:, 0]
Pxm = pts.copy()
Pxm[:, 0] = Pxm[:, 0]*(-1)
# Reflect in Y = Ly and 0
Pyp = pts.copy()
Pyp[:, 1] = 2*Ly-Pxp[:, 1]
Pym = pts.copy()
Pym[:, 1] = Pxm[:, 1]*(-1)
# Reflect in Z = Lz and 0
Pzp = pts.copy()
Pzp[:, 2] = 2*Lz-Pxp[:, 2]
Pzm = pts.copy()
Pzm[:, 2] = Pxm[:, 2]*(-1)
# Add dummy domains to real domain
# Order important for boundary logic
pts = np.vstack((pts, Pxp, Pxm, Pyp, Pym, Pzp, Pzm))
# Perform tessellation
logger.debug('Beginning tessellation')
Tri = sptl.Delaunay(pts)
logger.debug('Converting tessellation to adjacency matrix')
adjmat = sprs.lil_matrix((Np, Np), dtype=int)
for i in sp.arange(0, sp.shape(Tri.simplices)[0]):
# Keep only simplices that are fully in real domain
# this used to be vectorize, but it stopped working...change in scipy?
for j in Tri.simplices[i]:
if j < Np:
adjmat[j, Tri.simplices[i][Tri.simplices[i] < Np]] = 1
# Remove duplicate (lower triangle) and self connections (diagonal)
# and convert to coo
adjmat = sprs.triu(adjmat, k=1, format="coo")
logger.debug('Conversion to adjacency matrix complete')
self['throat.conns'] = sp.vstack((adjmat.row, adjmat.col)).T
self['pore.all'] = np.ones(len(self['pore.coords']), dtype=bool)
self['throat.all'] = np.ones(len(self['throat.conns']), dtype=bool)
# Do Voronoi diagram - creating voronoi polyhedra around each pore and save
# vertex information
self._vor = Voronoi(pts)
all_vert_index = sp.ndarray(Np, dtype=object)
for i, polygon in enumerate(self._vor.point_region[0:Np]):
if -1 not in self._vor.regions[polygon]:
all_vert_index[i] = \
dict(zip(self._vor.regions[polygon],
self._vor.vertices[self._vor.regions[polygon]]))
# Add throat vertices by looking up vor.ridge_dict
throat_verts = sp.ndarray(len(self['throat.conns']), dtype=object)
for i, (p1, p2) in enumerate(self['throat.conns']):
try:
throat_verts[i] = \
dict(zip(self._vor.ridge_dict[(p1, p2)],
self._vor.vertices[self._vor.ridge_dict[(p1, p2)]]))
except KeyError:
try:
throat_verts[i] = \
dict(zip(self._vor.ridge_dict[(p2, p1)],
self._vor.vertices[self._vor.ridge_dict[(p2, p1)]]))
except KeyError:
logger.error('Throat Pair Not Found in Voronoi Ridge Dictionary')
self['pore.vert_index'] = all_vert_index
self['throat.vert_index'] = throat_verts
logger.debug(sys._getframe().f_code.co_name + ': End of method')
def add_boundaries(self):
r"""
This method identifies pores in the original Voronoi object that straddle a
boundary imposed by the reflection. The pore inside the original set of pores
(with index 0 - Np) is identified and the coordinates are saved. The vertices
making up the boundary throat are retrieved from the ridge_dict values and
these are used to identify which boundary the throat sits at.
A new pore and new connection is created with coordinates lying on the
boundary plane.
        N.B. This method will only work properly if the original network remains
        unaltered, i.e. not trimmed or extended.
        This preserves the connection between the pore index on the network object
        and the Voronoi object.
        The point of using this method is so that the throat vertices created by
        the Voronoi object are preserved.
        This method will create boundary pores at the centre of the voronoi faces
        that align with the outer planes of the domain.
        The original pores in the domain are labelled internal and the boundary pores
        are labelled external.
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.Delaunay(num_pores=100,
... domain_size=[0.0001,0.0001,0.0001])
>>> pn.add_boundaries()
>>> pn.num_pores('boundary') > 0
True
"""
bound_conns = []
bound_coords = []
bound_vert_index = []
throat_vert_index = []
# Find boundary extent
[x_min, x_max, y_min, y_max, z_min, z_max] = \
vo.vertex_dimension(self, self.pores(), parm='minmax')
min_point = np.around(np.array([x_min, y_min, z_min]), 10)
max_point = np.around(np.array([x_max, y_max, z_max]), 10)
Np = self.num_pores()
Nt = self.num_throats()
new_throat_count = 0
# ridge_dict contains a dictionary where the key is a set of 2 neighbouring
# pores and the value is the vertex indices that form the throat or ridge
# between them
for p, v in self._vor.ridge_dict.items():
# If the vertex with index -1 is contained in list then the ridge is
# unbounded - ignore these
if np.all(np.asarray(v) >= 0):
# Boundary throats will be those connecting one pore inside the
# original set and one out
if (p[0] in range(Np) and p[1] not in range(Np)) or \
(p[0] not in range(Np) and p[1] in range(Np)):
# The dictionary key is not in numerical order so find the pore
# index inside
if p[0] in range(Np):
my_pore = p[0]
else:
my_pore = p[1]
my_pore_coord = self["pore.coords"][my_pore]
new_pore_coord = my_pore_coord.copy()
# Rounding necessary here to identify the plane as Voronoi can
# have 1e-17 and smaller errors
throat_verts = np.around(self._vor.vertices[v], 10)
# Find which plane we are aligned with (if any) and align
# new_pore with throat plane
if len(np.unique(throat_verts[:, 0])) == 1:
new_pore_coord[0] = np.unique(throat_verts[:, 0])
elif len(np.unique(throat_verts[:, 1])) == 1:
new_pore_coord[1] = np.unique(throat_verts[:, 1])
elif len(np.unique(throat_verts[:, 2])) == 1:
new_pore_coord[2] = np.unique(throat_verts[:, 2])
else:
new_pore_coord = np.mean(throat_verts, axis=0)
pass
bound_coords.append(new_pore_coord)
bound_conns.append(np.array([my_pore, new_throat_count + Np]))
bound_vert_index.append(dict(zip(v, throat_verts)))
throat_vert_index.append(dict(zip(v, throat_verts)))
new_throat_count += 1
# Add new pores and connections
self.extend(pore_coords=bound_coords, throat_conns=bound_conns)
# Record new number of pores
Mp = self.num_pores()
Mt = self.num_throats()
new_pore_ids = np.arange(Np, Mp)
new_throat_ids = np.arange(Nt, Mt)
# Identify which boundary the pore sits on
front = self.pores()[self['pore.coords'][:, 0] == min_point[0]]
back = self.pores()[self['pore.coords'][:, 0] == max_point[0]]
left = self.pores()[self['pore.coords'][:, 1] == min_point[1]]
right = self.pores()[self['pore.coords'][:, 1] == max_point[1]]
bottom = self.pores()[self['pore.coords'][:, 2] == min_point[2]]
top = self.pores()[self['pore.coords'][:, 2] == max_point[2]]
if len(top) == 0:
top = self.pores()[self['pore.coords'][:, 2] ==
np.asarray(bound_coords)[:, 2].max()]
# Assign labels
self['pore.boundary'] = False
self['pore.boundary'][new_pore_ids] = True
self['throat.boundary'] = False
self['throat.boundary'][new_throat_ids] = True
self['pore.right_boundary'] = False
self['pore.left_boundary'] = False
self['pore.front_boundary'] = False
self['pore.back_boundary'] = False
self['pore.top_boundary'] = False
self['pore.bottom_boundary'] = False
self['pore.right_boundary'][right] = True
self['pore.left_boundary'][left] = True
self['pore.front_boundary'][front] = True
self['pore.back_boundary'][back] = True
self['pore.top_boundary'][top] = True
self['pore.bottom_boundary'][bottom] = True
# Save the throat verts
self["pore.vert_index"][new_pore_ids] = bound_vert_index
self["throat.vert_index"][new_throat_ids] = throat_vert_index
def domain_length(self, face_1, face_2):
r"""
Returns the distance between two faces
        No coplanar checking; this is done in vertex_dimension.
"""
L = vo.vertex_dimension(self, face_1, face_2, parm='length')
return L
def domain_area(self, face):
r"""
Returns the area of a face
        No coplanar checking; this is done in vertex_dimension.
"""
A = vo.vertex_dimension(self, face, parm='area')
return A
def _export_vor_fibres(self):
r"""
Run through the throat vertices, compute the convex hull order and save
the vertices and ordered faces in a pickle dictionary to be used in
blender
"""
import pickle as pickle
Indices = []
for t in self.throats():
indices = list(self["throat.vert_index"][t].keys())
verts = self._vor.vertices[indices]
# Need to order the indices in convex hull order
# Compute the standard deviation in all coordinates and eliminate
# the axis with the smallest to make 2d
stds = [np.std(verts[:, 0]), np.std(verts[:, 1]), np.std(verts[:, 2])]
if np.argmin(stds) == 0:
verts2d = np.vstack((verts[:, 1], verts[:, 2])).T
elif np.argmin(stds) == 1:
verts2d = np.vstack((verts[:, 0], verts[:, 2])).T
else:
verts2d = np.vstack((verts[:, 0], verts[:, 1])).T
# 2d convexhull returns vertices in hull order
hull2d = sptl.ConvexHull(verts2d, qhull_options='QJ Pp')
# Re-order the vertices and save as list (blender likes them as lists)
Indices.append(np.asarray(indices)[hull2d.vertices].tolist())
# Create dictionary to pickle
data = {}
data["Verts"] = self._vor.vertices
data["Indices"] = Indices
pickle.dump(data, open("fibres.p", "wb"))
| mit | 3,054,005,862,668,982,000 | 41.560209 | 85 | 0.548714 | false | 3.734895 | false | false | false |
FabriceSalvaire/mupdf-v1.3 | bindings/example.py | 1 | 15058 | #! /usr/bin/env python
# -*- Python -*-
####################################################################################################
import argparse
import sys
import numpy as np
import mupdf as cmupdf
from MuPDF import *
from PyQt4 import QtCore, QtGui
####################################################################################################
def show_metadata(ctx, doc):
for key in (
'Title',
'Subject',
'Author',
'Creator',
'Producer',
'CreationDate',
'ModDate',
):
print cmupdf.get_meta_info(doc, key, 1024)
fz_buffer = cmupdf.pdf_metadata(doc)
print cmupdf.fz_buffer_data(fz_buffer)
cmupdf.fz_drop_buffer(ctx, fz_buffer)
####################################################################################################
def show_pdf(np_array):
application = QtGui.QApplication(sys.argv)
height, width = np_array.shape[:2]
image = QtGui.QImage(np_array.data, width, height, QtGui.QImage.Format_ARGB32)
label = QtGui.QLabel()
label.setPixmap(QtGui.QPixmap.fromImage(image))
area = QtGui.QScrollArea()
area.setWidget(label)
area.setWindowTitle(args.filename)
area.show()
application.exec_()
####################################################################################################
def get_font_name(font):
font_name = cmupdf.get_font_name(font)
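    # Embedded fonts are usually subset and named like "ABCDEF+RealName"; strip the subset tag.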
i = font_name.find('+')
    if i != -1:
font_name = font_name[i+1:]
return font_name
####################################################################################################
def dump_bbox(obj):
return "[%g %g %g %g]" % (obj.bbox.x0, obj.bbox.y0,
obj.bbox.x1, obj.bbox.y1)
####################################################################################################
def dump_text_style(text_sheet):
style = text_sheet.style
while style:
font = style.font
message = "span.s%u{font-family:\"%s\";font-size:%gpt" % (style.id, get_font_name(font), style.size)
if cmupdf.font_is_italic(font):
message += ';font-style:italic'
if cmupdf.font_is_bold(font):
message += ';font-weight:bold;'
message += '}'
print message
style = style.next
####################################################################################################
def dump_text_page_xml(text_page):
print "<page>"
for block in TextBlockIterator(text_page):
print "<block bbox=\"" + dump_bbox(block) + "\">"
for line in TextLineIterator(block):
print " "*2 + "<line bbox=\"" + dump_bbox(line) + "\">"
for span in TextSpanIterator(line):
print " "*4 + "<span bbox=\"" + dump_bbox(span) + "\" \">"
for ch in TextCharIterator(span):
style = ch.style
font_name = get_font_name(style.font)
print " "*6 + "<char " + \
u" c=\"%s\" font=\"%s\" size=\"%g\"/>" % (unichr(ch.c), font_name, style.size)
print " "*4 + "</span>"
print " "*2 + "</line>"
print "</block>"
print "</page>"
####################################################################################################
def dump_text_page(text_page):
empty_block = False
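    # Only start a new <Block> header once the previous one has printed some text;
    # blocks that yield no text share their header with the next block.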
for block in TextBlockIterator(text_page):
if not empty_block:
print '\n<Block>'
empty_block = True
for line in TextLineIterator(block):
line_text = u''
for span in TextSpanIterator(line):
span_text = u''
for ch in TextCharIterator(span):
span_text += unichr(ch.c)
span_text = span_text.rstrip()
if span_text:
line_text += '<Span>' + span_text + '</Span>'
else:
line_text += '<Empty Span>'
if line_text:
print line_text
empty_block = False
####################################################################################################
class GrowingTextBrowser(QtGui.QTextBrowser):
_id = 0
##############################################
def __init__(self, *args, **kwargs):
GrowingTextBrowser._id += 1
self._id = GrowingTextBrowser._id
super(GrowingTextBrowser, self).__init__(*args, **kwargs)
size_policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
size_policy.setHeightForWidth(True)
self.setSizePolicy(size_policy)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
##############################################
def setPlainText(self, text):
super(GrowingTextBrowser, self).setPlainText(text)
self._text = text
##############################################
def print_document_size(self, document=None):
if document is None:
document = self.document()
document_size = document.size()
print "Document width", document_size.width(), 'height', document_size.height()
##############################################
def sizePolicy(self):
size_policy = super(GrowingTextBrowser, self).sizePolicy()
print 'GrowingTextBrowser.sizePolicy', self._id, \
size_policy.horizontalPolicy(), size_policy.verticalPolicy()
return size_policy
##############################################
def sizeHint(self):
size = super(GrowingTextBrowser, self).sizeHint()
print 'GrowingTextBrowser.sizeHint', self._id, size.width(), size.height()
return QtCore.QSize(0, 0)
##############################################
def minimumSizeHint(self):
size = super(GrowingTextBrowser, self).minimumSizeHint()
print 'GrowingTextBrowser.minimumSizeHint', self._id, size.width(), size.height()
return QtCore.QSize(0, 0)
##############################################
def heightForWidth(self, width):
print 'GrowingTextBrowser.heightForWidth', self._id, width
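        # Lay the text out at the requested width and report the height the document needs.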
document = QtGui.QTextDocument(self._text)
document.setPageSize(QtCore.QSizeF(width, -1))
height = document.documentLayout().documentSize().toSize().height()
self.print_document_size(document)
return height + self.font().pointSize()
##############################################
def resizeEvent(self, event):
print 'GrowingTextBrowser.resizeEvent', self._id, \
'old', event.oldSize().width(), event.oldSize().height(), \
'new', event.size().width(), event.size().height()
self.print_document_size()
return super(GrowingTextBrowser, self).resizeEvent(event)
####################################################################################################
def append_block(parent, vertical_layout, source_text):
text_browser = GrowingTextBrowser(parent)
text_browser.setPlainText(source_text)
# vertical_layout.addWidget(text_browser)
horizontal_layout = QtGui.QHBoxLayout()
horizontal_layout.addWidget(text_browser, 0, QtCore.Qt.AlignTop)
vertical_layout.addLayout(horizontal_layout)
def show_text_page(text_page):
application = QtGui.QApplication(sys.argv)
main_window = QtGui.QMainWindow()
main_window.resize(1000, 800)
main_window.setWindowTitle(args.filename)
scroll_area = QtGui.QScrollArea(main_window)
# scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
scroll_area.setWidgetResizable(True)
main_window.setCentralWidget(scroll_area)
container_widget = QtGui.QWidget()
vertical_layout = QtGui.QVBoxLayout(container_widget) # Set container_widget layout
scroll_area.setWidget(container_widget)
for block in TextBlockIterator(text_page):
block_text = u''
for line in TextLineIterator(block):
line_text = u''
for span in TextSpanIterator(line):
span_text = u''
for ch in TextCharIterator(span):
span_text += unichr(ch.c)
span_text = span_text.rstrip()
if span_text: # Append span to line
line_text += span_text
else: # Empty span then append a block
if block_text:
append_block(container_widget, vertical_layout, block_text)
block_text = u''
line_text = u''
# Append line to block
if block_text:
block_text += ' '
block_text += line_text
if block_text:
append_block(container_widget, vertical_layout, block_text)
spacer_item = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
vertical_layout.addItem(spacer_item)
print 'Show'
#main_window.show()
main_window.showMaximized()
application.exec_()
####################################################################################################
argument_parser = argparse.ArgumentParser(description='Example.')
argument_parser.add_argument('filename', metavar='FILENAME',
help='PDF file')
argument_parser.add_argument('--page', dest='page_number',
type=int,
default=1,
help='Page number')
argument_parser.add_argument('--zoom', dest='zoom',
type=int,
default=100,
help='Zoom factor in %%')
argument_parser.add_argument('--rotation', dest='rotation',
type=int,
default=0,
help='Rotation')
args = argument_parser.parse_args()
####################################################################################################
# Create a context to hold the exception stack and various caches.
ctx = cmupdf.fz_new_context(None, None, cmupdf.FZ_STORE_UNLIMITED)
####################################################################################################
# Open the PDF, XPS or CBZ document.
doc = cmupdf.fz_open_document(ctx, args.filename)
show_metadata(ctx, doc)
####################################################################################################
# Retrieve the number of pages (not used in this example).
page_count = cmupdf.fz_count_pages(doc)
# Load the page we want. Page numbering starts from zero.
page = cmupdf.fz_load_page(doc, args.page_number -1)
####################################################################################################
# Calculate a transform to use when rendering. This transform contains the scale and
# rotation. Convert zoom percentage to a scaling factor. Without scaling the resolution is 72 dpi.
transform = cmupdf.fz_matrix_s()
cmupdf.fz_rotate(transform, args.rotation)
cmupdf.fz_pre_scale(transform, args.zoom / 100.0, args.zoom / 100.0)
# Take the page bounds and transform them by the same matrix that we will use to render the page.
bounds = cmupdf.fz_rect_s()
cmupdf.fz_bound_page(doc, page, bounds)
cmupdf.fz_transform_rect(bounds, transform)
####################################################################################################
# A page consists of a series of objects (text, line art, images, gradients). These objects are
# passed to a device when the interpreter runs the page. There are several devices, used for
# different purposes:
#
# draw device -- renders objects to a target pixmap.
#
# text device -- extracts the text in reading order with styling
# information. This text can be used to provide text search.
#
# list device -- records the graphic objects in a list that can
# be played back through another device. This is useful if you
# need to run the same page through multiple devices, without
# the overhead of parsing the page each time.
####################################################################################################
# Create a blank pixmap to hold the result of rendering. The pixmap bounds used here are the same as
# the transformed page bounds, so it will contain the entire page. The page coordinate space has the
# origin at the top left corner and the x axis extends to the right and the y axis extends down.
bbox = cmupdf.fz_irect_s()
cmupdf.fz_round_rect(bbox, bounds)
width, height = bbox.x1 - bbox.x0, bbox.y1 - bbox.y0
np_array = np.zeros((height, width, 4), dtype=np.uint8)
# pixmap = cmupdf.fz_new_pixmap_with_bbox(ctx, cmupdf.get_fz_device_rgb(), bbox)
pixmap = cmupdf.fz_new_pixmap_with_bbox_and_data(ctx, cmupdf.fz_device_rgb(ctx), bbox,
cmupdf.numpy_to_pixmap(np_array))
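# The pixmap shares its pixel buffer with np_array, so rendering into the pixmap
# fills the numpy array that show_pdf() displays.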
cmupdf.fz_clear_pixmap_with_value(ctx, pixmap, 0xff)
# Create a draw device with the pixmap as its target.
# Run the page with the transform.
device = cmupdf.fz_new_draw_device(ctx, pixmap)
cmupdf.fz_set_aa_level(ctx, 8)
cmupdf.fz_run_page(doc, page, device, transform, None)
cmupdf.fz_free_device(device)
if True:
show_pdf(np_array)
if False:
# Save the pixmap to a file.
cmupdf.fz_write_png(ctx, pixmap, "out.png", 0)
####################################################################################################
text_sheet = cmupdf.fz_new_text_sheet(ctx)
text_page = cmupdf.fz_new_text_page(ctx)
device = cmupdf.fz_new_text_device(ctx, text_sheet, text_page)
cmupdf.fz_run_page(doc, page, device, transform, None)
cmupdf.fz_free_device(device)
if False:
# Dump text style and page.
dump_text_style(text_sheet)
dump_text_page_xml(text_page)
if True:
dump_text_page(text_page)
show_text_page(text_page)
if False:
file_handler = cmupdf.fz_fopen("out.css", "w+")
output_file = cmupdf.fz_new_output_with_file(ctx, file_handler)
cmupdf.fz_print_text_sheet(ctx, output_file, text_sheet)
cmupdf.fz_close_output(output_file)
cmupdf.fz_fclose(file_handler)
    file_handler = cmupdf.fz_fopen("out.txt", "w+")
output_file = cmupdf.fz_new_output_with_file(ctx, file_handler)
# cmupdf.fz_print_text_page(ctx, output_file, text_page)
# cmupdf.fz_print_text_page_html(ctx, output_file, text_page)
cmupdf.fz_print_text_page_xml(ctx, output_file, text_page)
cmupdf.fz_close_output(output_file)
cmupdf.fz_fclose(file_handler)
####################################################################################################
# Clean up.
cmupdf.fz_free_text_sheet(ctx, text_sheet)
cmupdf.fz_free_text_page(ctx, text_page)
cmupdf.fz_drop_pixmap(ctx, pixmap)
cmupdf.fz_free_page(doc, page)
cmupdf.fz_close_document(doc)
cmupdf.fz_free_context(ctx)
####################################################################################################
#
# End
#
####################################################################################################
| agpl-3.0 | 8,527,913,039,060,901,000 | 34.682464 | 108 | 0.521052 | false | 4.001594 | false | false | false |
twitter/pants | src/python/pants/backend/jvm/ivy_utils.py | 1 | 51293 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import os
import pkgutil
import threading
import xml.etree.ElementTree as ET
from abc import abstractmethod
from builtins import object, open, str
from collections import defaultdict, namedtuple
from functools import total_ordering
import six
from future.utils import PY3
from twitter.common.collections import OrderedSet
from pants.backend.jvm.subsystems.jar_dependency_management import (JarDependencyManagement,
PinnedJarArtifactSet)
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.generator import Generator, TemplateData
from pants.base.revision import Revision
from pants.build_graph.target import Target
from pants.ivy.bootstrapper import Bootstrapper
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency import JarDependency
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.java.util import execute_runner
from pants.util.collections_abc_backport import OrderedDict
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir, safe_open
from pants.util.fileutil import atomic_copy, safe_hardlink_or_copy
class IvyResolutionStep(object):
"""Ivy specific class for describing steps of performing resolution."""
# NB(nh): This class is the base class for the ivy resolve and fetch steps.
# It also specifies the abstract methods that define the components of resolution steps.
def __init__(self, confs, hash_name, pinned_artifacts, soft_excludes, ivy_resolution_cache_dir,
ivy_repository_cache_dir, ivy_workdir):
"""
:param confs: A tuple of string ivy confs to resolve for.
:param hash_name: A unique string name for this resolve.
:param pinned_artifacts: A tuple of "artifact-alikes" to force the versions of.
:param soft_excludes: A flag marking whether to pass excludes to Ivy or to apply them after the
fact.
:param ivy_repository_cache_dir: The cache directory used by Ivy for repository cache data.
:param ivy_resolution_cache_dir: The cache directory used by Ivy for resolution cache data.
:param ivy_workdir: A task-specific workdir that all ivy outputs live in.
"""
self.confs = confs
self.hash_name = hash_name
self.pinned_artifacts = pinned_artifacts
self.soft_excludes = soft_excludes
self.ivy_repository_cache_dir = ivy_repository_cache_dir
self.ivy_resolution_cache_dir = ivy_resolution_cache_dir
self.ivy_workdir = ivy_workdir
self.workdir_reports_by_conf = {c: self.resolve_report_path(c) for c in confs}
@abstractmethod
def required_load_files_exist(self):
"""The files required to load a previous resolve exist."""
@abstractmethod
def required_exec_files_exist(self):
"""The files to do a resolve exist."""
@abstractmethod
def load(self, targets):
"""Loads the result of a resolve or fetch."""
@abstractmethod
def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
workunit_factory):
"""Runs the resolve or fetch and loads the result, returning it."""
@property
def workdir(self):
return os.path.join(self.ivy_workdir, self.hash_name)
@property
def hardlink_classpath_filename(self):
return os.path.join(self.workdir, 'classpath')
@property
def ivy_cache_classpath_filename(self):
return '{}.raw'.format(self.hardlink_classpath_filename)
@property
def frozen_resolve_file(self):
return os.path.join(self.workdir, 'resolution.json')
@property
def hardlink_dir(self):
return os.path.join(self.ivy_workdir, 'jars')
@abstractmethod
def ivy_xml_path(self):
"""Ivy xml location."""
@abstractmethod
def resolve_report_path(self, conf):
"""Location of the resolve report in the workdir."""
def _construct_and_load_hardlink_map(self):
artifact_paths, hardlink_map = IvyUtils.construct_and_load_hardlink_map(
self.hardlink_dir,
self.ivy_repository_cache_dir,
self.ivy_cache_classpath_filename,
self.hardlink_classpath_filename)
return artifact_paths, hardlink_map
def _call_ivy(self, executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
workunit_factory, workunit_name):
IvyUtils.do_resolve(executor,
extra_args,
ivyxml,
jvm_options,
self.workdir_reports_by_conf,
self.confs,
self.ivy_resolution_cache_dir,
self.ivy_cache_classpath_filename,
hash_name_for_report,
workunit_factory,
workunit_name)
class IvyFetchStep(IvyResolutionStep):
"""Resolves ivy artifacts using the coordinates from a previous resolve."""
def required_load_files_exist(self):
return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
os.path.isfile(self.ivy_cache_classpath_filename) and
os.path.isfile(self.frozen_resolve_file))
def resolve_report_path(self, conf):
return os.path.join(self.workdir, 'fetch-report-{}.xml'.format(conf))
@property
def ivy_xml_path(self):
return os.path.join(self.workdir, 'fetch-ivy.xml')
def required_exec_files_exist(self):
return os.path.isfile(self.frozen_resolve_file)
def load(self, targets):
try:
frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
targets)
except Exception as e:
logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
return NO_RESOLVE_RUN_RESULT
return self._load_from_fetch(frozen_resolutions)
def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
workunit_factory):
try:
frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
targets)
except Exception as e:
logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
return NO_RESOLVE_RUN_RESULT
self._do_fetch(executor, extra_args, frozen_resolutions, jvm_options,
workunit_name, workunit_factory)
result = self._load_from_fetch(frozen_resolutions)
if not result.all_linked_artifacts_exist():
raise IvyResolveMappingError(
'Some artifacts were not linked to {} for {}'.format(self.ivy_workdir,
result))
return result
def _load_from_fetch(self, frozen_resolutions):
artifact_paths, hardlink_map = self._construct_and_load_hardlink_map()
return IvyFetchResolveResult(artifact_paths,
hardlink_map,
self.hash_name,
self.workdir_reports_by_conf,
frozen_resolutions)
def _do_fetch(self, executor, extra_args, frozen_resolution, jvm_options, workunit_name,
workunit_factory):
# It's important for fetches to have a different ivy report from resolves as their
# contents differ.
hash_name_for_report = '{}-fetch'.format(self.hash_name)
ivyxml = self.ivy_xml_path
self._prepare_ivy_xml(frozen_resolution, ivyxml, hash_name_for_report)
self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
workunit_factory, workunit_name)
def _prepare_ivy_xml(self, frozen_resolution, ivyxml, resolve_hash_name_for_report):
# NB(nh): Our ivy.xml ensures that we always get the default configuration, even if it's not
# part of the requested confs.
default_resolution = frozen_resolution.get('default')
if default_resolution is None:
raise IvyUtils.IvyError("Couldn't find the frozen resolution for the 'default' ivy conf.")
try:
jars = default_resolution.jar_dependencies
IvyUtils.generate_fetch_ivy(jars, ivyxml, self.confs, resolve_hash_name_for_report)
except Exception as e:
raise IvyUtils.IvyError('Failed to prepare ivy resolve: {}'.format(e))
class IvyResolveStep(IvyResolutionStep):
"""Resolves ivy artifacts and produces a cacheable file containing the resulting coordinates."""
def required_load_files_exist(self):
return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
os.path.isfile(self.ivy_cache_classpath_filename))
def resolve_report_path(self, conf):
return os.path.join(self.workdir, 'resolve-report-{}.xml'.format(conf))
@property
def ivy_xml_path(self):
return os.path.join(self.workdir, 'resolve-ivy.xml')
def load(self, targets):
artifact_paths, hardlink_map = self._construct_and_load_hardlink_map()
return IvyResolveResult(artifact_paths,
hardlink_map,
self.hash_name,
self.workdir_reports_by_conf)
def exec_and_load(self, executor, extra_args, targets, jvm_options,
workunit_name, workunit_factory):
self._do_resolve(executor, extra_args, targets, jvm_options, workunit_name, workunit_factory)
result = self.load(targets)
if not result.all_linked_artifacts_exist():
raise IvyResolveMappingError(
'Some artifacts were not linked to {} for {}'.format(self.ivy_workdir,
result))
frozen_resolutions_by_conf = result.get_frozen_resolutions_by_conf(targets)
FrozenResolution.dump_to_file(self.frozen_resolve_file, frozen_resolutions_by_conf)
return result
def _do_resolve(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory):
ivyxml = self.ivy_xml_path
hash_name = '{}-resolve'.format(self.hash_name)
self._prepare_ivy_xml(targets, ivyxml, hash_name)
self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name,
workunit_factory, workunit_name)
def _prepare_ivy_xml(self, targets, ivyxml, hash_name):
# TODO(John Sirois): merge the code below into IvyUtils or up here; either way, better
# diagnostics can be had in `IvyUtils.generate_ivy` if this is done.
# See: https://github.com/pantsbuild/pants/issues/2239
jars, global_excludes = IvyUtils.calculate_classpath(targets)
# Don't pass global excludes to ivy when using soft excludes.
if self.soft_excludes:
global_excludes = []
IvyUtils.generate_ivy(targets, jars, global_excludes, ivyxml, self.confs,
hash_name, self.pinned_artifacts)
class FrozenResolution(object):
"""Contains the abstracted results of a resolve.
With this we can do a simple fetch.
"""
# TODO(nh): include full dependency graph in here.
# So that we can inject it into the build graph if we want to.
class MissingTarget(Exception):
"""Thrown when a loaded resolution has a target spec for a target that doesn't exist."""
def __init__(self):
self.target_to_resolved_coordinates = defaultdict(OrderedSet)
self.all_resolved_coordinates = OrderedSet()
self.coordinate_to_attributes = OrderedDict()
@property
def jar_dependencies(self):
return [
JarDependency(c.org, c.name, c.rev, classifier=c.classifier, ext=c.ext,
**self.coordinate_to_attributes.get(c, {}))
for c in self.all_resolved_coordinates]
def add_resolved_jars(self, target, resolved_jars):
coords = [j.coordinate for j in resolved_jars]
self.add_resolution_coords(target, coords)
# Assuming target is a jar library.
for j in target.jar_dependencies:
url = j.get_url(relative=True)
if url:
self.coordinate_to_attributes[j.coordinate] = {'url': url, 'base_path': j.base_path}
else:
self.coordinate_to_attributes[j.coordinate] = {}
def add_resolution_coords(self, target, coords):
for c in coords:
self.target_to_resolved_coordinates[target].add(c)
self.all_resolved_coordinates.add(c)
def target_spec_to_coordinate_strings(self):
return {t.address.spec: [str(c) for c in coordinates]
for t, coordinates in self.target_to_resolved_coordinates.items()}
def __repr__(self):
return 'FrozenResolution(\n target_to_resolved_coordinates\n {}\n all\n {}'.format(
'\n '.join(': '.join([t.address.spec,
'\n '.join(str(c) for c in cs)])
for t,cs in self.target_to_resolved_coordinates.items()),
'\n '.join(str(c) for c in self.coordinate_to_attributes.keys())
)
def __eq__(self, other):
return (type(self) == type(other) and
self.all_resolved_coordinates == other.all_resolved_coordinates and
self.target_to_resolved_coordinates == other.target_to_resolved_coordinates)
def __ne__(self, other):
return not self == other
@classmethod
def load_from_file(cls, filename, targets):
if not os.path.exists(filename):
return None
with open(filename, 'r') as f:
# Using OrderedDict here to maintain insertion order of dict entries.
from_file = json.load(f, object_pairs_hook=OrderedDict)
result = {}
target_lookup = {t.address.spec: t for t in targets}
for conf, serialized_resolution in from_file.items():
resolution = FrozenResolution()
def m2_for(c):
return M2Coordinate.from_string(c)
for coord, attr_dict in serialized_resolution['coord_to_attrs'].items():
m2 = m2_for(coord)
resolution.coordinate_to_attributes[m2] = attr_dict
for spec, coord_strs in serialized_resolution['target_to_coords'].items():
t = target_lookup.get(spec, None)
if t is None:
raise cls.MissingTarget('Cannot find target for address {} in frozen resolution'
.format(spec))
resolution.add_resolution_coords(t, [m2_for(c) for c in coord_strs])
result[conf] = resolution
return result
@classmethod
def dump_to_file(cls, filename, resolutions_by_conf):
res = {}
for conf, resolution in resolutions_by_conf.items():
res[conf] = OrderedDict([
['target_to_coords',resolution.target_spec_to_coordinate_strings()],
['coord_to_attrs', OrderedDict([str(c), attrs]
for c, attrs in resolution.coordinate_to_attributes.items())]
])
with safe_concurrent_creation(filename) as tmp_filename:
mode = 'w' if PY3 else 'wb'
with open(tmp_filename, mode) as f:
json.dump(res, f)
class IvyResolveResult(object):
"""The result of an Ivy resolution.
The result data includes the list of resolved artifacts, the relationships between those artifacts
and the targets that requested them and the hash name of the resolve.
"""
def __init__(self, resolved_artifact_paths, hardlink_map, resolve_hash_name, reports_by_conf):
self._reports_by_conf = reports_by_conf
self.resolved_artifact_paths = resolved_artifact_paths
self.resolve_hash_name = resolve_hash_name
self._hardlink_map = hardlink_map
@property
def has_resolved_artifacts(self):
"""The requested targets have a resolution associated with them."""
return self.resolve_hash_name is not None
def all_linked_artifacts_exist(self):
"""All of the artifact paths for this resolve point to existing files."""
if not self.has_resolved_artifacts:
return False
for path in self.resolved_artifact_paths:
if not os.path.isfile(path):
return False
else:
return True
def report_for_conf(self, conf):
"""Returns the path to the ivy report for the provided conf.
Returns None if there is no path.
"""
return self._reports_by_conf.get(conf)
def get_frozen_resolutions_by_conf(self, targets):
frozen_resolutions_by_conf = OrderedDict()
for conf in self._reports_by_conf:
frozen_resolution = FrozenResolution()
for target, resolved_jars in self.resolved_jars_for_each_target(conf, targets):
frozen_resolution.add_resolved_jars(target, resolved_jars)
frozen_resolutions_by_conf[conf] = frozen_resolution
return frozen_resolutions_by_conf
def resolved_jars_for_each_target(self, conf, targets):
"""Yields the resolved jars for each passed JarLibrary.
If there is no report for the requested conf, yields nothing.
:param conf: The ivy conf to load jars for.
:param targets: The collection of JarLibrary targets to find resolved jars for.
:yield: target, resolved_jars
:raises IvyTaskMixin.UnresolvedJarError
"""
ivy_info = self._ivy_info_for(conf)
if not ivy_info:
return
jar_library_targets = [t for t in targets if isinstance(t, JarLibrary)]
ivy_jar_memo = {}
for target in jar_library_targets:
# Add the artifacts from each dependency module.
resolved_jars = self._resolved_jars_with_hardlinks(conf, ivy_info, ivy_jar_memo,
self._jar_dependencies_for_target(conf,
target),
target)
yield target, resolved_jars
def _jar_dependencies_for_target(self, conf, target):
return target.jar_dependencies
def _ivy_info_for(self, conf):
report_path = self._reports_by_conf.get(conf)
return IvyUtils.parse_xml_report(conf, report_path)
def _new_resolved_jar_with_hardlink_path(self, conf, target, resolved_jar_without_hardlink):
def candidate_cache_paths():
# There is a focus on being lazy here to avoid `os.path.realpath` when we can.
yield resolved_jar_without_hardlink.cache_path
yield os.path.realpath(resolved_jar_without_hardlink.cache_path)
for cache_path in candidate_cache_paths():
pants_path = self._hardlink_map.get(cache_path)
if pants_path:
break
else:
raise IvyResolveMappingError(
'Jar {resolved_jar} in {spec} not resolved to the ivy '
'hardlink map in conf {conf}.'
.format(spec=target.address.spec,
resolved_jar=resolved_jar_without_hardlink.cache_path,
conf=conf))
return ResolvedJar(coordinate=resolved_jar_without_hardlink.coordinate,
pants_path=pants_path,
cache_path=resolved_jar_without_hardlink.cache_path)
def _resolved_jars_with_hardlinks(self, conf, ivy_info, ivy_jar_memo, coordinates, target):
raw_resolved_jars = ivy_info.get_resolved_jars_for_coordinates(coordinates,
memo=ivy_jar_memo)
resolved_jars = [self._new_resolved_jar_with_hardlink_path(conf, target, raw_resolved_jar)
for raw_resolved_jar in raw_resolved_jars]
return resolved_jars
class IvyFetchResolveResult(IvyResolveResult):
"""A resolve result that uses the frozen resolution to look up dependencies."""
def __init__(self, resolved_artifact_paths, hardlink_map, resolve_hash_name, reports_by_conf,
frozen_resolutions):
super(IvyFetchResolveResult, self).__init__(resolved_artifact_paths, hardlink_map,
resolve_hash_name, reports_by_conf)
self._frozen_resolutions = frozen_resolutions
def _jar_dependencies_for_target(self, conf, target):
return self._frozen_resolutions[conf].target_to_resolved_coordinates.get(target, ())
NO_RESOLVE_RUN_RESULT = IvyResolveResult([], {}, None, {})
IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])
Dependency = namedtuple('DependencyAttributes',
['org', 'name', 'rev', 'mutable', 'force', 'transitive'])
Artifact = namedtuple('Artifact', ['name', 'type_', 'ext', 'url', 'classifier'])
logger = logging.getLogger(__name__)
class IvyResolveMappingError(Exception):
"""Raised when there is a failure mapping the ivy resolve results to pants objects."""
@total_ordering
class IvyModuleRef(object):
"""
:API: public
"""
# latest.integration is ivy magic meaning "just get the latest version"
_ANY_REV = 'latest.integration'
def __init__(self, org, name, rev, classifier=None, ext=None):
self.org = org
self.name = name
self.rev = rev
self.classifier = classifier
self.ext = ext or 'jar'
self._id = (self.org, self.name, self.rev, self.classifier, self.ext)
def __eq__(self, other):
return isinstance(other, IvyModuleRef) and self._id == other._id
# TODO(#6071): Return NotImplemented if other does not have attributes
def __lt__(self, other):
    # We can't just re-use __repr__ or __str__ because we want to order rev last
return ((self.org, self.name, self.classifier or '', self.ext, self.rev) <
(other.org, other.name, other.classifier or '', other.ext, other.rev))
def __hash__(self):
return hash(self._id)
def __str__(self):
return 'IvyModuleRef({})'.format(':'.join((x or '') for x in self._id))
def __repr__(self):
return ('IvyModuleRef(org={!r}, name={!r}, rev={!r}, classifier={!r}, ext={!r})'
.format(*self._id))
@property
def caller_key(self):
"""This returns an identifier for an IvyModuleRef that only retains the caller org and name.
Ivy represents dependees as `<caller/>`'s with just org and name and rev information.
This method returns a `<caller/>` representation of the current ref.
"""
return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV)
@property
def unversioned(self):
"""This returns an identifier for an IvyModuleRef without version information.
It's useful because ivy might return information about a different version of a dependency than
the one we request, and we want to ensure that all requesters of any version of that dependency
are able to learn about it.
"""
return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV, classifier=self.classifier,
ext=self.ext)
class IvyInfo(object):
"""
:API: public
"""
def __init__(self, conf):
self._conf = conf
self.modules_by_ref = {} # Map from ref to referenced module.
self.refs_by_unversioned_refs = {} # Map from unversioned ref to the resolved versioned ref
# Map from ref of caller to refs of modules required by that caller.
self._deps_by_caller = defaultdict(OrderedSet)
# Map from _unversioned_ ref to OrderedSet of IvyArtifact instances.
self._artifacts_by_ref = defaultdict(OrderedSet)
def add_module(self, module):
if not module.artifact:
# Module was evicted, so do not record information about it
return
ref_unversioned = module.ref.unversioned
if ref_unversioned in self.refs_by_unversioned_refs:
raise IvyResolveMappingError('Already defined module {}, as rev {}!'
.format(ref_unversioned, module.ref.rev))
if module.ref in self.modules_by_ref:
raise IvyResolveMappingError('Already defined module {}, would be overwritten!'
.format(module.ref))
self.refs_by_unversioned_refs[ref_unversioned] = module.ref
self.modules_by_ref[module.ref] = module
for caller in module.callers:
self._deps_by_caller[caller.caller_key].add(module.ref)
self._artifacts_by_ref[ref_unversioned].add(module.artifact)
def _do_traverse_dependency_graph(self, ref, collector, memo, visited):
memoized_value = memo.get(ref)
if memoized_value:
return memoized_value
if ref in visited:
# Ivy allows for circular dependencies
# If we're here, that means we're resolving something that
# transitively depends on itself
return set()
visited.add(ref)
acc = collector(ref)
# NB(zundel): ivy does not return deps in a consistent order for the same module for
# different resolves. Sort them to get consistency and prevent cache invalidation.
# See https://github.com/pantsbuild/pants/issues/2607
deps = sorted(self._deps_by_caller.get(ref.caller_key, ()))
for dep in deps:
acc.update(self._do_traverse_dependency_graph(dep, collector, memo, visited))
memo[ref] = acc
return acc
def traverse_dependency_graph(self, ref, collector, memo=None):
"""Traverses module graph, starting with ref, collecting values for each ref into the sets
created by the collector function.
:param ref an IvyModuleRef to start traversing the ivy dependency graph
:param collector a function that takes a ref and returns a new set of values to collect for
that ref, which will also be updated with all the dependencies accumulated values
:param memo is a dict of ref -> set that memoizes the results of each node in the graph.
If provided, allows for retaining cache across calls.
:returns the accumulated set for ref
"""
resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
if resolved_ref:
ref = resolved_ref
if memo is None:
memo = dict()
visited = set()
return self._do_traverse_dependency_graph(ref, collector, memo, visited)
def get_resolved_jars_for_coordinates(self, coordinates, memo=None):
"""Collects jars for the passed coordinates.
Because artifacts are only fetched for the "winning" version of a module, the artifacts
will not always represent the version originally declared by the library.
This method is transitive within the passed coordinates dependencies.
:param coordinates collections.Iterable: Collection of coordinates to collect transitive
resolved jars for.
:param memo: See `traverse_dependency_graph`.
:returns: All the artifacts for all of the jars for the provided coordinates,
including transitive dependencies.
:rtype: list of :class:`pants.java.jar.ResolvedJar`
"""
def to_resolved_jar(jar_ref, jar_path):
return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
name=jar_ref.name,
rev=jar_ref.rev,
classifier=jar_ref.classifier,
ext=jar_ref.ext),
cache_path=jar_path)
resolved_jars = OrderedSet()
def create_collection(dep):
return OrderedSet([dep])
for jar in coordinates:
classifier = jar.classifier if self._conf == 'default' else self._conf
jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier, jar.ext)
for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
return resolved_jars
def __repr__(self):
return 'IvyInfo(conf={}, refs={})'.format(self._conf, self.modules_by_ref.keys())
class IvyUtils(object):
"""Useful methods related to interaction with ivy.
:API: public
"""
# Protects ivy executions.
_ivy_lock = threading.RLock()
# Protect writes to the global map of jar path -> hardlinks to that jar.
_hardlink_map_lock = threading.Lock()
INTERNAL_ORG_NAME = 'internal'
class IvyError(Exception):
"""Indicates an error preparing an ivy operation."""
class IvyResolveReportError(IvyError):
"""Indicates that an ivy report cannot be found."""
class IvyResolveConflictingDepsError(IvyError):
"""Indicates two or more locally declared dependencies conflict."""
class BadRevisionError(IvyError):
"""Indicates an unparseable version number."""
@staticmethod
def _generate_exclude_template(exclude):
return TemplateData(org=exclude.org, name=exclude.name)
@staticmethod
def _generate_override_template(jar):
return TemplateData(org=jar.org, module=jar.name, version=jar.rev)
@staticmethod
def _load_classpath_from_cachepath(path):
if not os.path.exists(path):
return []
else:
with safe_open(path, 'r') as cp:
return [_f for _f in (path.strip() for path in cp.read().split(os.pathsep)) if _f]
@classmethod
def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf,
confs, ivy_resolution_cache_dir, ivy_cache_classpath_filename, resolve_hash_name,
workunit_factory, workunit_name):
"""Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.
This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending
on whether there is an existing frozen resolution.
After it is run, the Ivy reports are copied into the workdir at the paths specified by
workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts
and their transitive dependencies.
:param executor: A JVM executor to use to invoke ivy.
:param extra_args: Extra arguments to pass to ivy.
:param ivyxml: The input ivy.xml containing the dependencies to resolve.
:param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
:param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
:param confs: The confs used in the resolve.
:param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
:param workunit_factory: A workunit factory for the ivy invoke, or None.
:param workunit_name: A workunit name for the ivy invoke, or None.
"""
ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory)
with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp:
extra_args = extra_args or []
args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args
with cls._ivy_lock:
cls._exec_ivy(ivy, confs, ivyxml, args,
jvm_options=jvm_options,
executor=executor,
workunit_name=workunit_name,
workunit_factory=workunit_factory)
if not os.path.exists(raw_target_classpath_file_tmp):
raise cls.IvyError('Ivy failed to create classpath file at {}'
.format(raw_target_classpath_file_tmp))
cls._copy_ivy_reports(workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name)
logger.debug('Moved ivy classfile file to {dest}'
.format(dest=ivy_cache_classpath_filename))
@classmethod
def _copy_ivy_reports(cls, workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name):
for conf in confs:
ivy_cache_report_path = IvyUtils.xml_report_path(ivy_resolution_cache_dir, resolve_hash_name,
conf)
workdir_report_path = workdir_report_paths_by_conf[conf]
try:
atomic_copy(ivy_cache_report_path,
workdir_report_path)
except IOError as e:
raise cls.IvyError('Failed to copy report into workdir from {} to {}: {}'
.format(ivy_cache_report_path, workdir_report_path, e))
@classmethod
def _exec_ivy(cls, ivy, confs, ivyxml, args, jvm_options, executor,
workunit_name, workunit_factory):
ivy = ivy or Bootstrapper.default_ivy()
ivy_args = ['-ivy', ivyxml]
ivy_args.append('-confs')
ivy_args.extend(confs)
ivy_args.extend(args)
ivy_jvm_options = list(jvm_options)
# Disable cache in File.getCanonicalPath(), makes Ivy work with -symlink option properly on ng.
ivy_jvm_options.append('-Dsun.io.useCanonCaches=false')
runner = ivy.runner(jvm_options=ivy_jvm_options, args=ivy_args, executor=executor)
try:
with ivy.resolution_lock:
result = execute_runner(runner, workunit_factory=workunit_factory,
workunit_name=workunit_name)
if result != 0:
raise IvyUtils.IvyError('Ivy returned {result}. cmd={cmd}'.format(result=result,
cmd=runner.cmd))
except runner.executor.Error as e:
raise IvyUtils.IvyError(e)
@classmethod
def construct_and_load_hardlink_map(cls, hardlink_dir, ivy_repository_cache_dir,
ivy_cache_classpath_filename, hardlink_classpath_filename):
# Make our actual classpath be hardlinks, so that the paths are uniform across systems.
# Note that we must do this even if we read the raw_target_classpath_file from the artifact
# cache. If we cache the target_classpath_file we won't know how to create the hardlinks.
with IvyUtils._hardlink_map_lock:
# A common dir for hardlinks into the ivy2 cache. This ensures that paths to jars
# in artifact-cached analysis files are consistent across systems.
# Note that we have one global, well-known hardlink dir, again so that paths are
# consistent across builds.
hardlink_map = cls._hardlink_cachepath(ivy_repository_cache_dir,
ivy_cache_classpath_filename,
hardlink_dir,
hardlink_classpath_filename)
classpath = cls._load_classpath_from_cachepath(hardlink_classpath_filename)
return classpath, hardlink_map
@classmethod
def _hardlink_cachepath(cls, ivy_repository_cache_dir, inpath, hardlink_dir, outpath):
"""hardlinks all paths listed in inpath that are under ivy_repository_cache_dir into hardlink_dir.
If there is an existing hardlink for a file under inpath, it is used rather than creating
a new hardlink. Preserves all other paths. Writes the resulting paths to outpath.
Returns a map of path -> hardlink to that path.
"""
safe_mkdir(hardlink_dir)
# The ivy_repository_cache_dir might itself be a hardlink. In this case, ivy may return paths that
# reference the realpath of the .jar file after it is resolved in the cache dir. To handle
# this case, add both the hardlink'ed path and the realpath to the jar to the hardlink map.
real_ivy_cache_dir = os.path.realpath(ivy_repository_cache_dir)
hardlink_map = OrderedDict()
inpaths = cls._load_classpath_from_cachepath(inpath)
paths = OrderedSet([os.path.realpath(path) for path in inpaths])
for path in paths:
if path.startswith(real_ivy_cache_dir):
hardlink_map[path] = os.path.join(hardlink_dir, os.path.relpath(path, real_ivy_cache_dir))
else:
# This path is outside the cache. We won't hardlink it.
hardlink_map[path] = path
# Create hardlinks for paths in the ivy cache dir.
for path, hardlink in six.iteritems(hardlink_map):
if path == hardlink:
# Skip paths that aren't going to be hardlinked.
continue
safe_mkdir(os.path.dirname(hardlink))
safe_hardlink_or_copy(path, hardlink)
# (re)create the classpath with all of the paths
with safe_open(outpath, 'w') as outfile:
outfile.write(':'.join(OrderedSet(hardlink_map.values())))
return dict(hardlink_map)
@classmethod
def xml_report_path(cls, resolution_cache_dir, resolve_hash_name, conf):
"""The path to the xml report ivy creates after a retrieve.
:API: public
    :param string resolution_cache_dir: The path of the ivy resolution cache dir used for resolves.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
resolution.
:param string conf: The ivy conf name (e.g. "default").
:returns: The report path.
:rtype: string
"""
return os.path.join(resolution_cache_dir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
resolve_hash_name, conf))
@classmethod
def parse_xml_report(cls, conf, path):
"""Parse the ivy xml report corresponding to the name passed to ivy.
:API: public
:param string conf: the ivy conf name (e.g. "default")
:param string path: The path to the ivy report file.
:returns: The info in the xml report.
:rtype: :class:`IvyInfo`
:raises: :class:`IvyResolveMappingError` if no report exists.
"""
if not os.path.exists(path):
raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))
logger.debug("Parsing ivy report {}".format(path))
ret = IvyInfo(conf)
etree = ET.parse(path)
doc = etree.getroot()
for module in doc.findall('dependencies/module'):
org = module.get('organisation')
name = module.get('name')
for revision in module.findall('revision'):
rev = revision.get('name')
callers = []
for caller in revision.findall('caller'):
callers.append(IvyModuleRef(caller.get('organisation'),
caller.get('name'),
caller.get('callerrev')))
for artifact in revision.findall('artifacts/artifact'):
classifier = artifact.get('extra-classifier')
ext = artifact.get('ext')
ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev,
classifier=classifier, ext=ext)
artifact_cache_path = artifact.get('location')
ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, tuple(callers))
ret.add_module(ivy_module)
return ret
@classmethod
def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None,
pinned_artifacts=None, jar_dep_manager=None):
if not resolve_hash_name:
resolve_hash_name = Target.maybe_readable_identify(targets)
return cls._generate_resolve_ivy(jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts,
jar_dep_manager)
@classmethod
def _generate_resolve_ivy(cls, jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts=None,
jar_dep_manager=None):
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
jars_by_key = OrderedDict()
for jar in jars:
      jars_by_key.setdefault((jar.org, jar.name), []).append(jar)
manager = jar_dep_manager or JarDependencyManagement.global_instance()
artifact_set = PinnedJarArtifactSet(pinned_artifacts) # Copy, because we're modifying it.
for jars in jars_by_key.values():
for i, dep in enumerate(jars):
direct_coord = M2Coordinate.create(dep)
managed_coord = artifact_set[direct_coord]
if direct_coord.rev != managed_coord.rev:
# It may be necessary to actually change the version number of the jar we want to resolve
# here, because overrides do not apply directly (they are exclusively transitive). This is
# actually a good thing, because it gives us more control over what happens.
coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force)
jars[i] = dep.copy(rev=coord.rev)
elif dep.force:
# If this dependency is marked as 'force' and there is no version conflict, use the normal
# pants behavior for 'force'.
artifact_set.put(direct_coord)
dependencies = [cls._generate_jar_template(jars) for jars in jars_by_key.values()]
# As it turns out force is not transitive - it only works for dependencies pants knows about
# directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
# don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
# edit the generated ivy.xml and use the override feature [3] though and that does work
# transitively as you'd hope.
#
# [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
# [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
# src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
# [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
overrides = [cls._generate_override_template(_coord) for _coord in artifact_set]
excludes = [cls._generate_exclude_template(exclude) for exclude in excludes]
template_data = TemplateData(
org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies,
excludes=excludes,
overrides=overrides)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name):
"""Generates an ivy xml with all jars marked as intransitive using the all conflict manager."""
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
# Use org name _and_ rev so that we can have dependencies with different versions. This will
# allow for batching fetching if we want to do that.
jars_by_key = OrderedDict()
for jar in jars:
jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar)
dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()]
template_data = TemplateData(org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def _write_ivy_xml_file(cls, ivyxml, template_data, template_relpath):
template_text = pkgutil.get_data(__name__, template_relpath).decode('utf-8')
generator = Generator(template_text, lib=template_data)
with safe_open(ivyxml, 'w') as output:
generator.write(output)
@classmethod
def calculate_classpath(cls, targets):
"""Creates a consistent classpath and list of excludes for the passed targets.
It also modifies the JarDependency objects' excludes to contain all the jars excluded by
provides.
:param iterable targets: List of targets to collect JarDependencies and excludes from.
:returns: A pair of a list of JarDependencies, and a set of excludes to apply globally.
"""
jars = OrderedDict()
global_excludes = set()
provide_excludes = set()
targets_processed = set()
# Support the ivy force concept when we sanely can for internal dep conflicts.
# TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
# strategy generally.
def add_jar(jar):
# TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its
      # attachments (classified artifacts) at another. Ivy does not allow this; the dependency
# can carry only 1 rev and that hosts multiple artifacts for that rev. This conflict
# resolution happens at the classifier level, allowing skew in a
# multi-artifact/multi-classifier dependency. We only find out about the skew later in
# `_generate_jar_template` below which will blow up with a conflict. Move this logic closer
# together to get a more clear validate, then emit ivy.xml then resolve flow instead of the
# spread-out validations happening here.
# See: https://github.com/pantsbuild/pants/issues/2239
coordinate = (jar.org, jar.name, jar.classifier)
existing = jars.get(coordinate)
jars[coordinate] = jar if not existing else cls._resolve_conflict(existing=existing,
proposed=jar)
def collect_jars(target):
if isinstance(target, JarLibrary):
for jar in target.jar_dependencies:
add_jar(jar)
def collect_excludes(target):
target_excludes = target.payload.get_field_value('excludes')
if target_excludes:
global_excludes.update(target_excludes)
def collect_provide_excludes(target):
if not (isinstance(target, ExportableJvmLibrary) and target.provides):
return
logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format(
target.provides.org, target.provides.name, target))
provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name))
def collect_elements(target):
targets_processed.add(target)
collect_jars(target)
collect_excludes(target)
collect_provide_excludes(target)
for target in targets:
target.walk(collect_elements, predicate=lambda target: target not in targets_processed)
# If a source dep is exported (ie, has a provides clause), it should always override
# remote/binary versions of itself, ie "round trip" dependencies.
# TODO: Move back to applying provides excludes as target-level excludes when they are no
# longer global.
if provide_excludes:
additional_excludes = tuple(provide_excludes)
new_jars = OrderedDict()
for coordinate, jar in jars.items():
new_jars[coordinate] = jar.copy(excludes=jar.excludes + additional_excludes)
jars = new_jars
return list(jars.values()), global_excludes
@classmethod
def _resolve_conflict(cls, existing, proposed):
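    # Prefer whichever side pins a rev, honor force flags (two conflicting forces are an
    # error), and otherwise keep the higher revision.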
if existing.rev is None:
return proposed
if proposed.rev is None:
return existing
if proposed == existing:
if proposed.force:
return proposed
return existing
elif existing.force and proposed.force:
raise cls.IvyResolveConflictingDepsError('Cannot force {}#{};{} to both rev {} and {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
elif existing.force:
logger.debug('Ignoring rev {} for {}#{};{} already forced to {}'.format(
proposed.rev, proposed.org, proposed.name, proposed.classifier or '', existing.rev
))
return existing
elif proposed.force:
logger.debug('Forcing {}#{};{} from {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
return proposed
else:
if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev):
logger.debug('Upgrading {}#{};{} from rev {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev,
))
return proposed
else:
return existing
@classmethod
def _generate_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
mutable=jar.mutable,
force=jar.force,
transitive=jar.transitive)
for jar in jars)
if len(global_dep_attributes) != 1:
# TODO: Need to provide information about where these came from - could be
# far-flung JarLibrary targets. The jars here were collected from targets via
# `calculate_classpath` above so executing this step there instead may make more
# sense.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
excludes = set()
for jar in jars:
excludes.update(jar.excludes)
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
force=jar_attributes.force,
transitive=jar_attributes.transitive,
artifacts=list(artifacts.values()),
any_have_url=any_have_url,
excludes=[cls._generate_exclude_template(exclude) for exclude in excludes])
return template
@classmethod
def _generate_fetch_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
transitive=False,
mutable=jar.mutable,
force=True)
for jar in jars)
if len(global_dep_attributes) != 1:
# If we batch fetches and assume conflict manager all, we could ignore these.
# Leaving this here for now.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
artifacts=list(artifacts.values()),
any_have_url=any_have_url,
excludes=[])
return template
| apache-2.0 | -425,431,684,784,504,900 | 40.837684 | 111 | 0.651765 | false | 3.948957 | false | false | false |
patochectp/navitia | source/tyr/migrations/versions/3e56c7e0a4a4_create_billing_plan_table.py | 1 | 2152 | """Create billing_plan table
Revision ID: 3e56c7e0a4a4
Revises: 3aaddd5707bd
Create Date: 2015-11-05 13:30:32.460413
"""
revision = '3e56c7e0a4a4'
down_revision = '3aaddd5707bd'
from alembic import op, context
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import Enum
def upgrade():
op.create_table(
'billing_plan',
sa.Column('id', sa.Integer(), primary_key=True, nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('max_request_count', sa.Integer(), nullable=True),
sa.Column('max_object_count', sa.Integer(), nullable=True),
sa.Column('default', sa.Boolean(), nullable=False),
sa.Column('end_point_id', sa.Integer(), nullable=False, server_default='1'),
)
op.create_foreign_key("fk_billing_plan_end_point", "billing_plan", "end_point", ["end_point_id"], ["id"])
op.execute(
"INSERT INTO end_point (name, \"default\") SELECT 'sncf',false WHERE NOT EXISTS (SELECT id FROM end_point WHERE name = 'sncf');"
)
op.execute(
"INSERT INTO billing_plan (name, max_request_count, max_object_count, \"default\", end_point_id) VALUES ('nav_dev',3000,NULL,false,1),('nav_ent',NULL,NULL,false,1),('nav_ctp',NULL,NULL,true,1),('sncf_dev',3000,60000,true,(SELECT id FROM end_point WHERE name='sncf')),('sncf_ent',NULL,NULL,false,(SELECT id FROM end_point WHERE name='sncf'));"
)
op.add_column(u'user', sa.Column('billing_plan_id', sa.Integer(), nullable=True))
op.create_foreign_key("fk_user_billing_plan", "user", "billing_plan", ["billing_plan_id"], ["id"])
op.execute(
"UPDATE public.user SET billing_plan_id = (SELECT b.id FROM billing_plan b WHERE b.default AND end_point_id=1) WHERE end_point_id=1;"
)
op.execute(
"UPDATE public.user u SET billing_plan_id = (SELECT b.id FROM billing_plan b INNER JOIN end_point ep ON ep.id = b.end_point_id WHERE b.default AND ep.name='sncf') FROM end_point ep WHERE ep.id = u.end_point_id AND ep.name='sncf';"
)
def downgrade():
op.drop_column('user', 'billing_plan_id')
op.drop_table('billing_plan')
| agpl-3.0 | 2,663,967,288,322,674,700 | 40.384615 | 350 | 0.664963 | false | 3.096403 | false | false | false |
vincegogh/ByteOS | util/syscall_gen.py | 1 | 1793 | import sys as _sys
if len(_sys.argv) != 2:
print("Incorrect number of arguments")
exit(-1)
elif _sys.argv[1] not in ["c", "h", "asm"]:
print("Invalid filetype %s" % _sys.argv[1])
exit(-1)
print("\t\033[32;1mGenerating\033[0m include/gen/syscall_gen.%s" % _sys.argv[1])
syscall_list = []
def syscall(name, args="void"):
syscall_list.append({ "name": name, "args": args })
syscall("write", "char c")
syscall("fork", "uint64_t flags, struct callee_regs *regs, virtaddr_t return_addr")
syscall("exit", "int code")
syscall("sched_yield")
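# Note: to expose another syscall it should be enough to append one more syscall(...)
# registration here (e.g. syscall("getpid") -- the name is only illustrative); the C table,
# header defines and asm defines below are all generated from syscall_list.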
decls = [
"#include \"mm_types.h\"",
"#include \"proc.h\"",
"",
]
defs = [
"#define ENOSYS 0xFFFFFFFFFFFFFFFFLL",
"#define NUM_SYSCALLS %d" % len(syscall_list)
]
table = [
"syscall_t syscall_table[NUM_SYSCALLS] = {"
]
asm_defs = [
"%define ENOSYS 0xFFFFFFFFFFFFFFFF",
"%%define NUM_SYSCALLS %d" % len(syscall_list)
]
for i in range(0, len(syscall_list)):
sys = syscall_list[i]
defs.append("#define SYSCALL_%s %d" % (sys["name"].upper(), i))
table.append("\t[SYSCALL_%s] = (syscall_t)syscall_%s," % (sys["name"].upper(), sys["name"]))
decls.append("int64_t syscall_%s(%s);" % (sys["name"], sys["args"]))
asm_defs.append("%%define SYSCALL_%s %d" % (sys["name"].upper(), i))
table.append("};")
defs = "\n".join(defs)
table = "\n".join(table)
decls = "\n".join(decls)
asm_defs = "\n".join(asm_defs)
h_out = """#pragma once
%s
%s
extern syscall_t syscall_table[NUM_SYSCALLS];
""" % (defs, decls)
c_out = """%s
""" % table
asm_out = """%s
""" % asm_defs
out_data = { "c": c_out, "h": h_out, "asm": asm_out }
prefix = "include/gen/syscall_gen"
path = prefix + "." + _sys.argv[1]
target_file = open(path, "w")
target_file.write(out_data[_sys.argv[1]])
target_file.close()
| mit | 3,375,265,367,951,926,000 | 22.906667 | 96 | 0.596765 | false | 2.636765 | false | false | false |
eseom/glide | glide/process.py | 1 | 5210 | import os
import multiprocessing
import asyncore
import datetime
class Status(object):
"""process status enum"""
REDY, RUNN, RSTT, STNG, KLNG, STPD, EXTD = \
'READY', 'RUNNING', 'RESTARTING', \
'STOPPING', 'KILLING', 'STOPPED', 'EXITED'
class Process(asyncore.file_dispatcher):
"""main process object"""
class Message(object):
"""container class of the emitted messages from the target process"""
def __init__(self, process, message):
self.process = process
self.message = message
def __str__(self):
return '%s: %s' % (self.process.name, self.message)
def __init__(self,
name,
path,
max_nl,
bm,
try_restart=-1,
kill_duration_time=20,
):
self.status = Status.REDY # initial status READY
self.name = name
self.path = path
self.max_nl = max_nl # max name length
self.bm = bm # blast module
self.try_restart = try_restart
self.kill_duration_time = kill_duration_time
self.bi = 0 # blast index
self.restarted = 0
self.rpi = 0
self.wpi = 0
self.status = Status.REDY
self.start_time = None
def start(self):
if self.status not in (Status.REDY, Status.STPD, Status.EXTD):
return False, 'already operating'
self.rpi, self.wpi = os.pipe()
self.process = multiprocessing.Process(
target=self.__execute,
args=(self.path, self.rpi, self.wpi)
)
self.process.start()
self.pid = self.process.pid
# register the pipe's reader descriptor to asyncore
asyncore.file_dispatcher.__init__(self, self.rpi)
self.status = Status.RUNN
self.start_time = datetime.datetime.now()
self.elapsed_rule_time = None
return True, ''
def __execute(self, path, rpi, wpi):
pid = os.getpid()
# set the child process as a process group master itself
os.setpgid(pid, pid)
os.dup2(wpi, 1)
os.dup2(wpi, 2)
os.close(wpi)
os.close(rpi)
os.execv(path[0], path)
def handle_read(self):
data = []
try:
while True: # read data from the pipe's reader
d = self.recv(1)
if d == '\n':
break
data.append(d)
# blast to the registered blast module
self.bm(Process.Message(self, ''.join(data)), self.bi)
self.bi += 1
except OSError: # tried to read after the descriptor closed
pass
def writable(self):
"""trick: add timeout callback implementation"""
if self.elapsed_rule_time:
self.elapsed_time = datetime.datetime.now() - self.elapsed_rule_time
if self.elapsed_time > \
datetime.timedelta(seconds=self.kill_duration_time):
os.kill(self.pid, 9)
return False
def terminate(self):
try:
self.elapsed_rule_time = datetime.datetime.now()
self.process.terminate()
except OSError: # no such process id
pass
def stop(self):
if self.status != Status.RUNN:
return False, 'not running'
self.status = Status.STNG
self.terminate()
return True, ''
def restart(self):
if self.status != Status.RUNN:
return False, 'not running'
self.status = Status.RSTT
self.terminate()
return True, ''
def hangup(self):
if self.status != Status.RUNN:
return False, 'not running'
        os.kill(self.pid, 1)
return True, ''
def alarm(self):
if self.status != Status.RUNN:
return False, 'not running'
        os.kill(self.pid, 14)
return True, ''
def cleanup(self):
for descriptor in [self.rpi, self.wpi]:
try:
os.close(descriptor)
except:
pass
asyncore.file_dispatcher.close(self)
if ((self.try_restart == -1 or self.try_restart > self.restarted) and
self.status == Status.EXTD) or self.status == Status.RSTT:
self.restarted += 1
self.status = Status.REDY
self.start()
return self
else:
self.status = Status.STPD
return None
def handle_error(self):
nil, t, v, tbinfo = asyncore.compact_traceback()
print '---', nil, t, v, tbinfo
def __str__(self):
if self.status not in (Status.STPD, Status.REDY, Status.EXTD):
tmpl = '%-' + str(self.max_nl) + \
's %10s pid %5s, uptime %s sec'
return tmpl % (self.name,
self.status,
self.pid,
datetime.datetime.now() - self.start_time)
else:
tmpl = '%-' + str(self.max_nl) + 's %10s'
return tmpl % (self.name,
self.status,)
| mit | -5,170,071,940,740,597,000 | 29.828402 | 80 | 0.51881 | false | 3.998465 | false | false | false |
rjschwei/azure-sdk-for-python | azure-mgmt-logic/azure/mgmt/logic/models/x12_validation_override.py | 1 | 3184 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class X12ValidationOverride(Model):
"""X12ValidationOverride.
:param message_id: The message id on which the validation settings has to
be applied.
:type message_id: str
:param validate_edi_types: The value indicating whether to validate EDI
types.
:type validate_edi_types: bool
:param validate_xsd_types: The value indicating whether to validate XSD
types.
:type validate_xsd_types: bool
:param allow_leading_and_trailing_spaces_and_zeroes: The value indicating
whether to allow leading and trailing spaces and zeroes.
:type allow_leading_and_trailing_spaces_and_zeroes: bool
:param validate_character_set: The value indicating whether to validate
character Set.
:type validate_character_set: bool
:param trim_leading_and_trailing_spaces_and_zeroes: The value indicating
whether to trim leading and trailing spaces and zeroes.
:type trim_leading_and_trailing_spaces_and_zeroes: bool
:param trailing_separator_policy: The trailing separator policy. Possible
values include: 'NotSpecified', 'NotAllowed', 'Optional', 'Mandatory'
:type trailing_separator_policy: str or :class:`TrailingSeparatorPolicy
<azure.mgmt.logic.models.TrailingSeparatorPolicy>`
"""
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'validate_edi_types': {'key': 'validateEDITypes', 'type': 'bool'},
'validate_xsd_types': {'key': 'validateXSDTypes', 'type': 'bool'},
'allow_leading_and_trailing_spaces_and_zeroes': {'key': 'allowLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'validate_character_set': {'key': 'validateCharacterSet', 'type': 'bool'},
'trim_leading_and_trailing_spaces_and_zeroes': {'key': 'trimLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'trailing_separator_policy': {'key': 'trailingSeparatorPolicy', 'type': 'TrailingSeparatorPolicy'},
}
def __init__(self, message_id=None, validate_edi_types=None, validate_xsd_types=None, allow_leading_and_trailing_spaces_and_zeroes=None, validate_character_set=None, trim_leading_and_trailing_spaces_and_zeroes=None, trailing_separator_policy=None):
self.message_id = message_id
self.validate_edi_types = validate_edi_types
self.validate_xsd_types = validate_xsd_types
self.allow_leading_and_trailing_spaces_and_zeroes = allow_leading_and_trailing_spaces_and_zeroes
self.validate_character_set = validate_character_set
self.trim_leading_and_trailing_spaces_and_zeroes = trim_leading_and_trailing_spaces_and_zeroes
self.trailing_separator_policy = trailing_separator_policy
| mit | 9,220,224,516,508,743,000 | 52.966102 | 252 | 0.685616 | false | 3.950372 | false | false | false |
iagcl/data_pipeline | data_pipeline/db/filedb.py | 1 | 3423 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: filedb
# Purpose: Represents a file-based "database"
#
# Notes:
#
###############################################################################
import glob
import os
import data_pipeline.constants.const as const
import data_pipeline.utils.filesystem as fsutils
from .file_query_results import FileQueryResults
from .db import Db
from os.path import basename
from data_pipeline.stream.file_reader import FileReader
class FileDb(Db):
def __init__(self):
super(FileDb, self).__init__()
self._file_reader = None
self._closed = True # Maintains the "closed" state for posterity
self._data_dir = None
@property
def dbtype(self):
return const.FILE
def _connect(self, connection_details):
self._closed = False
self._data_dir = connection_details.data_dir
def execute_stored_proc(self, stored_proc):
pass
def execute_query(self, tablename, arraysize, values=(),
post_process_func=None):
"""Return wrapper around file handle that reads line by line
:param str tablename
Files containing this tablename as its basename
will be read line-by-line as the query result.
:param int arraysize Unused
:param tuple values Unused
        :param function post_process_func
Function to execute on each record after retrieval
"""
if tablename is None:
return
filename = self.get_data_filename(tablename)
if filename is None:
return
self._logger.info("Querying file: {f}".format(f=filename))
return FileQueryResults(filename, post_process_func)
def get_data_filename(self, tablename):
glob_pattern = "{}*".format(os.path.join(self._data_dir, tablename))
matching_data_files = fsutils.insensitive_glob(glob_pattern)
for f in matching_data_files:
filename_with_ext = basename(f)
dot_i = filename_with_ext.find(const.DOT, 1)
if dot_i > 0:
filename_without_ext = filename_with_ext[:dot_i]
else:
filename_without_ext = filename_with_ext
if filename_without_ext.lower() == tablename.lower():
return f
return None
def execute(self, sql, values=(), log_sql=True):
pass
def commit(self):
pass
def rollback(self):
pass
def closed(self):
return self._closed
def disconnect(self):
self._closed = True
| apache-2.0 | -8,469,073,670,559,711,000 | 31.292453 | 79 | 0.621677 | false | 4.354962 | false | false | false |
stuliveshere/PySeis | docs/notebooks/toolbox/toolbox.py | 1 | 9786 | import numpy as np
import matplotlib.pyplot as pylab
from matplotlib.widgets import Slider
#==================================================
# decorators
#==================================================
def io(func):
'''
an io decorator that allows
input/output to be either a filename
(i.e. a string) or an array
'''
def wrapped(*args, **kwargs) :
if type(args[0]) == type(''):
workspace = read(args[0])
else:
workspace = args[0]
result = func(workspace, **kwargs)
if type(result) != type(None):
if type(args[1]) == type(''):
return write(result, args[1])
else:
return result
return wrapped
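# Illustrative use of the @io decorator (argument values are examples only): a decorated
# function such as agc() below can be called either with SU file paths,
#     agc("raw.su", "gained.su", window=250)
# or with an in-memory record array, keeping the result in memory,
#     gained = agc(dataset, None, window=250)
# In both cases the second positional argument only selects whether the result is written
# out or returned.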
#==================================================
# display tools
#==================================================
class KeyHandler(object):
def __init__(self, fig, ax, dataset, kwargs):
self.fig = fig
self.ax = ax
self.kwargs = kwargs
self.dataset = dataset
self.start = 0
if kwargs['primary'] == None:
self.slice = self.dataset
else:
keys = np.unique(dataset[kwargs['primary']])
self.keys = keys[::kwargs['step']]
self.nkeys = self.keys.size
self.ensemble()
if 'clip' in kwargs and kwargs['clip'] != 0:
self.clip = kwargs['clip']
else:
self.clip = np.mean(np.abs(self.dataset['trace']))
print 'PySeis Seismic Viewer'
print 'type "h" for help'
self.draw()
def __call__(self, e):
print e.xdata, e.ydata
if e.key == "right":
self.start += 1
self.ensemble()
elif e.key == "left":
self.start -= 1
self.ensemble()
elif e.key == "up":
self.clip /= 1.1
print self.clip
elif e.key == "down":
self.clip *= 1.1
print self.clip
elif e.key == "h":
print "right arrow: next gather"
print "left arrow: last gather"
print "up arrow: hotter"
print "down arrow: colder"
print "clip=", self.clip
else:
return
self.draw()
def draw(self):
self.ax.cla()
self.im = self.ax.imshow(self.slice['trace'].T, aspect='auto', cmap='Greys', vmax =self.clip, vmin=-1*self.clip)
try:
self.ax.set_title('%s = %d' %(self.kwargs['primary'], self.keys[self.start]))
except AttributeError:
pass
self.fig.canvas.draw()
def ensemble(self):
try:
self.slice = self.dataset[self.dataset[self.kwargs['primary']] == self.keys[self.start]]
except IndexError:
self.start = 0
@io
def display(dataset, **kwargs):
'''
iterates through dataset using
left and right keys
parameters required:
primary key
seconary key
step size
works well unless you want to load a big dataset...
'''
fig = pylab.figure()
ax = fig.add_subplot(111)
eventManager = KeyHandler(fig, ax, dataset, kwargs)
fig.canvas.mpl_connect('key_press_event',eventManager)
def scan(dataset):
print " %0-35s: %0-15s %s" %('key', 'min', 'max')
print "========================================="
for key in np.result_type(dataset).descr:
a = np.amin(dataset[key[0]])
b = np.amax(dataset[key[0]])
if (a != 0) and (b != 0):
print "%0-35s %0-15.3f %.3f" %(key, a, b)
print "========================================="
#~ def build_vels(times, velocities, ns=1000, dt=0.001):
#~ '''builds a full velocity trace from a list of vels and times'''
#~ tx = np.linspace(dt, dt*ns, ns)
#~ vels = np.interp(tx, times, velocities)
#~ vels = np.pad(vels, (100,100), 'reflect')
#~ vels = np.convolve(np.ones(100.0)/100.0, vels, mode='same')
#~ vels = vels[100:-100]
#~ return vels
@io
def cp(workspace, **params):
return workspace
@io
def agc(workspace, window=100, **params):
'''
automatic gain control
inputs:
window
'''
vec = np.ones(window, 'f')
func = np.apply_along_axis(lambda m: np.convolve(np.abs(m), vec, mode='same'), axis=-1, arr=workspace['trace'])
workspace['trace'] /= func
workspace['trace'][~np.isfinite(workspace['trace'])] = 0
workspace['trace'] /= np.amax(np.abs(workspace['trace']))
return workspace
def ricker(f, length=0.512, dt=0.001):
t = np.linspace(-length/2, (length-dt)/2, length/dt)
y = (1.0 - 2.0*(np.pi**2)*(f**2)*(t**2)) * np.exp(-(np.pi**2)*(f**2)*(t**2))
y = np.around(y, 10)
inds = np.nonzero(y)[0]
return y[np.amin(inds):np.amax(inds)]
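# Illustrative example (names are placeholders): build a 25 Hz Ricker wavelet at a 1 ms
# sample rate and convolve it into every trace of a dataset using conv() below:
#     wavelet = ricker(25.0, length=0.512, dt=0.001)
#     synthetic = conv(dataset, wavelet)
# where 'dataset' is a structured array with a 'trace' field, as used throughout this module.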
def conv(workspace, wavelet):
workspace['trace'] = np.apply_along_axis(lambda m: np.convolve(m, wavelet, mode='same'), axis=-1, arr=workspace['trace'])
return workspace
@io
def fx(workspace, **params):
f = np.abs(np.fft.rfft(workspace['trace'], axis=-1))
correction = np.mean(np.abs(f), axis=-1).reshape(-1,1)
f /= correction
f = 20.0*np.log10(f)[:,::-1]
freq = np.fft.rfftfreq(params['ns'], params['dt'])
print params['ns'], params['dt']
hmin = np.amin(workspace['cdp'])
hmax = np.amax(workspace['cdp'])
vmin = np.amin(freq)
vmax = np.amax(freq)
extent=[hmin,hmax,vmin,vmax]
pylab.imshow(f.T, aspect='auto', extent=extent)
def db(data):
return 20.0*np.log10(data)
import sys
import numpy as np
su_header_dtype = np.dtype([
('tracl', np.int32),
('tracr', np.int32),
('fldr', np.int32),
('tracf', np.int32),
('ep', np.int32),
('cdp', np.int32),
('cdpt', np.int32),
('trid', np.int16),
('nvs', np.int16),
('nhs', np.int16),
('duse', np.int16),
('offset', np.int32),
('gelev', np.int32),
('selev', np.int32),
('sdepth', np.int32),
('gdel', np.int32),
('sdel', np.int32),
('swdep', np.int32),
('gwdep', np.int32),
('scalel', np.int16),
('scalco', np.int16),
('sx', np.int32),
('sy', np.int32),
('gx', np.int32),
('gy', np.int32),
('counit', np.int16),
('wevel', np.int16),
('swevel', np.int16),
('sut', np.int16),
('gut', np.int16),
('sstat', np.int16),
('gstat', np.int16),
('tstat', np.int16),
('laga', np.int16),
('lagb', np.int16),
('delrt', np.int16),
('muts', np.int16),
('mute', np.int16),
('ns', np.uint16),
('dt', np.uint16),
('gain', np.int16),
('igc', np.int16),
('igi', np.int16),
('corr', np.int16),
('sfs', np.int16),
('sfe', np.int16),
('slen', np.int16),
('styp', np.int16),
('stas', np.int16),
('stae', np.int16),
('tatyp', np.int16),
('afilf', np.int16),
('afils', np.int16),
('nofilf', np.int16),
('nofils', np.int16),
('lcf', np.int16),
('hcf', np.int16),
('lcs', np.int16),
('hcs', np.int16),
('year', np.int16),
('day', np.int16),
('hour', np.int16),
('minute', np.int16),
('sec', np.int16),
('timebas', np.int16),
('trwf', np.int16),
('grnors', np.int16),
('grnofr', np.int16),
('grnlof', np.int16),
('gaps', np.int16),
('otrav', np.int16), #179,180
('d1', np.float32), #181,184
('f1', np.float32), #185,188
('d2', np.float32), #189,192
('f2', np.float32), #193, 196
('ShotPoint', np.int32), #197,200
('unscale', np.int16), #201, 204
('TraceValueMeasurementUnit', np.int16),
('TransductionConstantMantissa', np.int32),
('TransductionConstantPower', np.int16),
('TransductionUnit', np.int16),
('TraceIdentifier', np.int16),
('ScalarTraceHeader', np.int16),
('SourceType', np.int16),
('SourceEnergyDirectionMantissa', np.int32),
('SourceEnergyDirectionExponent', np.int16),
('SourceMeasurementMantissa', np.int32),
('SourceMeasurementExponent', np.int16),
('SourceMeasurementUnit', np.int16),
('UnassignedInt1', np.int32),
('ns1', np.int32),
])
def typeSU(ns):
return np.dtype(su_header_dtype.descr + [('trace', ('<f4',ns))])
def readSUheader(filename):
raw = open(filename, 'rb').read()
return np.fromstring(raw, dtype=su_header_dtype, count=1)
def read(filename=None):
if filename == None:
raw= sys.stdin.read()
else:
raw = open(filename, 'rb').read()
return readData(raw)
def readData(raw):
su_header = np.fromstring(raw, dtype=su_header_dtype, count=1)
ns = su_header['ns'][0]
file_dtype = typeSU(ns)
data = np.fromstring(raw, dtype=file_dtype)
return data
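# Illustrative usage of the SU readers above (the file name is just an example):
#     hdr = readSUheader("shot.su")   # header values of the first trace only
#     data = read("shot.su")          # whole file; data['trace'] has shape (ntraces, ns)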
def write(data, filename=None):
if filename == None:
data.tofile(sys.stdout)
else:
data.tofile(filename)
| mit | -8,316,606,742,452,979,000 | 29.391304 | 129 | 0.474249 | false | 3.491259 | false | false | false |
amdor/skyscraper | tests/test_scraper_service.py | 1 | 1401 | import os
import unittest
from skyscraper.scraper_service import ScraperServiceFactory
from skyscraper.utils.constants import SPEEDOMETER_KEY, AGE_KEY, CAR_KEY, PRICE_KEY, POWER_KEY, CURRENCY_KEY
from common_test_utils import gather_extension_files, VALIDATION_DATA
class TestScraping(unittest.TestCase):
files_under_test = set()
@classmethod
def setUpClass(cls):
path = os.path.dirname(os.path.realpath(__file__))
cls.files_under_test = gather_extension_files(path)
def test_scraping(self):
for file_name in [*VALIDATION_DATA]:
abs_path = list(filter(lambda test_file: test_file.endswith(file_name), self.files_under_test))[0]
with open(abs_path, 'rb') as html_file:
file_content = html_file.read()
file_content = str(file_content, encoding='utf-8')
scraper = ScraperServiceFactory.get_for_dict({file_name: file_content})
car_data = scraper.get_car_data()
actual_value = car_data[0]
expected_value = VALIDATION_DATA[file_name]
print(actual_value[CAR_KEY] + ' assertions')
self.assertEqual(expected_value[SPEEDOMETER_KEY], actual_value[SPEEDOMETER_KEY])
self.assertEqual(expected_value[AGE_KEY], actual_value[AGE_KEY])
self.assertEqual(expected_value[PRICE_KEY], actual_value[PRICE_KEY])
self.assertEqual(expected_value[POWER_KEY], actual_value[POWER_KEY])
self.assertEqual(expected_value[CURRENCY_KEY], actual_value[CURRENCY_KEY])
| mit | 1,581,957,246,233,209,900 | 42.78125 | 108 | 0.741613 | false | 3.025918 | true | false | false |
FabriceSalvaire/grouped-purchase-order | GroupedPurchaseOrder/views/account.py | 1 | 13035 | ####################################################################################################
#
# GroupedPurchaseOrder - A Django Application.
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
# from django.forms.widgets import HiddenInput
from django.contrib import messages
from django.contrib.auth import forms
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.forms import ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.utils import translation
from django.utils.translation import ugettext as _
####################################################################################################
from GroupedPurchaseOrder.models import Profile
from GroupedPurchaseOrder.utils import send_localized_mail
####################################################################################################
class AuthenticationForm(forms.AuthenticationForm):
"""Override the default AuthenticationForm in order to add HTML5 attributes. This is the only
change done and needed
"""
##############################################
def __init__(self, *args, **kwargs):
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Add HTML5 attributes
self.fields['password'].widget.attrs['class'] = 'form-control'
self.fields['password'].widget.attrs['placeholder'] = _('Password')
self.fields['username'].widget.attrs['autofocus'] = 'autofocus'
self.fields['username'].widget.attrs['class'] = 'form-control'
self.fields['username'].widget.attrs['placeholder'] = _('Username')
####################################################################################################
class PasswordChangeForm(forms.PasswordChangeForm):
"""Override the default PasswordChangeForm in order to add HTML5 attributes. This is the only
change done and needed
"""
def __init__(self, *args, **kwargs):
super(PasswordChangeForm, self).__init__(*args, **kwargs)
# Add HTML5 attributes
self.fields['new_password1'].widget.attrs['class'] = 'form-control'
self.fields['new_password1'].widget.attrs['placeholder'] = _('New password')
self.fields['new_password2'].widget.attrs['class'] = 'form-control'
self.fields['new_password2'].widget.attrs['placeholder'] = _('New password')
self.fields['old_password'].widget.attrs['autofocus'] = 'autofocus'
self.fields['old_password'].widget.attrs['class'] = 'form-control'
self.fields['old_password'].widget.attrs['placeholder'] = _('Old password')
####################################################################################################
class PasswordResetForm(forms.PasswordResetForm):
"""Override the default PasswordResetForm in order to add HTML5 attributes. This is the only
change done and needed
"""
##############################################
def __init__(self, *args, **kwargs):
super(PasswordResetForm, self).__init__(*args, **kwargs)
# Add HTML5 attributes
self.fields['email'].widget.attrs['autofocus'] = 'autofocus'
self.fields['email'].widget.attrs['class'] = 'form-control'
self.fields['email'].widget.attrs['placeholder'] = _('email')
####################################################################################################
class SetPasswordForm(forms.SetPasswordForm):
"""Override the default SetPasswordForm in order to add HTML5 attributes. This is the only change
done and needed
"""
##############################################
def __init__(self, *args, **kwargs):
super(SetPasswordForm, self).__init__(*args, **kwargs)
# Add HTML5 attributes
self.fields['new_password1'].widget.attrs['autofocus'] = 'autofocus'
self.fields['new_password1'].widget.attrs['class'] = 'form-control'
self.fields['new_password1'].widget.attrs['placeholder'] = _('New password')
self.fields['new_password2'].widget.attrs['class'] = 'form-control'
self.fields['new_password2'].widget.attrs['placeholder'] = _('New password')
####################################################################################################
class UserCreationForm(forms.UserCreationForm):
"""Override the default UserCreationForm in order to add HTML5 attributes.
"""
##############################################
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2', 'first_name', 'last_name')
##############################################
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
# email, first_name and last_name are required
self.fields['email'].required = True
self.fields['first_name'].required = True
# Add HTML5 attributes
self.fields['email'].widget.attrs['class'] = 'form-control'
self.fields['first_name'].widget.attrs['class'] = 'form-control'
self.fields['last_name'].widget.attrs['class'] = 'form-control'
self.fields['password1'].widget.attrs['class'] = 'form-control'
self.fields['password1'].widget.attrs['placeholder'] = _('Password')
self.fields['password2'].widget.attrs['class'] = 'form-control'
self.fields['password2'].widget.attrs['placeholder'] = _('Password')
self.fields['username'].widget.attrs['autofocus'] = 'autofocus'
self.fields['username'].widget.attrs['class'] = 'form-control'
self.fields['username'].widget.attrs['placeholder'] = _('Username')
##############################################
def save(self, commit=True):
"""Create the new User and the associated Profile The User is not activated until the
register_confirm url has been visited
"""
if not commit:
raise NotImplementedError('Cannot create Profile and User without commit')
user = super(UserCreationForm, self).save(commit=False)
user.is_active = False
user.save()
profile = Profile(user=user)
profile.save()
return user
####################################################################################################
class UserUpdateForm(ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name')
##############################################
def __init__(self, *args, **kwargs):
super(UserUpdateForm, self).__init__(*args, **kwargs)
# first_name and last_name are required
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs['autofocus'] = 'autofocus'
self.fields['first_name'].widget.attrs['class'] = 'form-control'
self.fields['last_name'].widget.attrs['class'] = 'form-control'
####################################################################################################
class ProfileUpdateForm(ModelForm):
class Meta:
model = Profile
fields = ('phone_number', 'language', 'timezone')
##############################################
def __init__(self, *args, **kwargs):
super(ProfileUpdateForm, self).__init__(*args, **kwargs)
self.fields['language'].required = True
self.fields['timezone'].required = True
self.fields['language'].widget.attrs['class'] = 'form-control'
self.fields['phone_number'].widget.attrs['class'] = 'form-control'
self.fields['timezone'].widget.attrs['class'] = 'form-control'
####################################################################################################
def register(request):
if request.method == 'POST':
user_form = UserCreationForm(request.POST)
if user_form.is_valid():
new_user = user_form.save()
send_localized_mail(new_user, _('Subscription to G.P.O.'),
'GroupedPurchaseOrder/account/register_email.html',
{'URL': request.build_absolute_uri(reverse('accounts.register.confirm',
args=[new_user.pk,
new_user.profile.hash_id])),
'fullname': new_user.get_full_name()})
return render(request, 'GroupedPurchaseOrder/account/register_end.html')
else:
messages.error(request, _("Some information are missing or mistyped"))
else:
user_form = UserCreationForm()
return render(request, 'GroupedPurchaseOrder/account/register.html', {'user_form': user_form})
####################################################################################################
def register_confirm(request, user_id, user_hash):
"""Check that the User and the Hash are correct before activating the User
"""
user = get_object_or_404(User, pk=user_id, profile__hash_id=user_hash)
user.is_active = True
user.save()
return render(request, 'GroupedPurchaseOrder/account/confirm.html', {'user': user})
####################################################################################################
@login_required
def profile(request):
# Force the user to provide language and timezone
if not request.user.profile.language or request.user.profile.timezone == 'UTC':
messages.error(request, _("You should update your timezone. Without it G.P.O. will not work as expected."))
return HttpResponseRedirect(reverse('accounts.profile.update'))
return render(request, 'GroupedPurchaseOrder/account/profile.html')
####################################################################################################
@login_required
def update(request):
profile = get_object_or_404(Profile, user__pk=request.user.pk)
if request.method == 'POST':
user_form = UserUpdateForm(request.POST, instance=request.user)
profile_form = ProfileUpdateForm(request.POST, instance=profile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile = profile_form.save()
# Update the language code and activate it for the message
if profile.language:
request.session['django_language'] = profile.language
translation.activate(profile.language)
# Update the timezone if needed
if profile.timezone:
request.session['django_timezone'] = profile.timezone
# Print the message
messages.success(request, _("Personnal information updated"))
return HttpResponseRedirect(reverse('accounts.profile'))
else:
user_form = UserUpdateForm(instance=request.user)
profile_form = ProfileUpdateForm(instance=profile)
return render(request, 'GroupedPurchaseOrder/account/update.html',
{'user_form': user_form, 'profile_form': profile_form})
####################################################################################################
@login_required
def password_change_done(request):
messages.success(request, _('Password changed successfully'))
return HttpResponseRedirect(reverse('accounts.profile'))
####################################################################################################
def password_reset_done(request):
return render(request, 'GroupedPurchaseOrder/account/password_reset_done.html')
####################################################################################################
@login_required
def delete(request):
request.user.delete()
return HttpResponseRedirect(reverse('index'))
####################################################################################################
#
# End
#
####################################################################################################
| agpl-3.0 | -8,662,595,971,610,037,000 | 38.984663 | 115 | 0.531109 | false | 4.889347 | false | false | false |
bros-bioinfo/bros-bioinfo.github.io | COURS/M1/SEMESTRE1/ALGO_PROG/ALGO/Eliot/Pile_File.py | 1 | 1911 | import random as rd
class Pile(list):
    def creer_pile(self): # Return a new empty stack.
        return self
    def empiler(self, e): # Push element 'e' onto the stack 'P'.
        self.append(e)
    def depiler(self): # Pop an element off the stack 'P' and return it.
        return self.pop()
    def taille_pile(self): # Return the number of elements in the stack 'P'.
        return len(self)
# class File(deque):
#
#     def creer_file(self): # Return a new empty queue.
#         return self
#
#     def enfiler(self, e): # Enqueue element 'e' in the queue 'F'.
#         self.append(e)
#
#     def defiler(self): # Dequeue an element from the queue 'F' and return it.
#         return self.popleft()
#
#     def taille_file(self): # Return the number of elements in the queue 'F'.
#         return len(self)
#
#
# pile = Pile()
# pile.empiler(1)
# pile.empiler(2)
# pile.empiler(3)
# print('LENGTH = ', pile.taille_pile())
# print(pile.depiler())
# print(pile.depiler())
# print(pile.depiler())
#
# file = File()
# file.enfiler(1)
# file.enfiler(2)
# file.enfiler(3)
# print('LENGTH = ', file.taille_file())
# print(file.defiler())
# print(file.defiler())
# print(file.defiler())
p_entree = Pile(range(10))
rd.shuffle(p_entree)
# print(enter)
p_sortie = Pile([])
# print(sortie)
def trier(p_entree, p_sortie):
for _ in range(len(p_entree)):
print(p_entree, p_sortie, "\n")
mini = p_entree.depiler()
n = len(p_entree)
for x in range(n):
a = p_entree.depiler()
if a < mini:
mini, a = a, mini
p_sortie.empiler(a)
for x in range(n):
p_entree.empiler(p_sortie.depiler())
p_sortie.empiler(mini)
trier(p_entree, p_sortie)
print(p_sortie)
| mit | -1,065,698,792,782,147,600 | 23.706667 | 105 | 0.571506 | false | 2.44137 | false | false | false |
Adamssss/projectEuler | Problem 001-150 Python/pb102.py | 1 | 1225 | import math
import time
t1 = time.time()
# read the trianlges into a list
f = open('pb102_triangles.txt','r')
tris= f.read().split('\n')
f.close()
def totri(tl):
triangle = []
temp = tl.split(',')
for i in range(0,3):
triangle.append([tonumber(temp[2*i]),tonumber(temp[2*i+1])])
return triangle
def tonumber(ts):
neg = False
result = 0
temp = ts[:]
if temp[0] == '-':
neg = True
temp = temp[1:]
for i in temp:
result = result*10 + ord(i)-48
if neg:
result *= -1
return result
def contain(tri):
if not onthesameside(tri[0],tri[1],tri[2]):
return False
if not onthesameside(tri[1],tri[2],tri[0]):
return False
if not onthesameside(tri[2],tri[0],tri[1]):
return False
return True
def onthesameside(A,B,C):
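    # Despite the name, this returns True when the line through vertex B and the origin O
    # separates A and C (or one of them lies on that line); contain() requires this for all
    # three vertices, which is the case when O is interior to the triangle.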
BA = [A[0]-B[0],A[1]-B[1]]
BO = [-B[0],-B[1]]
BC = [C[0]-B[0],C[1]-B[1]]
k = BO[1]/BO[0]
if BA[0]*k > BA[1] and BC[0]*k > BC[1]:
return False
if BA[0]*k < BA[1] and BC[0]*k < BC[1]:
return False
return True
count = 0
for i in range(0,1000):
if contain(totri(tris[i])):
count += 1
print(count)
print("time:",time.time()-t1)
| mit | -3,754,334,336,298,502,700 | 19.081967 | 68 | 0.533061 | false | 2.657267 | false | false | false |
quimaguirre/diana | diana/toolbox/parse_clinical_trials.py | 1 | 12367 | ##############################################################################
# Clinical trials parser
#
# eg 2013-2016
##############################################################################
import cPickle, os, re
def main():
#base_dir = "../data/ct/"
base_dir = "/home/eguney/data/ct/"
file_name = base_dir + "ct.csv"
output_data(base_dir, file_name)
return
def output_data(base_dir, file_name):
drug_to_ctids = get_interventions(base_dir, include_other_names=True) #False)
print len(drug_to_ctids), drug_to_ctids.items()[:5]
ctid_to_conditions = get_ctid_to_conditions(base_dir)
print len(ctid_to_conditions), ctid_to_conditions.items()[:5]
ctid_to_values = get_ctid_to_details(base_dir)
print len(ctid_to_values), ctid_to_values.items()[:5]
f = open(file_name, 'w')
f.write("Drug\tClinical trial Id\tPhase\tStatus\tFDA regulated\tWhy stopped\tResults date\tConditions\n")
for drug, ctids in drug_to_ctids.iteritems():
for ctid in ctids:
values = [ drug, ctid ]
if ctid in ctid_to_values:
#phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
values.extend(ctid_to_values[ctid])
if ctid in ctid_to_conditions:
conditions = ctid_to_conditions[ctid]
values.append(" | ".join(conditions))
f.write("%s\n" % "\t".join(values))
f.close()
return
def get_disease_specific_drugs(drug_to_diseases, phenotype_to_mesh_id):
disease_to_drugs = {}
mesh_id_to_phenotype = {}
for phenotype, mesh_id in phenotype_to_mesh_id.items():
mesh_id_to_phenotype[mesh_id] = phenotype
for drugbank_id, diseases in drug_to_diseases.iteritems():
for phenotype, dui, val in diseases:
if val > 0:
if dui in mesh_id_to_phenotype: # In the disease data set
disease = mesh_id_to_phenotype[dui].lower()
disease_to_drugs.setdefault(disease, set()).add(drugbank_id)
return disease_to_drugs
def get_drug_disease_mapping(base_dir, selected_drugs, name_to_drug, synonym_to_drug, mesh_id_to_name, mesh_id_to_name_with_synonyms, dump_file):
if os.path.exists(dump_file):
drug_to_diseases = cPickle.load(open(dump_file))
return drug_to_diseases
# Get mesh name to mesh id mapping
mesh_name_to_id = {}
for mesh_id, names in mesh_id_to_name_with_synonyms.iteritems():
for name in names:
for name_mod in [ name, name.replace(",", ""), name.replace("-", " "), name.replace(",", "").replace("-", " ") ]:
mesh_name_to_id[name_mod] = mesh_id
# Get CT info
drug_to_ctids, ctid_to_conditions, ctid_to_values = get_ct_data(base_dir, include_other_names=True)
# Get CT - MeSH disease mapping
intervention_to_mesh_name = {}
interventions = reduce(lambda x,y: x|y, ctid_to_conditions.values())
for intervention in interventions:
if intervention.endswith('s'):
intervention = intervention[:-1]
idx = intervention.find("(")
if idx != -1:
intervention = intervention[:idx].rstrip()
try:
exp = re.compile(r"\b%ss{,1}\b" % re.escape(intervention))
except:
print "Problem with regular expression:", intervention
for mesh_name, dui in mesh_name_to_id.iteritems():
m = exp.search(mesh_name)
if m is None:
continue
elif len(mesh_name.split()) != len(intervention.split()): # no partial overlap
continue
phenotype = mesh_id_to_name[dui]
intervention_to_mesh_name[intervention] = phenotype
break
#print len(intervention_to_mesh_name), intervention_to_mesh_name.items()[:5]
# Get interventions
phase_to_value = { "Phase 0": 0.5, "Phase 1": 0.6, "Phase 1/Phase 2": 0.65, "Phase 2": 0.7, "Phase 2/Phase 3": 0.75, "Phase 3": 0.8, "Phase 3/Phase 4":0.85, "Phase 4": 0.9, "N/A": 0.5 }
status_to_value = { "Terminated": -0.5, "Withdrawn": -1} #,"Completed", "Recruiting", "Not yet recruiting"
drug_to_diseases = {}
drug_to_diseases_n_study = {}
non_matching_drugs = set()
for drug, ctids in drug_to_ctids.iteritems():
drugbank_id = None
if name_to_drug is None:
drugbank_id = drug
else:
if drug in name_to_drug:
drugbank_id = name_to_drug[drug]
elif drug in synonym_to_drug:
drugbank_id = synonym_to_drug[drug]
else:
non_matching_drugs.add(drug)
continue
if selected_drugs is not None and drugbank_id not in selected_drugs:
continue
phenotype_to_count = {}
for ctid in ctids:
phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
val = 0.5
if phase not in phase_to_value:
print "Unknown phase:", phase
if status in status_to_value and phase in phase_to_value:
val = phase_to_value[phase] - 0.1
for intervention in ctid_to_conditions[ctid]:
if intervention not in intervention_to_mesh_name:
continue
phenotype = intervention_to_mesh_name[intervention]
i = phenotype_to_count.setdefault(phenotype, 0)
phenotype_to_count[phenotype] = i + 1
dui = mesh_name_to_id[phenotype]
# Phase based value assignment
drug_to_diseases.setdefault(drugbank_id, set()).add((phenotype, dui, val))
# Number of study based value assignment
for phenotype, val in phenotype_to_count.iteritems():
dui = mesh_name_to_id[phenotype]
drug_to_diseases_n_study.setdefault(drugbank_id, set()).add((phenotype, dui, val))
#drug_to_diseases = drug_to_diseases_n_study
#print "Non matching drugs:", len(non_matching_drugs)
#print len(drug_to_diseases), drug_to_diseases.items()[:5]
cPickle.dump(drug_to_diseases, open(dump_file, 'w'))
return drug_to_diseases
def get_ct_data(base_dir, include_other_names=True, dump_file=None):
if dump_file is not None and os.path.exists(dump_file):
values = cPickle.load(open(dump_file))
#drug_to_ctids, ctid_to_conditions, ctid_to_values = values
return values
drug_to_ctids = get_interventions(base_dir, include_other_names)
ctid_to_conditions = get_ctid_to_conditions(base_dir)
ctid_to_values = get_ctid_to_details(base_dir)
values = drug_to_ctids, ctid_to_conditions, ctid_to_values
if dump_file is not None:
cPickle.dump(values, open(dump_file, 'w'))
return values
def get_ctid_to_conditions(base_dir):
condition_file = base_dir + "conditions.txt"
condition_file2 = base_dir + "condition_browse.txt"
# Get conditions
ctid_to_conditions = {}
f = open(condition_file)
f.readline()
for line in f:
words = line.strip().split("|")
ctid = words[1]
condition = words[2] #.lower()
ctid_to_conditions.setdefault(ctid, set()).add(condition)
f.close()
return ctid_to_conditions
f = open(condition_file2)
f.readline()
for line in f:
words = line.strip().split("|")
ctid = words[1]
condition = words[2] #.lower()
ctid_to_conditions.setdefault(ctid, set()).add(condition)
f.close()
return ctid_to_conditions
def get_ctid_to_details(base_dir):
study_file = base_dir + "clinical_study.txt" # _noclob
# Get phase etc information
f = open(study_file)
line = f.readline()
words = line.strip().split("|")
header_to_idx = dict((word.lower(), i) for i, word in enumerate(words))
text = None
ctid_to_values = {}
while line:
line = f.readline()
if line.startswith("NCT"):
if text is not None:
words = text.strip().split("|")
ctid = words[0]
try:
phase = words[header_to_idx["phase"]]
status = words[header_to_idx["overall_status"]]
fda_regulated = words[header_to_idx["is_fda_regulated"]]
why_stopped = words[header_to_idx["why_stopped"]]
results_date = words[header_to_idx["firstreceived_results_date"]]
except:
print words
return
if phase.strip() != "":
ctid_to_values[ctid] = [phase, status, fda_regulated, why_stopped, results_date]
text = line
else:
text += line
f.close()
words = text.strip().split("|")
ctid = words[0]
phase = words[header_to_idx["phase"]]
status = words[header_to_idx["overall_status"]]
if phase.strip() != "":
ctid_to_values[ctid] = [phase, status, fda_regulated, why_stopped, results_date]
return ctid_to_values
def get_interventions(base_dir, include_other_names=True):
#ctid_to_drugs = {}
drug_to_ctids = {}
intervention_file = base_dir + "interventions.txt"
f = open(intervention_file)
f.readline()
#prev_row = 0
ignored_intervention_types = set()
for line in f:
words = line.strip().split("|")
try:
row = int(words[0])
#if row != prev_row + 1:
# continue
except:
continue
#prev_row += 1
if len(words) < 5:
#print words
continue
ctid = words[1]
intervention = words[2]
drug = words[3]
drug = drug.decode("ascii", errors="ignore").encode("ascii")
drug = drug.strip("\"'")
if intervention != "Drug" and intervention != "Biological" :
ignored_intervention_types.add(intervention)
continue
drug_to_ctids.setdefault(drug, set()).add(ctid)
#ctid_to_drugs.setdefault(ctid, set()).add(drug)
#conditions = drug_to_interventions.setdefault(drug, set())
#conditions |= ctid_to_conditions[ctid]
f.close()
print "Ignored intervention types:", ignored_intervention_types
if include_other_names:
intervention_file = base_dir + "intervention_browse.txt"
f = open(intervention_file)
f.readline()
for line in f:
words = line.strip().split("|")
row = int(words[0])
ctid = words[1]
drug = words[2] #.lower()
drug = drug.decode("ascii", errors="ignore").encode("ascii")
drug = drug.strip("\"'")
drug_to_ctids.setdefault(drug, set()).add(ctid)
#ctid_to_drugs.setdefault(ctid, set()).add(drug)
f.close()
intervention_file = base_dir + "intervention_other_names.txt"
f = open(intervention_file)
f.readline()
for line in f:
words = line.strip().split("|")
row = int(words[0])
ctid = words[1]
drug = words[3] #.lower()
drug = drug.decode("ascii", errors="ignore").encode("ascii")
drug = drug.strip("\"'")
drug_to_ctids.setdefault(drug, set()).add(ctid)
#ctid_to_drugs.setdefault(ctid, set()).add(drug)
f.close()
return drug_to_ctids #ctid_to_drugs
def get_drug_to_interventions(drug_to_ctids):
drug_to_interventions = {}
non_matching_drugs = set()
for drug, ctids in drug_to_ctids.iteritems():
drugbank_id = None
if name_to_drug is None:
drugbank_id = drug
else:
if drug in name_to_drug:
drugbank_id = name_to_drug[drug]
elif drug in synonym_to_drug:
drugbank_id = synonym_to_drug[drug]
else:
non_matching_drugs.add(drug)
continue
values = set()
for ctid in ctids:
#if ctid_to_values[ctid][0] != "Phase 3":
# continue
values |= ctid_to_conditions[ctid]
if len(values) == 0:
continue
drug_to_interventions.setdefault(drugbank_id, values)
#print "Non matching drugs:", len(non_matching_drugs)
#phenotypes = disease_to_drugs.keys()
#disease_to_interventions = {}
#for drug, interventions in drug_to_interventions.iteritems():
# for intervention in interventions:
# intervention = intervention.lower()
# for disease in phenotypes:
# values = text_utilities.tokenize_disease_name(disease)
# if all([ intervention.find(word.strip()) != -1 for word in values ]): # disease.split(",") ]):
# disease_to_drugs_ct.setdefault(disease, set()).add(drug)
# disease_to_interventions.setdefault(disease, set()).add(intervention)
#for disease, interventions in disease_to_interventions.iteritems():
# print disease, interventions
#print len(drug_to_interventions), drug_to_interventions.items()[:5]
#print drug_to_ctids["voriconazole"], print ctid_to_conditions["NCT00005912"], print ctid_to_values["NCT00005912"]
#print drug_to_interventions["DB00582"]
return drug_to_interventions
def get_frequent_interventions(drug_to_interventions):
condition_to_count = {}
for drug, interventions in drug_to_interventions.iteritems():
for condition in interventions:
if condition in condition_to_count:
condition_to_count[condition] += 1
else:
condition_to_count[condition] = 1
values = []
for condition, count in condition_to_count.iteritems():
values.append((count, condition))
values.sort()
values.reverse()
#print values[:50]
return values
if __name__ == "__main__":
main()
| mit | 6,743,207,231,642,477,000 | 35.266862 | 189 | 0.646802 | false | 2.854143 | false | false | false |
openstack/vitrage | vitrage/tests/unit/evaluator/template_validation/content/v1/test_parameters_validator.py | 1 | 1656 | # Copyright 2019 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.evaluator.template_validation.content.v1.get_param_validator \
import GetParamValidator
from vitrage.tests.unit.evaluator.template_validation.content.base import \
ValidatorTest
class ParametersValidatorTest(ValidatorTest):
"""Tests for the parameters validator of version 1
All tests should succeed, as long as there is no get_param reference in
the template itself
"""
def test_validate_no_parameters(self):
result = GetParamValidator.validate(
template={'alarm_name': "Don't add a comment"}, actual_params=None)
self._assert_correct_result(result)
def test_validate_empty_parameters(self):
result = GetParamValidator.validate(
template={'alarm_name': '+2 for everybody'}, actual_params={})
self._assert_correct_result(result)
def test_validate_with_parameter(self):
template = {'alarm_name': 'get_param(param1)'}
result = \
GetParamValidator.validate(template=template, actual_params={})
self._assert_fault_result(result, 160)
| apache-2.0 | 2,761,177,479,609,274,000 | 38.428571 | 79 | 0.71256 | false | 4.160804 | true | false | false |
savionok/RemoteHID | RemoteHidPythonServer/udpEchoServer.py | 1 | 2375 | #############################################################################
# Copyright 2012 Virtosu Sava #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#############################################################################
import socket
from threading import Thread
from configs import SERVER
class UDPEchoServer(Thread):
'''This UDP Server is used only for echo back message from client
The message with which server response back can be also specified.
'''
def __init__(self, port = SERVER.UDP_ECHO_PORT, response_msg = ''):
Thread.__init__(self)
self.running = True
self.UDPSocket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
self.response_msg = response_msg
try:
self.UDPSocket.bind(("", SERVER.UDP_ECHO_PORT))
except:
            print 'Error 16: Port is in use. Failed to run server.'
def stop_server(self):
self.running = False
        # A UDP connect() does not transmit anything, so send an empty datagram instead to
        # wake the blocking recvfrom() in run() and let the loop observe running == False.
        temp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        temp_socket.sendto('', (SERVER.UDP_HOST, SERVER.UDP_ECHO_PORT))
def run (self):
while self.running:
data, address = self.UDPSocket.recvfrom(SERVER.MAX_BUFFER_SIZE)
#empty string counts as False
if self.response_msg:
self.UDPSocket.sendto( self.response_msg , (address[0], address[1]) )
else:
self.UDPSocket.sendto( data , (address[0], address[1]) )
#TODO: manage debug info
print "( " ,address[0], " " , address[1] , " ) said : ", data | apache-2.0 | 1,390,608,810,056,242,400 | 45.54 | 77 | 0.546947 | false | 4.144852 | false | false | false |
stamaimer/Hackthon | benchmark/stress.py | 1 | 11486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import collections
import contextlib
import functools
import itertools
import json
import math
import os
import random
import signal
import time
import redis
import pymysql
try:
import httplib
except ImportError:
import http.client as httplib
try:
import urllib.parse as urllib
except ImportError:
import urllib
from multiprocessing.pool import Pool, ThreadPool
from multiprocessing import Process
KEY_PREFIX = "stress_test:make_order"
USER_KEY = "{}:user".format(KEY_PREFIX)
SUCCESS_KEY = "{}:success".format(KEY_PREFIX)
FAILURE_KEY = "{}:failure".format(KEY_PREFIX)
ORDER_RESP_TIME_KEY = "{}:order_resp_time".format(KEY_PREFIX)
REQ_RESP_TIME_KEY = "{}:req_resp_time".format(KEY_PREFIX)
REQUEST_SUCCESS_KEY = "{}:request_success".format(KEY_PREFIX)
REQUEST_FAILURE_KEY = "{}:request_failure".format(KEY_PREFIX)
REQ_FINISH_TIME_KEY = "{}:req_finish_time".format(KEY_PREFIX)
ORDER_FINISH_TIME_KEY = "{}:order_finish_time".format(KEY_PREFIX)
redis_store = redis.Redis()
users, foods = {}, []
@contextlib.contextmanager
def db_query():
db = pymysql.connect(host=os.getenv("DB_HOST", "localhost"),
port=int(os.getenv("DB_PORT", 3306)),
user=os.getenv("DB_USER", "root"),
passwd=os.getenv("DB_PASS", "toor"),
db=os.getenv("DB_NAME", "eleme"))
try:
yield db
finally:
db.close()
def load_users():
global users
with db_query() as db:
cur = db.cursor()
# load users
cur.execute("SELECT id, name, password FROM user")
for i, name, pw in cur.fetchall():
users[i] = {"username": name, "password": pw}
redis_store.sadd(USER_KEY, *users.keys())
return users
def load_foods():
global foods
with db_query() as db:
cur = db.cursor()
cur.execute("SELECT id, stock, price FROM food")
for i, stock, price in cur.fetchall():
foods.append({"id": i, "stock": stock})
return foods
def safe_loads(data):
try:
return json.loads(data)
except:
return data
class QueryException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
def __str__(self):
return "{} {}".format(self.code, self.message)
class Query(object):
__slots__ = ["access_token", "user_id", "cart_id", "client"]
def __init__(self, host, port):
self.client = httplib.HTTPConnection(host, port, timeout=3)
self.access_token = None
self.user_id = None
self.cart_id = None
def request(self, method, url, headers=None, data=None):
data = data or {}
headers = headers or {}
headers["Content-Type"] = "application/json"
start = time.time()
status = None
try:
self.client.request(method, url, body=json.dumps(data),
headers=headers)
response = self.client.getresponse()
status = response.status
data = response.read().decode("utf-8")
self.client.close()
return {"status": status, "data": safe_loads(data)}
finally:
now = time.time()
elapsed = now - start
with redis_store.pipeline() as p:
if status in (200, 204):
p.incr(REQUEST_SUCCESS_KEY)
p.lpush(REQ_FINISH_TIME_KEY, now)
else:
p.incr(REQUEST_FAILURE_KEY)
p.lpush(REQ_RESP_TIME_KEY, elapsed)
p.execute()
def url(self, path):
assert self.access_token
params = {"access_token": self.access_token}
qs = urllib.urlencode(params)
return "{}?{}".format(path, qs) if qs else path
def _do_login(self, username, password):
data = {
"username": username,
"password": password
}
response = self.request("POST", "/login", data=data)
if response["status"] == 200:
self.access_token = response["data"]["access_token"]
return True
return False
def login(self):
user_id = redis_store.spop(USER_KEY)
if not user_id:
return False
self.user_id = int(user_id)
user = users[self.user_id]
return self._do_login(user["username"], user["password"])
def get_foods(self):
res = self.request("GET", self.url("/foods"))
return res["status"] == 200
def get_orders(self):
res = self.request("GET", self.url("/orders"))
return res["status"] == 200
def create_cart(self):
response = self.request("POST", self.url("/carts"))
try:
self.cart_id = response["data"].get("cart_id")
except:
return False
return response["status"] == 200
def cart_add_food(self):
food = random.choice(foods)
data = {"food_id": food["id"], "count": 1}
path = "/carts/{}".format(self.cart_id)
res = self.request("PATCH", self.url(path), data=data)
return res["status"] == 204
def make_order(self):
chain = [self.login, self.get_foods, self.create_cart,
self.cart_add_food, self.cart_add_food]
for action in chain:
if not action():
return False
data = {"cart_id": self.cart_id}
res = self.request("POST", self.url("/orders"), data=data)
return res["status"] == 200
def job(host, port):
q = Query(host, port)
start = time.time()
try:
ok = q.make_order()
except:
ok = False
end = time.time()
elapsed = end - start
with redis_store.pipeline() as p:
if ok:
p.incr(SUCCESS_KEY)
p.lpush(ORDER_FINISH_TIME_KEY, end)
else:
p.incr(FAILURE_KEY)
p.lpush(ORDER_RESP_TIME_KEY, elapsed)
p.execute()
def progress():
try:
prev = 0
while True:
time.sleep(1)
cur = get_value(SUCCESS_KEY)
msg = "Orders Per Second: {:4d}/s".format(cur - prev)
print(msg, end='')
print('\r' * len(msg), end='')
prev = cur
except KeyboardInterrupt:
pass
finally:
print('\n')
def thread(host, port, threads, num):
pool = ThreadPool(threads)
for _ in range(num):
pool.apply_async(job, (host, port))
time.sleep(0.001)
pool.close()
pool.join()
def divide(n, m):
"""Divide integer n to m chunks
"""
avg = int(n / m)
remain = n - m * avg
data = list(itertools.repeat(avg, m))
for i in range(len(data)):
if not remain:
break
data[i] += 1
remain -= 1
return data
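# Example: divide(10, 3) -> [4, 3, 3] and divide(7, 7) -> [1, 1, 1, 1, 1, 1, 1];
# the remainder is spread one unit at a time over the leading chunks.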
def work(host, port, processes, threads, times):
pool = Pool(processes,
lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
p = Process(target=progress)
p.daemon = True
start = time.time()
try:
for chunk in divide(times, processes):
pool.apply_async(thread, (host, port, threads, chunk))
p.start()
pool.close()
pool.join()
p.terminate()
p.join()
except KeyboardInterrupt:
pool.terminate()
p.terminate()
p.join()
pool.join()
return time.time() - start
def get_value(key):
v = redis_store.get(key)
return 0 if v is None else int(v)
def get_range(key):
v = redis_store.lrange(key, 0, -1)
return [float(i) for i in v]
def safe_div(a, b):
return a / b if b else 0
def get_avg(l):
return safe_div(sum(l), float(len(l)))
def report(processes, threads, total_time, total_order):
success = get_value(SUCCESS_KEY)
failure = get_value(FAILURE_KEY)
req_success = get_value(REQUEST_SUCCESS_KEY)
req_failure = get_value(REQUEST_FAILURE_KEY)
req_resp_time = get_range(REQ_RESP_TIME_KEY)
order_resp_time = get_range(ORDER_RESP_TIME_KEY)
req_finish_time = get_range(REQ_FINISH_TIME_KEY)
order_finish_time = get_range(ORDER_FINISH_TIME_KEY)
assert len(order_resp_time) == success + failure
assert len(req_resp_time) == req_success + req_failure
req_avg = safe_div(sum(req_resp_time), float(req_success))
order_avg = safe_div(sum(order_resp_time), success)
req_sec = collections.Counter(int(t) for t in req_finish_time)
order_sec = collections.Counter(int(t) for t in order_finish_time)
# remove the highest and lowest score
stats_req_sec = sorted(req_sec.values())[1:-1]
max_req_sec = int(get_avg(stats_req_sec[-5:]))
min_req_sec = int(get_avg(stats_req_sec[:5]))
mean_req_sec = int(get_avg(stats_req_sec))
# remove the highest and lowest score
stats_order_sec = sorted(order_sec.values())[1:-1]
max_order_sec = int(get_avg(stats_order_sec[-5:]))
min_order_sec = int(get_avg(stats_order_sec[:5]))
mean_order_sec = int(get_avg(stats_order_sec))
p = functools.partial(print, sep='')
p("Score: ", max_order_sec)
p("Correct Rate: ", round(success / total_order * 100, 2), "%")
p("\nStats")
p("Concurrent Level: ", processes, " x ", threads)
p("Time taken for tests: ", round(total_time * 1000, 2), "ms")
p("Complete requests: ", req_success)
p("Failed requests: ", req_failure)
p("Complete orders: ", success)
p("Failed orders: ", failure)
p("Time per request: ", round(req_avg * 1000, 2), "ms", " (mean)")
p("Time per order: ", round(order_avg * 1000, 2), "ms", " (mean)")
p("Request per second: ", max_req_sec, " (max) ", min_req_sec, " (min) ", mean_req_sec, " (mean)") # noqa
p("Order per second: ", max_order_sec, " (max) ", min_order_sec, " (min) ", mean_order_sec, " (mean)") # noqa
p("\nPercentage of orders made within a certain time (ms)")
order_resp_time = sorted(set(order_resp_time)) if order_resp_time else [0]
l = len(order_resp_time)
for e in (0.5, 0.75, 0.8, 0.9, 0.95, 0.98, 1):
idx = int(l * e)
idx = 0 if idx == 0 else idx - 1
p(" {:>4.0%} ".format(e),
int(math.ceil(order_resp_time[idx] * 1000)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-H", "--host", default="localhost",
help="server host name")
parser.add_argument("-p", "--port", default=8080, type=int,
help="server port")
parser.add_argument("-c", "--processes", default=2, type=int,
help="processes")
parser.add_argument("-t", "--threads", default=4, type=int,
help="threads")
parser.add_argument("-n", "--num", default=10000, type=int,
help="requests")
args = parser.parse_args()
redis_store.delete(
USER_KEY, SUCCESS_KEY, FAILURE_KEY,
ORDER_RESP_TIME_KEY, REQ_RESP_TIME_KEY,
REQUEST_SUCCESS_KEY, REQUEST_FAILURE_KEY,
REQ_FINISH_TIME_KEY, ORDER_FINISH_TIME_KEY)
load_users()
load_foods()
total_time = work(
args.host, args.port, args.processes, args.threads, args.num)
report(args.processes, args.threads, total_time, float(args.num))
if __name__ == "__main__":
main()
| gpl-2.0 | 2,749,461,268,001,201,000 | 27.014634 | 118 | 0.566255 | false | 3.496499 | false | false | false |
taurenk/Crossfit-Project-API | app/models.py | 1 | 1269 | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
athlete_teams = db.Table('athlete_teams',
db.Column('athlete_id', db.Integer, db.ForeignKey('athletes.id')),
db.Column('team_id', db.Integer, db.ForeignKey('teams.id'))
)
class Athlete(db.Model):
__tablename__ = 'athletes'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
age = db.Column(db.Integer)
    height = db.Column(db.String(4))
    weight = db.Column(db.Integer)
clean_and_jerk = db.Column(db.String(32))
snatch = db.Column(db.String(32))
deadlift = db.Column(db.String(32))
back_squat = db.Column(db.String(32))
max_pullups = db.Column(db.Integer)
run_5k = db.Column(db.String(32))
def __repr__(self):
return "{'name' : '%s'}" % self.name
class Team(db.Model):
__tablename__ = 'teams'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
captain = db.Column(db.String(64))
athletes = db.relationship('Athlete', secondary=athlete_teams,
backref=db.backref('teams', lazy='dynamic'))
def __repr__(self):
return "<%s, %s>" % (self.name, self.athletes) | mit | 4,826,109,988,930,293,000 | 27.863636 | 70 | 0.5855 | false | 3.057831 | false | false | false |
wutienyang/ES_pttmovie | auto/pttmovie/crawler.py | 1 | 8864 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import os
import re
import sys
import json
import requests
import argparse
import time
import codecs
from bs4 import BeautifulSoup
from six import u
__version__ = '1.0'
# if python 2, disable verify flag in requests.get()
VERIFY = True
if sys.version_info[0] < 3:
VERIFY = False
requests.packages.urllib3.disable_warnings()
class PttWebCrawler(object):
PTT_URL = 'https://www.ptt.cc'
"""docstring for PttWebCrawler"""
def __init__(self, cmdline=None, as_lib=False):
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='''
A crawler for the web version of PTT, the largest online community in Taiwan.
Input: board name and page indices (or articla ID)
Output: BOARD_NAME-START_INDEX-END_INDEX.json (or BOARD_NAME-ID.json)
''')
parser.add_argument('-b', metavar='BOARD_NAME', help='Board name', required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-i', metavar=('START_INDEX', 'END_INDEX'), type=int, nargs=2, help="Start and end index")
group.add_argument('-a', metavar='ARTICLE_ID', help="Article ID")
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
if not as_lib:
if cmdline:
args = parser.parse_args(cmdline)
else:
args = parser.parse_args()
board = args.b
if args.i:
start = args.i[0]
if args.i[1] == -1:
end = self.getLastPage(board)
else:
end = args.i[1]
self.parse_articles(start, end, board)
else: # args.a
article_id = args.a
self.parse_article(article_id, board)
def parse_articles(self, start, end, board, path='.', timeout=3):
filename = board + '-' + str(start) + '-' + str(end) + '.json'
filename = os.path.join(path, filename)
self.store(filename, u'{"articles": [', 'w')
for i in range(end-start+1):
index = start + i
print('Processing index:', str(index))
resp = requests.get(
url = self.PTT_URL + '/bbs/' + board + '/index' + str(index) + '.html',
cookies={'over18': '1'}, verify=VERIFY, timeout=timeout
)
if resp.status_code != 200:
print('invalid url:', resp.url)
continue
soup = BeautifulSoup(resp.text, 'html.parser')
divs = soup.find_all("div", "r-ent")
for div in divs:
try:
# ex. link would be <a href="/bbs/PublicServan/M.1127742013.A.240.html">Re: [問題] 職等</a>
href = div.find('a')['href']
link = self.PTT_URL + href
article_id = re.sub('\.html', '', href.split('/')[-1])
if div == divs[-1] and i == end-start: # last div of last page
self.store(filename, self.parse(link, article_id, board), 'a')
else:
self.store(filename, self.parse(link, article_id, board) + ',\n', 'a')
except:
pass
time.sleep(0.1)
self.store(filename, u']}', 'a')
return filename
def parse_article(self, article_id, board, path='.'):
link = self.PTT_URL + '/bbs/' + board + '/' + article_id + '.html'
filename = board + '-' + article_id + '.json'
filename = os.path.join(path, filename)
self.store(filename, self.parse(link, article_id, board), 'w')
return filename
@staticmethod
def parse(link, article_id, board, timeout=3):
print('Processing article:', article_id)
resp = requests.get(url=link, cookies={'over18': '1'}, verify=VERIFY, timeout=timeout)
if resp.status_code != 200:
print('invalid url:', resp.url)
return json.dumps({"error": "invalid url"}, sort_keys=True, ensure_ascii=False)
soup = BeautifulSoup(resp.text, 'html.parser')
main_content = soup.find(id="main-content")
metas = main_content.select('div.article-metaline')
author = ''
title = ''
date = ''
if metas:
author = metas[0].select('span.article-meta-value')[0].string if metas[0].select('span.article-meta-value')[0] else author
title = metas[1].select('span.article-meta-value')[0].string if metas[1].select('span.article-meta-value')[0] else title
date = metas[2].select('span.article-meta-value')[0].string if metas[2].select('span.article-meta-value')[0] else date
# remove meta nodes
for meta in metas:
meta.extract()
for meta in main_content.select('div.article-metaline-right'):
meta.extract()
# remove and keep push nodes
pushes = main_content.find_all('div', class_='push')
for push in pushes:
push.extract()
try:
ip = main_content.find(text=re.compile(u'※ 發信站:'))
ip = re.search('[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*', ip).group()
except:
ip = "None"
        # Remove '※ 發信站:' (starts with u'\u203b'), '◆ From:' (starts with
        # u'\u25c6'), blank lines and redundant whitespace.
        # Keep alphanumerics, Chinese characters and punctuation, URLs and a
        # few special symbols.
filtered = [ v for v in main_content.stripped_strings if v[0] not in [u'※', u'◆'] and v[:2] not in [u'--'] ]
expr = re.compile(u(r'[^\u4e00-\u9fa5\u3002\uff1b\uff0c\uff1a\u201c\u201d\uff08\uff09\u3001\uff1f\u300a\u300b\s\w:/-_.?~%()]'))
for i in range(len(filtered)):
filtered[i] = re.sub(expr, '', filtered[i])
filtered = [_f for _f in filtered if _f] # remove empty strings
filtered = [x for x in filtered if article_id not in x] # remove last line containing the url of the article
content = ' '.join(filtered)
content = re.sub(r'(\s)+', ' ', content)
# print 'content', content
# push messages
p, b, n = 0, 0, 0
messages = []
for push in pushes:
if not push.find('span', 'push-tag'):
continue
push_tag = push.find('span', 'push-tag').string.strip(' \t\n\r')
push_userid = push.find('span', 'push-userid').string.strip(' \t\n\r')
# if find is None: find().strings -> list -> ' '.join; else the current way
push_content = push.find('span', 'push-content').strings
push_content = ' '.join(push_content)[1:].strip(' \t\n\r') # remove ':'
push_ipdatetime = push.find('span', 'push-ipdatetime').string.strip(' \t\n\r')
messages.append( {'push_tag': push_tag, 'push_userid': push_userid, 'push_content': push_content, 'push_ipdatetime': push_ipdatetime} )
if push_tag == u'推':
p += 1
elif push_tag == u'噓':
b += 1
else:
n += 1
        # count: net score after boos are subtracted from pushes; all: total number of push messages
message_count = {'all': p+b+n, 'count': p-b, 'push': p, 'boo': b, "neutral": n}
# print 'msgs', messages
# print 'mscounts', message_count
# json data
data = {
'url': link,
'board': board,
'article_id': article_id,
'article_title': title,
'author': author,
'date': date,
'content': content,
'ip': ip,
            'message_count': message_count,
'messages': messages
}
# print 'original:', d
return json.dumps(data, sort_keys=True, ensure_ascii=False)
@staticmethod
def getLastPage(board, timeout=3):
content = requests.get(
url= 'https://www.ptt.cc/bbs/' + board + '/index.html',
cookies={'over18': '1'}, timeout=timeout
).content.decode('utf-8')
first_page = re.search(r'href="/bbs/' + board + '/index(\d+).html">‹', content)
if first_page is None:
return 1
return int(first_page.group(1)) + 1
@staticmethod
def store(filename, data, mode):
with codecs.open(filename, mode, encoding='utf-8') as f:
f.write(data)
@staticmethod
def get(filename, mode='r'):
with codecs.open(filename, mode, encoding='utf-8') as f:
return json.load(f)
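# Example invocations (board name is illustrative; the article ID is the one
# shown in the comment inside parse_articles):
#   python crawler.py -b movie -i 100 105     # crawl index pages 100-105
#   python crawler.py -b movie -i 1 -1        # crawl up to the last page
#   python crawler.py -b movie -a M.1127742013.A.240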
if __name__ == '__main__':
c = PttWebCrawler()
| mit | -7,976,881,766,051,306,000 | 40.647619 | 147 | 0.529614 | false | 3.536595 | false | false | false |
rjkeller/gentoo-installer | BuildKernel.py | 1 | 1521 | #!/usr/bin/env python
import fileinput
import os
import sys
# Builds and upgrades Gentoo kernels for the installer.
# The selected kernel type and init settings are recorded in
# /etc/superGentoo/kernel so that upgradeKernel() can later update the same
# kernel package on the installed system.
class BuildKernel:
def compileNewKernel(self, kernelType, initSettings):
        f = open('/etc/superGentoo/kernel', 'w')
f.write(kernelType + "," + initSettings)
f.close()
os.system("emerge " + kernelType)
os.system("mv /usr/src/.config /usr/src/linux/.config")
os.system("touch /usr/src/linux/.config")
os.system("cd /usr/src/linux")
os.system("make")
os.system("make modules_install")
os.system("cp arch/x86_64/boot/bzImage /boot/kernel-`find /usr/src -name linux-3* | awk -Flinux- '{print \$NF }'`")
def upgradeKernel(self):
kernelData = open('/etc/superGentoo/kernel').read(1000).split(",")
os.system("emerge --update ". kernelData[0])
os.system()
//--------------------------------------------------------------------------//
// MAIN FUNCTION
//--------------------------------------------------------------------------//
if __name__ == '__main__':
bk = BuildKernel()
if sys.argv[1] == "upgrade":
bk.upgradeKernel()
elif sys.argv[1] == "newKernel":
bk.compileNewKernel(sys.argv[2], sys.argv[3])
| apache-2.0 | 1,754,916,898,967,739,600 | 33.568182 | 123 | 0.560158 | false | 3.727941 | false | false | false |
hjoliver/cylc | cylc/flow/command_polling.py | 1 | 3469 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Encapsulates polling activity for CLI commands."""
import sys
from time import sleep
class Poller:
"""Encapsulates polling activity for cylc commands. Derived classes
must override the check() method to test the polling condition."""
@classmethod
def add_to_cmd_options(cls, parser, d_interval=60, d_max_polls=10):
"""Add command line options for commands that can do polling"""
parser.add_option(
"--max-polls",
help="Maximum number of polls (default " + str(d_max_polls) + ").",
metavar="INT",
action="store",
dest="max_polls",
default=d_max_polls)
parser.add_option(
"--interval",
help=(
"Polling interval in seconds (default " + str(d_interval) +
")."
),
metavar="SECS",
action="store",
dest="interval",
default=d_interval)
def __init__(self, condition, interval, max_polls, args):
self.condition = condition # e.g. "workflow stopped"
# check max_polls is an int
try:
self.max_polls = int(max_polls)
except ValueError:
sys.exit("max_polls must be an int")
# check interval is an int
try:
self.interval = int(interval)
except ValueError:
sys.exit("interval must be an integer")
self.n_polls = 0
self.args = args # any extra parameters needed by check()
def check(self):
"""Abstract method. Test polling condition."""
raise NotImplementedError()
def poll(self):
"""Poll for the condition embodied by self.check().
Return True if condition met, or False if polling exhausted."""
if self.max_polls == 0:
# exit 1 as we can't know if the condition is satisfied
sys.exit("WARNING: nothing to do (--max-polls=0)")
elif self.max_polls == 1:
sys.stdout.write("checking for '%s'" % self.condition)
else:
sys.stdout.write("polling for '%s'" % self.condition)
while self.n_polls < self.max_polls:
self.n_polls += 1
if self.check():
sys.stdout.write(": satisfied\n")
return True
if self.max_polls > 1:
sys.stdout.write(".")
sleep(self.interval)
sys.stdout.write("\n")
if self.max_polls > 1:
sys.stderr.write(
"ERROR: condition not satisfied after %d polls\n" %
self.max_polls)
else:
sys.stderr.write("ERROR: condition not satisfied\n")
return False
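# Minimal sketch of a concrete poller (illustrative only, not part of cylc):
#   import os
#   class FileExistsPoller(Poller):
#       """Succeed once self.args['path'] exists on disk."""
#       def check(self):
#           return os.path.exists(self.args['path'])
#   poller = FileExistsPoller("file exists", interval=5, max_polls=12,
#                             args={'path': '/tmp/flag'})
#   success = poller.poll()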
| gpl-3.0 | -7,614,361,080,997,373,000 | 34.762887 | 79 | 0.59066 | false | 4.129762 | false | false | false |
brglng/zobject | .ycm_extra_conf.py | 1 | 6274 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-W',
'-Wall',
'-Wextra',
'-Wno-unused-parameter',
'-Wno-multichar',
#'-fexceptions',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=gnu11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-I',
'./include',
'-I',
'./src',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| lgpl-3.0 | 2,080,756,639,750,032,000 | 34.050279 | 82 | 0.687121 | false | 3.756886 | false | false | false |
imanhodjaev/django-userapp | django_userapp/backends.py | 1 | 2120 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.conf import settings
from .request import login
import re
UserModel = get_user_model()
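# Settings consulted by this backend (values shown are only illustrative):
#   AUTHENTICATION_BACKENDS = ["django_userapp.backends.UserappBackend"]
#   USERAPP_DEFAULT_EMAIL = "fallback@example.com"  # used when userapp returns no email
#   USERAPP_USE_FEATURES = True                     # enable the feature check below
#   USERAPP_FEATURES = ["premium"]                  # at least one must be active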
class UserappBackend(object):
def authenticate(self, username=None, password=None, request=None, **kwargs):
result = login(request, username=username, password=password)
try:
if result is None:
raise UserModel.DoesNotExist("Userapp account not found")
user = result[0]
default_email = getattr(settings, "USERAPP_DEFAULT_EMAIL", "[email protected]")
if self.passes_checks(user):
email = getattr(user, "email", default_email)
our_username = re.sub(r"[@\.\-]", "_", username)
our_user, created = UserModel.objects.get_or_create(email__exact=email)
if created: # If user is new user then set username
our_user.username = our_username[0:29]
our_user.email = email
our_user.save()
if not our_user.password: # Means that user was created by our backend
return our_user
return None
else:
return None
except UserModel.DoesNotExist:
return None
def get_user(self, user_id):
try:
return UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
def passes_checks(self, user):
""" Basically checks features and if finds any match returns True """
user_features = {}
features = getattr(settings, "USERAPP_FEATURES", [])
use_features = getattr(settings, "USERAPP_USE_FEATURES", False)
if "features" in user:
user_features = user["features"]
if use_features is False:
return True
for feature in features:
if feature in user_features and user_features[feature]["value"]:
return True
return False
| lgpl-3.0 | -3,423,429,394,243,385,000 | 30.641791 | 92 | 0.579245 | false | 4.435146 | false | false | false |
jptomo/rpython-lang-scheme | rpython/rlib/parsing/parsing.py | 1 | 12986 | import py
from rpython.rlib.parsing.lexer import SourcePos
from rpython.rlib.parsing.tree import Node, Symbol, Nonterminal
class Rule(object):
def __init__(self, nonterminal, expansions):
self.nonterminal = nonterminal
self.expansions = expansions
def getkey(self):
return (self.nonterminal, tuple(self.expansions))
# def __hash__(self):
# return hash(self.getkey())
def __eq__(self, other):
return self.getkey() == other.getkey()
def __ne__(self, other):
return not self == other
def __str__(self):
return "%s: %s" % (
self.nonterminal, " | ".join([repr(e) for e in self.expansions]))
def __repr__(self):
return "Rule(%r, %r)" % (self.nonterminal, self.expansions)
class LazyInputStream(object):
def __init__(self, iterator):
self.iterator = iter(iterator)
self.data = []
def __getitem__(self, index):
assert index >= 0
while len(self.data) <= index:
try:
self.data.append(self.iterator.next())
except StopIteration:
raise IndexError("index out of range")
return self.data[index]
class ParseError(Exception):
def __init__(self, source_pos, errorinformation):
self.source_pos = source_pos
self.errorinformation = errorinformation
self.args = (source_pos, errorinformation)
def nice_error_message(self, filename="<unknown>", source=""):
# + 1 is because source_pos is 0-based and humans 1-based
result = [" File %s, line %s" % (filename, self.source_pos.lineno + 1)]
if source:
result.append(source.split("\n")[self.source_pos.lineno])
result.append(" " * self.source_pos.columnno + "^")
else:
result.append("<couldn't get source>")
if self.errorinformation:
failure_reasons = self.errorinformation.failure_reasons
if len(failure_reasons) > 1:
all_but_one = failure_reasons[:-1]
last = failure_reasons[-1]
expected = "%s or '%s'" % (
", ".join(["'%s'" % e for e in all_but_one]), last)
else:
expected = failure_reasons[0]
result.append("ParseError: expected %s" % (expected, ))
else:
result.append("ParseError")
return "\n".join(result)
class ErrorInformation(object):
def __init__(self, pos, failure_reasons=None):
if failure_reasons is None:
failure_reasons = []
self.failure_reasons = failure_reasons
self.pos = pos
def combine_errors(error1, error2):
    # Module-level helper (not a method): either argument may be None, and
    # LazyParseTable.match_symbol calls it as a plain function.
    if error1 is None:
        return error2
    if (error2 is None or error1.pos > error2.pos or
        len(error2.failure_reasons) == 0):
        return error1
    elif error2.pos > error1.pos or len(error1.failure_reasons) == 0:
        return error2
    failure_reasons = []
    already_there = {}
    for fr in [error1.failure_reasons, error2.failure_reasons]:
        for reason in fr:
            if reason not in already_there:
                already_there[reason] = True
                failure_reasons.append(reason)
    return ErrorInformation(error1.pos, failure_reasons)
class LazyParseTable(object):
def __init__(self, input, parser):
self.parser = parser
self.input = input
self.matched = {}
self.errorinformation = {}
def match_symbol(self, i, symbol):
#print i, symbol
#print self.matched.keys()
if (i, symbol) in self.matched:
return self.matched[i, symbol]
error = None # for the annotator
if self.parser.is_nonterminal(symbol):
rule = self.parser.get_rule(symbol)
subsymbol = None
error = None
for expansion in rule.expansions:
curr = i
children = []
for subsymbol in expansion:
node, next, error2 = self.match_symbol(curr, subsymbol)
if node is None:
error = combine_errors(error, error2)
break
children.append(node)
curr = next
else:
assert len(expansion) == len(children)
result = (Nonterminal(symbol, children), curr, error)
self.matched[i, symbol] = result
return result
self.matched[i, symbol] = None, 0, error
return None, 0, error
else:
try:
input = self.input[i]
if self.terminal_equality(symbol, input):
result = (Symbol(symbol, input.source, input), i + 1, error)
self.matched[i, symbol] = result
return result
else:
# XXX hack unnice: handles the sort of token names that
# ebnfparse produces
if (symbol.startswith("__") and
symbol.split("_")[2][0] in "0123456789"):
expected = symbol.split("_")[-1]
else:
expected = symbol
error = ErrorInformation(i, [expected])
except IndexError:
error = ErrorInformation(i)
return None, 0, error
def terminal_equality(self, symbol, input):
return symbol == input.name
class PackratParser(object):
def __init__(self, rules, startsymbol, parsetablefactory=LazyParseTable,
check_for_left_recursion=True):
self.rules = rules
self.nonterminal_to_rule = {}
for rule in rules:
self.nonterminal_to_rule[rule.nonterminal] = rule
self.startsymbol = startsymbol
if check_for_left_recursion:
assert not self.has_left_recursion()
self.parsetablefactory = parsetablefactory
def is_nonterminal(self, symbol):
return symbol in self.nonterminal_to_rule
def get_rule(self, symbol):
return self.nonterminal_to_rule[symbol]
def parse(self, tokeniterator, lazy=False):
if lazy:
input = LazyInputStream(tokeniterator)
else:
input = list(tokeniterator)
table = self.parsetablefactory(input, self)
result = table.match_symbol(0, self.startsymbol)
if result[0] is None:
error = result[2]
raise ParseError(input[error.pos].source_pos, error)
return result[0]
def has_left_recursion(self):
"""NOT_RPYTHON"""
follows = {}
for rule in self.rules:
follow = py.builtin.set()
follows[rule.nonterminal] = follow
for expansion in rule.expansions:
if expansion and self.is_nonterminal(expansion[0]):
follow.add(expansion[0])
changed = True
while changed:
changed = False
for nonterminal, follow in follows.iteritems():
for nt in follow:
subfollow = follows[nt]
update = subfollow - follow
if update:
changed = True
follow.update(update)
break
for nonterminal, follow in follows.iteritems():
if nonterminal in follow:
print "nonterminal %s is in its own follow %s" % (nonterminal, follow)
return True
return False
def __repr__(self):
from pprint import pformat
return "%s%s" % (self.__class__.__name__,
pformat((self.rules, self.startsymbol)), )
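# Usage sketch (hypothetical grammar; real rules and tokens normally come from
# the ebnfparse/lexer machinery that sits on top of this module):
#   rules = [Rule("list", [["ITEM", "list"], ["ITEM"]])]
#   parser = PackratParser(rules, "list")
#   tree = parser.parse(tokens)   # tokens: lexer Tokens whose .name is "ITEM"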
class ParserCompiler(object):
def __init__(self, parser):
self.parser = parser
self.allcode = []
self.symbol_to_number = {}
self.made = {}
def compile(self):
from rpython.tool.sourcetools import func_with_new_name
self.allcode.append("class CompileableParser(baseclass):")
self.make_matcher(self.parser.startsymbol)
self.make_fixed()
miniglobals = globals().copy()
miniglobals["baseclass"] = self.parser.__class__
#print "\n".join(self.allcode)
exec py.code.Source("\n".join(self.allcode)).compile() in miniglobals
kls = miniglobals["CompileableParser"]
# XXX
parsetable = self.parser.parsetablefactory([], self.parser)
kls.terminal_equality = func_with_new_name(
parsetable.terminal_equality.im_func,
"terminal_equality_compileable")
return kls
def get_number(self, symbol):
if symbol in self.symbol_to_number:
return self.symbol_to_number[symbol]
result = len(self.symbol_to_number)
self.symbol_to_number[symbol] = result
return result
def make_matcher(self, symbol):
if symbol not in self.made:
self.made[symbol] = True
if self.parser.is_nonterminal(symbol):
self.make_nonterminal_matcher(symbol)
else:
self.make_terminal_matcher(symbol)
def make_terminal_matcher(self, symbol):
number = self.get_number(symbol)
self.allcode.append("""
def match_terminal%(number)s(self, i):
# matcher for terminal %(number)s %(symbol)r
if i in self.matched_terminals%(number)s:
return self.matched_terminals%(number)s[i]
try:
input = self.input[i]
if self.terminal_equality(%(symbol)r, input):
symbol = Symbol(%(symbol)r, input.name, input)
result = (symbol, i + 1)
self.matched_terminals%(number)s[i] = result
return result
except IndexError:
pass
return None, i""" % vars())
def make_nonterminal_matcher(self, symbol):
number = self.get_number(symbol)
rule = self.parser.nonterminal_to_rule[symbol]
code = []
code.append("""
def match_nonterminal%(number)s(self, i):
# matcher for nonterminal %(number)s %(symbol)s
if i in self.matched_nonterminals%(number)s:
return self.matched_nonterminals%(number)s[i]
last_failed_position = 0
subsymbol = None
expansionindex = 0
while 1:""" % vars())
for expansionindex, expansion in enumerate(rule.expansions):
nextindex = expansionindex + 1
code.append("""\
if expansionindex == %s:""" % (expansionindex, ))
if not expansion:
code.append("""\
result = (Nonterminal(symbol, []), i)
self.matched_nonterminals%(number)s[i] = result
return result""" % vars())
continue
code.append("""\
curr = i
children = []""")
for subsymbol in expansion:
self.make_matcher(subsymbol)
if self.parser.is_nonterminal(subsymbol):
match = "match_nonterminal%s" % self.get_number(subsymbol)
else:
match = "match_terminal%s" % self.get_number(subsymbol)
code.append("""\
node, next = self.%(match)s(curr)
if node is None:
last_failed_position = next
expansionindex = %(nextindex)s
continue
curr = next""" % vars())
code.append("""\
result = (Nonterminal(%(symbol)r, children), curr)
self.matched_nonterminals%(number)s[i] = result
return result""" % vars())
code.append("""\
if expansionindex == %(nextindex)s:
result = None, last_failed_position
self.matched_nonterminals%(number)s[i] = result
return result""" % vars())
self.allcode.extend(code)
def make_fixed(self):
# __init__
code = ["""
def __init__(self):
self.rules = [] # dummy
self.nonterminal_to_rule = {} # dummy
self.startsymbol = "" # dummy
self.parsetablefactory = None # dummy"""]
for symbol, number in self.symbol_to_number.iteritems():
if self.parser.is_nonterminal(symbol):
name = "matched_nonterminals%s" % number
else:
name = "matched_terminals%s" % number
code.append("""\
self.%(name)s = {}""" % vars())
# parse
startsymbol = self.get_number(self.parser.startsymbol)
code.append("""
def parse(self, tokenlist, lazy=True):
self.input = tokenlist
result = self.match_nonterminal%(startsymbol)s(0)
if result[0] is None:
raise ParseError(None, self.input[result[1]])
return result[0]""" % (vars()))
self.allcode.extend(code)
| mit | -8,121,115,071,978,187,000 | 36.423631 | 86 | 0.54243 | false | 4.263296 | false | false | false |
CosmicLaserShow/CosmicLaserShow | pysparc/pysparc/ftdi_chip.py | 1 | 7036 | """Access FTDI hardware.
Contents
--------
:class:`Error`
Base error class.
:class:`DeviceNotFoundError`
Raised when device is not connected.
:class:`DeviceError`
Raised for generic pylibftdi exceptions.
:class:`ReadError`
Raised on read errors.
:class:`WriteError`
Raised on write errors.
:class:`FtdiChip`
Access FTDI hardware.
"""
import logging
import time
import pylibftdi
logger = logging.getLogger(__name__)
# FTDI documentation: must be multiple of block size, which is 64 bytes
# with 2 bytes overhead. So, must be multiple of 62 bytes.
READ_SIZE = 62
# Default buffer size is 4K (64 * 64 bytes), but mind the overhead
BUFFER_SIZE = 64 * 62
# Sleep between read/write error retries in seconds
RW_ERROR_WAIT = .5
# parity for rs232 line settings in libftdi::ftdi_set_line_property
PARITY_NONE = 0
PARITY_ODD = 1
PARITY_EVEN = 2
PARITY_MARK = 3
PARITY_SPACE = 4
# bitsize for rs232 line settings in libftdi::ftdi_set_line_property
BITS_8 = 8
BITS_7 = 7
# stopbits for rs232 line settings in libftdi::ftdi_set_line_property
STOP_BIT_1 = 0
STOP_BIT_15 = 1
STOP_BIT_2 = 2
class Error(Exception):
"""Base error class."""
def __init__(self, msg):
self.ftdi_msg = msg
class DeviceNotFoundError(Error):
"""Raised when device is not connected."""
def __str__(self):
return "Device not found."
class DeviceError(Error):
"""Raised for generic pylibftdi exceptions."""
def __str__(self):
return "Device error: %s" % self.ftdi_msg
class ClosedDeviceError(Error):
"""Raised when trying a read/write operation if device is closed."""
def __str__(self):
return "Device is closed, %s" % self.ftdi_msg
class ReadError(Error):
"""Raised on read errors."""
def __str__(self):
return "Device read error: %s" % self.ftdi_msg
class WriteError(Error):
"""Raised on write errors."""
def __str__(self):
return "Device write error: %s" % self.ftdi_msg
class FtdiChip(object):
"""Access FTDI hardware.
Instantiate this class to get access to connected FTDI hardware.
The hardware device is opened during instantiation.
You can use the :meth:`find_all` static method to list all connected
    devices before opening them::
>>> FtdiChip.find_all()
"""
_device = None
closed = True
def __init__(self, device_description=None, interface_select=0, device_index=0):
self._device_description = device_description
self._interface_select = interface_select
self._device_index = device_index
self.open()
def open(self):
"""Open device.
Raises :class:`DeviceNotFoundError` if the device cannot be found.
Raises :class:`DeviceError` if the device cannot be opened.
"""
if self._device is None:
try:
logger.info("Initialising Ftdi device {} {}".format(self._device_description, self._device_index))
self._device = pylibftdi.Device(self._device_description,
interface_select=self._interface_select, device_index=self._device_index)
except pylibftdi.FtdiError as exc:
if "(-3)" in str(exc):
raise DeviceNotFoundError(str(exc))
else:
raise DeviceError(str(exc))
else:
# force default latency timer of 16 ms
# on some systems, this reverts to 0 ms if not set explicitly
self._device.ftdi_fn.ftdi_set_latency_timer(16)
self.closed = False
self.flush()
else:
return
def __del__(self):
self.close()
def set_line_settings(self, bits, parity, stop_bit):
"""Set line settings (bits, parity, stop bit).
:param bits: one of BITS_8 or BITS_7
:param parity: one of PARITY_NONE, PARITY_ODD, PARITY_EVEN,
PARITY_MARK, PARITY_SPACE
:param stop_bit: one of STOP_BIT_1, STOP_BIT_15, STOP_BIT_2
"""
self._device.ftdi_fn.ftdi_set_line_property(bits, stop_bit, parity)
def close(self):
"""Close device."""
if not self.closed:
self._device.close()
self._device = None
self.closed = True
@staticmethod
def find_all():
"""Find all connected FTDI devices.
:returns: list of (manufacturer, description, serial#) tuples.
"""
return pylibftdi.Driver().list_devices()
def flush(self):
"""Flush device buffers.
To completely clear out outdated measurements, e.g. when changing
parameters, call this method. All data received after this method
is called is really newly measured.
"""
print("Starting device flush")
self._device.flush()
self.read(BUFFER_SIZE)
print("Device flush finished")
def read(self, read_size=None):
"""Read from device and retry if necessary.
A read is tried three times. When unsuccesful, raises
:class:`ReadError`. Raises :class:`ClosedDeviceError` when
attempting to read from a closed device.
:param read_size: number of bytes to read (defaults to READ_SIZE).
As per the FTDI documentation, this should be a multiple of 62
for best performance.
:returns: string containing the data.
"""
if self.closed:
logger.warning("Attempting to read from closed device.")
raise ClosedDeviceError("attempting to read.")
if not read_size:
read_size = READ_SIZE
for i in range(3):
#print("Reading from device (attempt {})".format(i))
try:
data = self._device.read(read_size)
except pylibftdi.FtdiError as exc:
logger.warning("Read failed, retrying...")
time.sleep(RW_ERROR_WAIT)
continue
else:
return data
logger.error("Read failed.")
self.close()
raise ReadError(str(exc))
def write(self, data):
"""Write to device and retry if necessary.
A write is tried three times. When unsuccesful, raises
:class:`WriteError`. Raises :class:`ClosedDeviceError` when
attempting to write from a closed device.
:param data: string containing the data to write.
"""
if self.closed:
logger.warning("Attempting to read from closed device.")
raise ClosedDeviceError("attempting to write.")
for i in range(3):
try:
self._device.write(data)
except pylibftdi.FtdiError as exc:
logger.warning("Write failed, retrying...")
time.sleep(RW_ERROR_WAIT)
continue
else:
return
logger.error("Write failed.")
self.close()
raise WriteError(str(exc))
| mit | -7,149,492,045,259,250,000 | 25.651515 | 114 | 0.599915 | false | 4.029782 | false | false | false |
lucidfrontier45/PyVB | pyvb/old_ver/vbgmm1d.py | 1 | 7659 | #!/usr/bin/python
import numpy as np
from numpy.random import randn,dirichlet
from scipy.linalg import det, inv
from scipy.cluster import vq
from scipy.special import psi,gammaln
from core import normalize
try:
from _vbgmm1d import _evaluateHiddenState_C, _lnPD_C
ext_imported = True
except:
ext_imported = False
print "warning, Cython extension module was not found"
print "computation can be slower"
def testData1(n=100):
X = np.r_[randn(n*2)]
return X
def testData2(n=100):
X = np.r_[randn(n*2) / 0.3 , randn(n) + 10.0]
return X
def GaussianPDF(x,mu,s):
return np.exp(-((x - mu)**2)*s*0.5)*np.sqrt(s/(2.0*np.pi))
def lnZ_Wishart(nu,V):
# log normalization constant of 1D Wishart
lnZ = 0.5 * nu * np.log(2.0*V) + gammaln(nu * 0.5)
return lnZ
class VBGMM1D:
def __init__(self,nmix=10,m=0.0,beta=2,nu=1,s=0.1):
self._nstates = nmix
self._m0 = m
self._beta0 = beta
self._nu0 = nu
self._s0 = s
self.pi = np.ones(nmix) / float(nmix)
def _init_params(self,obs,use_emgmm=False):
self._set_posterior(obs,use_emgmm)
def _set_posterior(self,obs,use_emgmm=False):
nobs = len(obs)
nmix = self._nstates
# hidden states
self.z = dirichlet(np.tile(1.0/nmix,nmix),nobs)
# mixing coefficients
#self.u = np.tile(self._u0,nmix)
# posterior mean vector
self.m, temp = vq.kmeans2(obs,nmix)
self.beta = np.tile(self._beta0,nmix)
# posterior degree of freedom
self.nu = np.tile(float(nobs)/nmix,nmix)
# posterior precision
self.s = np.tile(self._s0,nmix)
def _VBE(self,obs,use_ext=True):
self._et = self.s * self.nu # <tau_k>
self._elnt = psi(self.nu*0.5) + np.log(2.0*self.s) # <ln(t_k)>
self.z = self._evaluateHiddenState(obs,use_ext)
def _evaluateHiddenState(self,obs,use_ext=True):
nobs = len(obs)
nmix = self._nstates
ln2pi = np.log(2.0 * np.pi)
z = np.tile(np.log(self.pi) + 0.5 * self._elnt - 0.5 * ln2pi ,(nobs,1))
if use_ext and ext_imported :
pass
else :
for k in xrange(nmix):
# very slow! need Fortran or C codes
dobs = obs - self.m[k]
z[:,k] -= 0.5 * (1.0/self.beta[k] + self.nu[k]*self.s[k]*(dobs**2))
z = z - z.max(1)[np.newaxis].T
z = np.exp(z)
z = normalize(z,1)
return z
def _VBM(self,obs):
self._calcSufficientStatistic(obs)
self._updatePosteriorParameters(obs)
def _calcSufficientStatistic(self,obs):
self.N = self.z.sum(0)
self.xbar = np.dot(obs,self.z) / self.N
self.C = np.diag(np.dot(((obs - self.xbar[np.newaxis].T)**2),self.z))
self.pi = self.N / self.N.sum()
def _updatePosteriorParameters(self,obs):
self.beta = self.N + self._beta0
self.m = (self._beta0 * self._m0 + self.N * self.xbar) / self.beta
self.nu = self._nu0 + self.N
self.s = 1.0 / (1.0/self._s0 + self.C + (self._beta0 *self.N / self.beta) \
* (self.xbar - self._m0)**2)
def _VBLowerBound(self,obs,use_ext=True):
# variational lower bound
nmix = self._nstates
self.N = self.z.sum(0) # need to be updated !!
# <lnp(X|Z,theta)>
# very slow! neew Fortran or C codes
lnpX = np.dot(self.N,(np.log(self.pi) + 0.5 * self._elnt))
for k in xrange(nmix):
dobs = obs - self.m[k]
lnpX -= self.N[k] * 1.0 / self.beta[k] + self.s[k] * self.nu[k] * \
(dobs * dobs).sum()
# H[q(z)] = -<lnq(z)>
Hz = 0.0
Hz = - np.nan_to_num(self.z * np.log(self.z)).sum()
#for k in xrange(nmix):
# Hz -= np.dot(self.z[:,k],np.log(self.z[:,k]))
# KL[q(pi)||p(pi)]
#KLpi = ( - gammaln(self.u) + self.N * psi(self.u)).sum()
KLpi = 0
# KL[q(mu,tau)||p(mu,tau)]
KLmt = 0
#KLmt = ((self.N * self._elnt + self.nu * (self.s / self._s0 - 1.0 - \
# np.log(2.0 * self.s)) + np.log(self.beta) + self._beta0 / self.beta + \
# self.nu * self.s * self._beta0 * (self.m - self._m0)**2) * 0.5 - \
# gammaln(self.nu * 0.5)).sum()
# Wishart part
KLmt = (self.N * self._elnt + self.nu * (self.s / self._s0 - 1.0)).sum() \
* 0.5 + nmix * lnZ_Wishart(self._nu0,self._s0)
for k in xrange(nmix):
KLmt -= lnZ_Wishart(self.nu[k],self.s[k])
# Conditional Gaussian part
KLmt += 0.5 * (np.log(self.beta/self._beta0) + self._beta0/self.beta - 1 \
+ self._beta0 * self.nu * self.s * (self.m-self._m0)**2).sum()
return lnpX + Hz - KLpi - KLmt
def _VBLowerBound2(self,obs,use_ext=True):
# variational lower bound
nobs = len(obs)
nmix = self._nstates
self.N = self.z.sum(0) # need to be updated !!
# KL[q(z)||p(z)]
KLz = 0.0
for k in xrange(nmix):
KLz -= np.dot(self.z[:,k],np.log(self.z[:,k]))
KLz += np.dot(self.N,np.log(self.pi))
# KL[q(mu,tau)||p(mu,tau)]
KLmt = (np.log(self.beta).sum() - nmix * self._beta0) * 0.5
KLmt += lnZ_Wishart(self._nu0,self._s0) * nmix
for k in xrange(nmix):
KLmt -= lnZ_Wishart(self.nu[k],self.s[k])
#print "%12.5e %12.5e %12.5e"%(Hz,-KLpi,-KLmt)
return KLz - KLmt
def _VBFreeEnergy(self,obs,use_ext=True):
return - self._VBLowerBound2(obs,use_ext)
def fit(self,obs,niter=200,eps=1e-4,ifreq=100,init=True,plot=False,use_ext=False):
if init : self._init_params(obs)
F_old = 1.0e50
for i in range(niter):
old_pi = np.copy(self.pi)
old_m = np.copy(self.m)
old_s = np.copy(self.s)
self._VBE(obs,use_ext)
self._VBM(obs)
F_new = self._VBFreeEnergy(obs,use_ext)
dF = F_new - F_old
if dF < 0.0:
print "%8dth iter, Free Energy = %10.4e, dF = %10.4e" %(i,F_new,dF)
else :
print "%8dth iter, Free Energy = %10.4e, dF = %10.4e warning" \
%(i,F_new,dF)
if abs(dF) < eps :
print dF, " < ", eps, "Converged"
break
#conv_u = np.allclose(self.pi,old_pi)
#conv_m = np.allclose(self.m,old_m)
#conv_s = np.allclose(self.s,old_s)
#if conv_u and conv_m and conv_s:
# break
F_old = F_new
if plot:
self.plotPDF(obs)
return self
def showModel(self,min_pi=0.01):
nmix = self._nstates
params = sorted(zip(self.pi,self.m,self._et),reverse=True)
        relevant_clusters = []
for k in xrange(nmix):
if params[k][0] < min_pi:
break
            relevant_clusters.append(params[k])
print "%dth component, pi = %8.3g, mu = %8.3g, tau = %8.3g" \
% (k+1,params[k][0],params[k][1],params[k][2])
        return relevant_clusters
def pdf(self,x,min_pi=0.01):
params = self.showModel(min_pi)
pi = -np.sort(-self.pi)[:len(params)]
pi = pi / pi.sum()
y = np.array([GaussianPDF(x,p[1],p[2]) * pi[k] \
for k,p in enumerate(params)])
return y
def plotPDF(self,obs,bins=100,min_pi=0.01):
try :
import matplotlib.pyplot as plt
except ImportError :
print "cannot import pyplot"
return
x = np.linspace(min(obs),max(obs),bins)
y = self.pdf(x,min_pi)
plt.hist(obs,bins,label="observed",normed=True)
plt.plot(x,y.sum(0),label="sum",linewidth=3)
for k,yy in enumerate(y) :
plt.plot(x,yy,label="%dth cluster"%(k+1),linewidth=3)
plt.legend(loc=0)
plt.show()
def decode(self,obs):
        z = self._evaluateHiddenState(obs)
        codes = z.argmax(1)
        clust = [[] for i in range(z.shape[1])]
        for (o, c) in zip(obs, codes):
            clust[c].append(o)
        clust = [np.array(cl) for cl in clust]
        return codes, clust
def test1(nmix,niter=10000):
Y = testData2(500)
model = VBGMM1D(nmix)
model.fit(Y,niter)
model.plotPDF(Y)
if __name__ == "__main__":
from sys import argv
nmix = int(argv[1])
test1(nmix)
| bsd-3-clause | 8,080,979,700,107,704,000 | 29.272727 | 84 | 0.577099 | false | 2.515271 | false | false | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/_pytest/main.py | 1 | 26632 | """ core implementation of testing process: init, session, runtest loop. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import fnmatch
import functools
import os
import pkgutil
import sys
import warnings
import attr
import py
import six
import _pytest._code
from _pytest import nodes
from _pytest.config import directory_arg
from _pytest.config import hookimpl
from _pytest.config import UsageError
from _pytest.deprecated import PYTEST_CONFIG_GLOBAL
from _pytest.outcomes import exit
from _pytest.runner import collect_one_node
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
EXIT_NOTESTSCOLLECTED = 5
def pytest_addoption(parser):
parser.addini(
"norecursedirs",
"directory patterns to avoid for recursion",
type="args",
default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"],
)
parser.addini(
"testpaths",
"directories to search for tests when no files or directories are given in the "
"command line.",
type="args",
default=[],
)
# parser.addini("dirpatterns",
# "patterns specifying possible locations of test files",
# type="linelist", default=["**/test_*.txt",
# "**/test_*.py", "**/*_test.py"]
# )
group = parser.getgroup("general", "running and selection options")
group._addoption(
"-x",
"--exitfirst",
action="store_const",
dest="maxfail",
const=1,
help="exit instantly on first error or failed test.",
),
group._addoption(
"--maxfail",
metavar="num",
action="store",
type=int,
dest="maxfail",
default=0,
help="exit after first num failures or errors.",
)
group._addoption(
"--strict",
action="store_true",
help="marks not registered in configuration file raise errors.",
)
group._addoption(
"-c",
metavar="file",
type=str,
dest="inifilename",
help="load configuration from `file` instead of trying to locate one of the implicit "
"configuration files.",
)
group._addoption(
"--continue-on-collection-errors",
action="store_true",
default=False,
dest="continue_on_collection_errors",
help="Force test execution even if collection errors occur.",
)
group._addoption(
"--rootdir",
action="store",
dest="rootdir",
help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
"'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
"'$HOME/root_dir'.",
)
group = parser.getgroup("collect", "collection")
group.addoption(
"--collectonly",
"--collect-only",
action="store_true",
help="only collect tests, don't execute them.",
),
group.addoption(
"--pyargs",
action="store_true",
help="try to interpret all arguments as python packages.",
)
group.addoption(
"--ignore",
action="append",
metavar="path",
help="ignore path during collection (multi-allowed).",
)
group.addoption(
"--ignore-glob",
action="append",
metavar="path",
help="ignore path pattern during collection (multi-allowed).",
)
group.addoption(
"--deselect",
action="append",
metavar="nodeid_prefix",
help="deselect item during collection (multi-allowed).",
)
# when changing this to --conf-cut-dir, config.py Conftest.setinitial
# needs upgrading as well
group.addoption(
"--confcutdir",
dest="confcutdir",
default=None,
metavar="dir",
type=functools.partial(directory_arg, optname="--confcutdir"),
help="only load conftest.py's relative to specified dir.",
)
group.addoption(
"--noconftest",
action="store_true",
dest="noconftest",
default=False,
help="Don't load any conftest.py files.",
)
group.addoption(
"--keepduplicates",
"--keep-duplicates",
action="store_true",
dest="keepduplicates",
default=False,
help="Keep duplicate tests.",
)
group.addoption(
"--collect-in-virtualenv",
action="store_true",
dest="collect_in_virtualenv",
default=False,
help="Don't ignore tests in a local virtualenv directory",
)
group = parser.getgroup("debugconfig", "test session debugging and configuration")
group.addoption(
"--basetemp",
dest="basetemp",
default=None,
metavar="dir",
help=(
"base temporary directory for this test run."
"(warning: this directory is removed if it exists)"
),
)
class _ConfigDeprecated(object):
def __init__(self, config):
self.__dict__["_config"] = config
def __getattr__(self, attr):
warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
return getattr(self._config, attr)
def __setattr__(self, attr, val):
warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
return setattr(self._config, attr, val)
def __repr__(self):
return "{}({!r})".format(type(self).__name__, self._config)
def pytest_configure(config):
__import__("pytest").config = _ConfigDeprecated(config) # compatibility
def wrap_session(config, doit):
"""Skeleton command line program"""
session = Session(config)
session.exitstatus = EXIT_OK
initstate = 0
try:
try:
config._do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
session.exitstatus = doit(config, session) or 0
except UsageError:
raise
except Failed:
session.exitstatus = EXIT_TESTSFAILED
except (KeyboardInterrupt, exit.Exception):
excinfo = _pytest._code.ExceptionInfo.from_current()
exitstatus = EXIT_INTERRUPTED
if initstate <= 2 and isinstance(excinfo.value, exit.Exception):
sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg))
if excinfo.value.returncode is not None:
exitstatus = excinfo.value.returncode
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = exitstatus
except: # noqa
excinfo = _pytest._code.ExceptionInfo.from_current()
config.notify_exception(excinfo, config.option)
session.exitstatus = EXIT_INTERNALERROR
if excinfo.errisinstance(SystemExit):
sys.stderr.write("mainloop: caught unexpected SystemExit!\n")
finally:
excinfo = None # Explicitly break reference cycle.
session.startdir.chdir()
if initstate >= 2:
config.hook.pytest_sessionfinish(
session=session, exitstatus=session.exitstatus
)
config._ensure_unconfigure()
return session.exitstatus
def pytest_cmdline_main(config):
return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
if session.testsfailed:
return EXIT_TESTSFAILED
elif session.testscollected == 0:
return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
return session.perform_collect()
def pytest_runtestloop(session):
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted("%d errors during collection" % session.testsfailed)
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldfail:
raise session.Failed(session.shouldfail)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def _in_venv(path):
"""Attempts to detect if ``path`` is the root of a Virtual Environment by
checking for the existence of the appropriate activate script"""
bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin")
if not bindir.isdir():
return False
activates = (
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
)
return any([fname.basename in activates for fname in bindir.listdir()])
def pytest_ignore_collect(path, config):
ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath())
ignore_paths = ignore_paths or []
excludeopt = config.getoption("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
if py.path.local(path) in ignore_paths:
return True
ignore_globs = config._getconftest_pathlist(
"collect_ignore_glob", path=path.dirpath()
)
ignore_globs = ignore_globs or []
excludeglobopt = config.getoption("ignore_glob")
if excludeglobopt:
ignore_globs.extend([py.path.local(x) for x in excludeglobopt])
if any(
fnmatch.fnmatch(six.text_type(path), six.text_type(glob))
for glob in ignore_globs
):
return True
allow_in_venv = config.getoption("collect_in_virtualenv")
if not allow_in_venv and _in_venv(path):
return True
return False
def pytest_collection_modifyitems(items, config):
deselect_prefixes = tuple(config.getoption("deselect") or [])
if not deselect_prefixes:
return
remaining = []
deselected = []
for colitem in items:
if colitem.nodeid.startswith(deselect_prefixes):
deselected.append(colitem)
else:
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
@contextlib.contextmanager
def _patched_find_module():
"""Patch bug in pkgutil.ImpImporter.find_module
When using pkgutil.find_loader on python<3.4 it removes symlinks
from the path due to a call to os.path.realpath. This is not consistent
with actually doing the import (in these versions, pkgutil and __import__
did not share the same underlying code). This can break conftest
discovery for pytest where symlinks are involved.
The only supported python<3.4 by pytest is python 2.7.
"""
if six.PY2: # python 3.4+ uses importlib instead
def find_module_patched(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
# original: path = [os.path.realpath(self.path)]
path = [self.path]
try:
file, filename, etc = pkgutil.imp.find_module(subname, path)
except ImportError:
return None
return pkgutil.ImpLoader(fullname, file, filename, etc)
old_find_module = pkgutil.ImpImporter.find_module
pkgutil.ImpImporter.find_module = find_module_patched
try:
yield
finally:
pkgutil.ImpImporter.find_module = old_find_module
else:
yield
class FSHookProxy(object):
def __init__(self, fspath, pm, remove_mods):
self.fspath = fspath
self.pm = pm
self.remove_mods = remove_mods
def __getattr__(self, name):
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x
return x
class NoMatch(Exception):
""" raised if matching cannot locate a matching names. """
class Interrupted(KeyboardInterrupt):
""" signals an interrupted test run. """
__module__ = "builtins" # for py3
class Failed(Exception):
""" signals a stop as failed test run. """
@attr.s
class _bestrelpath_cache(dict):
path = attr.ib()
def __missing__(self, path):
r = self.path.bestrelpath(path)
self[path] = r
return r
class Session(nodes.FSCollector):
Interrupted = Interrupted
Failed = Failed
def __init__(self, config):
nodes.FSCollector.__init__(
self, config.rootdir, parent=None, config=config, session=self, nodeid=""
)
self.testsfailed = 0
self.testscollected = 0
self.shouldstop = False
self.shouldfail = False
self.trace = config.trace.root.get("collection")
self._norecursepatterns = config.getini("norecursedirs")
self.startdir = py.path.local()
self._initialpaths = frozenset()
# Keep track of any collected nodes in here, so we don't duplicate fixtures
self._node_cache = {}
self._bestrelpathcache = _bestrelpath_cache(config.rootdir)
# Dirnames of pkgs with dunder-init files.
self._pkg_roots = {}
self.config.pluginmanager.register(self, name="session")
def __repr__(self):
return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % (
self.__class__.__name__,
self.name,
getattr(self, "exitstatus", "<UNSET>"),
self.testsfailed,
self.testscollected,
)
def _node_location_to_relpath(self, node_path):
# bestrelpath is a quite slow function
return self._bestrelpathcache[node_path]
@hookimpl(tryfirst=True)
def pytest_collectstart(self):
if self.shouldfail:
raise self.Failed(self.shouldfail)
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
@hookimpl(tryfirst=True)
def pytest_runtest_logreport(self, report):
if report.failed and not hasattr(report, "wasxfail"):
self.testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self.testsfailed >= maxfail:
self.shouldfail = "stopping after %d failures" % (self.testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path):
return path in self._initialpaths
def gethookproxy(self, fspath):
# check if we have the common case of running
# hooks with all conftest.py files
pm = self.config.pluginmanager
my_conftestmodules = pm._getconftestmodules(fspath)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
# one or more conftests are not in use at this fspath
proxy = FSHookProxy(fspath, pm, remove_mods)
else:
# all plugis are active for this fspath
proxy = self.config.hook
return proxy
def perform_collect(self, args=None, genitems=True):
hook = self.config.hook
try:
items = self._perform_collect(args, genitems)
self.config.pluginmanager.check_pending()
hook.pytest_collection_modifyitems(
session=self, config=self.config, items=items
)
finally:
hook.pytest_collection_finish(session=self)
self.testscollected = len(items)
return items
def _perform_collect(self, args, genitems):
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = []
initialpaths = []
self._initialparts = []
self.items = items = []
for arg in args:
parts = self._parsearg(arg)
self._initialparts.append(parts)
initialpaths.append(parts[0])
self._initialpaths = frozenset(initialpaths)
rep = collect_one_node(self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
errors = []
for arg, exc in self._notfound:
line = "(no name %r in any of %r)" % (arg, exc.args[0])
errors.append("not found: %s\n%s" % (arg, line))
# XXX: test this
raise UsageError(*errors)
if not genitems:
return rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
return items
def collect(self):
for initialpart in self._initialparts:
arg = "::".join(map(str, initialpart))
self.trace("processing argument", arg)
self.trace.root.indent += 1
try:
for x in self._collect(arg):
yield x
except NoMatch:
# we are inside a make_report hook so
# we cannot directly pass through the exception
self._notfound.append((arg, sys.exc_info()[1]))
self.trace.root.indent -= 1
def _collect(self, arg):
from _pytest.python import Package
names = self._parsearg(arg)
argpath = names.pop(0)
# Start with a Session root, and delve to argpath item (dir or file)
# and stack all Packages found on the way.
# No point in finding packages when collecting doctests
if not self.config.getoption("doctestmodules", False):
pm = self.config.pluginmanager
for parent in reversed(argpath.parts()):
if pm._confcutdir and pm._confcutdir.relto(parent):
break
if parent.isdir():
pkginit = parent.join("__init__.py")
if pkginit.isfile():
if pkginit not in self._node_cache:
col = self._collectfile(pkginit, handle_dupes=False)
if col:
if isinstance(col[0], Package):
self._pkg_roots[parent] = col[0]
# always store a list in the cache, matchnodes expects it
self._node_cache[col[0].fspath] = [col[0]]
# If it's a directory argument, recurse and look for any Subpackages.
# Let the Package collector deal with subnodes, don't collect here.
if argpath.check(dir=1):
assert not names, "invalid arg %r" % (arg,)
seen_dirs = set()
for path in argpath.visit(
fil=self._visit_filter, rec=self._recurse, bf=True, sort=True
):
dirpath = path.dirpath()
if dirpath not in seen_dirs:
# Collect packages first.
seen_dirs.add(dirpath)
pkginit = dirpath.join("__init__.py")
if pkginit.exists():
for x in self._collectfile(pkginit):
yield x
if isinstance(x, Package):
self._pkg_roots[dirpath] = x
if dirpath in self._pkg_roots:
# Do not collect packages here.
continue
for x in self._collectfile(path):
key = (type(x), x.fspath)
if key in self._node_cache:
yield self._node_cache[key]
else:
self._node_cache[key] = x
yield x
else:
assert argpath.check(file=1)
if argpath in self._node_cache:
col = self._node_cache[argpath]
else:
collect_root = self._pkg_roots.get(argpath.dirname, self)
col = collect_root._collectfile(argpath, handle_dupes=False)
if col:
self._node_cache[argpath] = col
m = self.matchnodes(col, names)
# If __init__.py was the only file requested, then the matched node will be
# the corresponding Package, and the first yielded item will be the __init__
# Module itself, so just use that. If this special case isn't taken, then all
# the files in the package will be yielded.
if argpath.basename == "__init__.py":
yield next(m[0].collect())
return
for y in m:
yield y
def _collectfile(self, path, handle_dupes=True):
assert path.isfile(), "%r is not a file (isdir=%r, exists=%r, islink=%r)" % (
path,
path.isdir(),
path.exists(),
path.islink(),
)
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
if handle_dupes:
keepduplicates = self.config.getoption("keepduplicates")
if not keepduplicates:
duplicate_paths = self.config.pluginmanager._duplicatepaths
if path in duplicate_paths:
return ()
else:
duplicate_paths.add(path)
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, dirpath):
if dirpath.basename == "__pycache__":
return False
ihook = self.gethookproxy(dirpath.dirpath())
if ihook.pytest_ignore_collect(path=dirpath, config=self.config):
return False
for pat in self._norecursepatterns:
if dirpath.check(fnmatch=pat):
return False
ihook = self.gethookproxy(dirpath)
ihook.pytest_collect_directory(path=dirpath, parent=self)
return True
if six.PY2:
@staticmethod
def _visit_filter(f):
return f.check(file=1) and not f.strpath.endswith("*.pyc")
else:
@staticmethod
def _visit_filter(f):
return f.check(file=1)
def _tryconvertpyarg(self, x):
"""Convert a dotted module name to path."""
try:
with _patched_find_module():
loader = pkgutil.find_loader(x)
except ImportError:
return x
if loader is None:
return x
# This method is sometimes invoked when AssertionRewritingHook, which
# does not define a get_filename method, is already in place:
try:
with _patched_find_module():
path = loader.get_filename(x)
except AttributeError:
# Retrieve path from AssertionRewritingHook:
path = loader.modules[x][0].co_filename
if loader.is_package(x):
path = os.path.dirname(path)
return path
def _parsearg(self, arg):
""" return (fspath, names) tuple after checking the file exists. """
parts = str(arg).split("::")
if self.config.option.pyargs:
parts[0] = self._tryconvertpyarg(parts[0])
relpath = parts[0].replace("/", os.sep)
path = self.config.invocation_dir.join(relpath, abs=True)
if not path.check():
if self.config.option.pyargs:
raise UsageError(
"file or package not found: " + arg + " (missing __init__.py?)"
)
raise UsageError("file not found: " + arg)
parts[0] = path.realpath()
return parts
def matchnodes(self, matching, names):
self.trace("matchnodes", matching, names)
self.trace.root.indent += 1
nodes = self._matchnodes(matching, names)
num = len(nodes)
self.trace("matchnodes finished -> ", num, "nodes")
self.trace.root.indent -= 1
if num == 0:
raise NoMatch(matching, names[:1])
return nodes
def _matchnodes(self, matching, names):
if not matching or not names:
return matching
name = names[0]
assert name
nextnames = names[1:]
resultnodes = []
for node in matching:
if isinstance(node, nodes.Item):
if not names:
resultnodes.append(node)
continue
assert isinstance(node, nodes.Collector)
key = (type(node), node.nodeid)
if key in self._node_cache:
rep = self._node_cache[key]
else:
rep = collect_one_node(node)
self._node_cache[key] = rep
if rep.passed:
has_matched = False
for x in rep.result:
# TODO: remove parametrized workaround once collection structure contains parametrization
if x.name == name or x.name.split("[")[0] == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
# XXX accept IDs that don't have "()" for class instances
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
else:
# report collection failures here to avoid failing to run some test
# specified in the command line because the module could not be
# imported (#134)
node.ihook.pytest_collectreport(report=rep)
return resultnodes
def genitems(self, node):
self.trace("genitems", node)
if isinstance(node, nodes.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, nodes.Collector)
rep = collect_one_node(node)
if rep.passed:
for subnode in rep.result:
for x in self.genitems(subnode):
yield x
node.ihook.pytest_collectreport(report=rep)
| mit | -9,025,339,223,619,723,000 | 33.452781 | 109 | 0.576938 | false | 4.259076 | true | false | false |
listyque/TACTIC-Handler | thlib/ui/items/ui_commit_item.py | 1 | 4255 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'items\ui_commit_item.ui'
#
# Created: Sat Oct 5 00:17:13 2019
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from thlib.side.Qt import QtWidgets as QtGui
from thlib.side.Qt import QtGui as Qt4Gui
from thlib.side.Qt import QtCore
class Ui_commitItem(object):
def setupUi(self, commitItem):
commitItem.setObjectName("commitItem")
commitItem.resize(84, 72)
self.gridLayout = QtGui.QGridLayout(commitItem)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.previewVerticalLayout = QtGui.QVBoxLayout()
self.previewVerticalLayout.setSpacing(0)
self.previewVerticalLayout.setContentsMargins(4, 4, 4, 4)
self.previewVerticalLayout.setObjectName("previewVerticalLayout")
self.previewLabel = QtGui.QLabel(commitItem)
self.previewLabel.setMinimumSize(QtCore.QSize(64, 64))
self.previewLabel.setMaximumSize(QtCore.QSize(64, 64))
self.previewLabel.setStyleSheet("QLabel {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 rgba(175, 175, 175, 16), stop: 1 rgba(0, 0, 0, 0));\n"
" border: 0px;\n"
" border-radius: 4px;\n"
" padding: 0px 0px;\n"
"}")
self.previewLabel.setTextFormat(QtCore.Qt.RichText)
self.previewLabel.setAlignment(QtCore.Qt.AlignCenter)
self.previewLabel.setObjectName("previewLabel")
self.previewVerticalLayout.addWidget(self.previewLabel)
spacerItem = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Ignored)
self.previewVerticalLayout.addItem(spacerItem)
self.previewVerticalLayout.setStretch(1, 1)
self.gridLayout.addLayout(self.previewVerticalLayout, 0, 0, 3, 1)
self.nameVerticalLayout = QtGui.QVBoxLayout()
self.nameVerticalLayout.setSpacing(0)
self.nameVerticalLayout.setContentsMargins(-1, -1, -1, 3)
self.nameVerticalLayout.setObjectName("nameVerticalLayout")
self.fileNameLabel = QtGui.QLabel(commitItem)
self.fileNameLabel.setMinimumSize(QtCore.QSize(0, 20))
self.fileNameLabel.setMaximumSize(QtCore.QSize(16777215, 24))
font = Qt4Gui.QFont()
font.setWeight(75)
font.setBold(True)
self.fileNameLabel.setFont(font)
self.fileNameLabel.setStyleSheet("QLabel {\n"
" background-color: transparent;\n"
" border-bottom: 2px solid qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(128, 128, 128, 64), stop:1 rgba(128, 128,128, 0));\n"
"}")
self.fileNameLabel.setTextFormat(QtCore.Qt.PlainText)
self.fileNameLabel.setObjectName("fileNameLabel")
self.nameVerticalLayout.addWidget(self.fileNameLabel)
self.gridLayout.addLayout(self.nameVerticalLayout, 0, 1, 1, 2)
self.descriptionLerticalLayout = QtGui.QVBoxLayout()
self.descriptionLerticalLayout.setSpacing(0)
self.descriptionLerticalLayout.setObjectName("descriptionLerticalLayout")
self.commentLabel = QtGui.QLabel(commitItem)
self.commentLabel.setMinimumSize(QtCore.QSize(0, 25))
self.commentLabel.setTextFormat(QtCore.Qt.PlainText)
self.commentLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.commentLabel.setWordWrap(True)
self.commentLabel.setMargin(2)
self.commentLabel.setObjectName("commentLabel")
self.descriptionLerticalLayout.addWidget(self.commentLabel)
self.gridLayout.addLayout(self.descriptionLerticalLayout, 2, 1, 1, 2)
self.infoHorizontalLayout = QtGui.QHBoxLayout()
self.infoHorizontalLayout.setSpacing(0)
self.infoHorizontalLayout.setObjectName("infoHorizontalLayout")
self.gridLayout.addLayout(self.infoHorizontalLayout, 1, 1, 1, 2)
self.gridLayout.setColumnStretch(1, 1)
self.gridLayout.setColumnStretch(2, 1)
self.retranslateUi(commitItem)
QtCore.QMetaObject.connectSlotsByName(commitItem)
def retranslateUi(self, commitItem):
commitItem.setWindowTitle(u"Form")
| epl-1.0 | 1,223,322,050,095,421,700 | 48.476744 | 147 | 0.709988 | false | 3.57563 | false | false | false |
sato9hara/defragTrees | paper/tests/paper_synthetic2.py | 1 | 3367 | # -*- coding: utf-8 -*-
"""
@author: Satoshi Hara
"""
import sys
import os
sys.path.append(os.path.abspath('./'))
sys.path.append(os.path.abspath('./baselines/'))
sys.path.append(os.path.abspath('../'))
import numpy as np
import paper_sub
from RForest import RForest
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colorbar as colorbar
def plotTZ(filename=None):
t = np.linspace(0, 1, 101)
z = 0.25 + 0.5 / (1 + np.exp(- 20 * (t - 0.5))) + 0.05 * np.cos(t * 2 * np.pi)
cmap = cm.get_cmap('cool')
fig, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[19, 1]})
poly1 = [[0, 0]]
poly1.extend([[t[i], z[i]] for i in range(t.size)])
poly1.extend([[1, 0], [0, 0]])
poly2 = [[0, 1]]
poly2.extend([[t[i], z[i]] for i in range(t.size)])
poly2.extend([[1, 1], [0, 1]])
poly1 = plt.Polygon(poly1,fc=cmap(0.0))
poly2 = plt.Polygon(poly2,fc=cmap(1.0))
ax1.add_patch(poly1)
ax1.add_patch(poly2)
ax1.set_xlabel('x1', size=22)
ax1.set_ylabel('x2', size=22)
ax1.set_title('True Data', size=28)
colorbar.ColorbarBase(ax2, cmap=cmap, format='%.1f')
ax2.set_ylabel('Output y', size=22)
plt.show()
if not filename is None:
plt.savefig(filename, format="pdf", bbox_inches="tight")
plt.close()
def plotForest(filename=None):
forest = RForest(modeltype='classification')
forest.fit('./result/result_synthetic2/forest/')
X = np.c_[np.kron(np.linspace(0, 1, 201), np.ones(201)), np.kron(np.ones(201), np.linspace(0, 1, 201))]
forest.plot(X, 0, 1, box0=np.array([[0.0, 0.0], [1.0, 1.0]]), filename=filename)
if __name__ == "__main__":
# setting
prefix = 'synthetic2'
seed = 0
num = 1000
dim = 2
# data - boundary
b = 0.9
t = np.linspace(0, 1, 101)
z = 0.25 + 0.5 / (1 + np.exp(- 20 * (t - 0.5))) + 0.05 * np.cos(t * 2 * np.pi)
# data - train
np.random.seed(seed)
Xtr = np.random.rand(num, dim)
ytr = np.zeros(num)
ytr = (Xtr[:, 1] > 0.25 + 0.5 / (1 + np.exp(- 20 * (Xtr[:, 0] - 0.5))) + 0.05 * np.cos(Xtr[:, 0] * 2 * np.pi))
ytr = np.logical_xor(ytr, np.random.rand(num) > b)
# data - test
Xte = np.random.rand(num, dim)
yte = np.zeros(num)
yte = (Xte[:, 1] > 0.25 + 0.5 / (1 + np.exp(- 20 * (Xte[:, 0] - 0.5))) + 0.05 * np.cos(Xte[:, 0] * 2 * np.pi))
yte = np.logical_xor(yte, np.random.rand(num) > b)
# save
dirname = './result/result_%s' % (prefix,)
if not os.path.exists('./result/'):
os.mkdir('./result/')
if not os.path.exists(dirname):
os.mkdir(dirname)
trfile = '%s/%s_train.csv' % (dirname, prefix)
tefile = '%s/%s_test.csv' % (dirname, prefix)
np.savetxt(trfile, np.c_[Xtr, ytr], delimiter=',')
np.savetxt(tefile, np.c_[Xte, yte], delimiter=',')
# demo_R
Kmax = 10
restart = 20
treenum = 100
M = range(1, 11)
#paper_sub.run(prefix, Kmax, restart, treenum=treenum, modeltype='classification', plot=True, plot_line=[[t, z]])
paper_sub.run(prefix, Kmax, restart, treenum=treenum, modeltype='classification', plot=True, plot_line=[[t, z]], M=M, compare=True)
# plot
plotTZ('%s/%s_true.pdf' % (dirname, prefix))
plotForest('%s/%s_rf_tree05_seed00.pdf' % (dirname, prefix))
| mit | -4,752,114,063,472,154,000 | 32.346535 | 135 | 0.571429 | false | 2.659558 | false | false | false |
mbdriscoll/indigo | indigo/backends/backend.py | 1 | 25493 | import logging
import abc, time
import numpy as np
import scipy.sparse as spp
from contextlib import contextmanager
import indigo.operators as op
from indigo.util import profile
log = logging.getLogger(__name__)
class Backend(object):
"""
Provides the routines and data structures necessary to implement
a linear operator chain on different platforms.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, device_id=0):
profile._backend = self
class dndarray(object):
"""
N-dimensional array in device memory.
Parameters
----------
backend : indigo.backends.Backend
Backend instance.
shape : tuple
Array shape, a la numpy.
dtype : numpy.dtype
Datatype.
ld : tuple
Shape of array before slicing, used for ldb/ldc values.
own : bool
True if this object malloc'ed the underlying memory.
data : ?
Handle to underlying memory.
"""
__metaclass__ = abc.ABCMeta
_memory = dict()
def __init__(self, backend, shape, dtype,
ld=None, own=True, data=None, name=''):
assert isinstance(shape, (tuple,list))
self.dtype = dtype
self.shape = shape
self._backend = backend
self._leading_dim = ld or shape[0]
self._own = own
#assert isinstance(backend, Backend), (backend, type(backend))
if data is None:
self._arr = self._malloc(shape, dtype)
self._memory[ id(self._arr) ] = (name, shape, dtype)
else:
self._arr = data
def reshape(self, new_shape):
old_shape = self.shape
old_leading_dim = self._leading_dim
if (-1) in new_shape:
one = new_shape.index(-1)
new_size = -int(np.prod(new_shape))
old_size = self.size
factor = old_size // new_size
assert new_size * factor == old_size, \
"Cannot reshape {} into {}. (size mismatch)".format(old_shape, new_shape)
new_shape = list(new_shape)
new_shape[one] = factor
new_shape = tuple(new_shape)
if new_shape[0] > old_shape[0]:
contig = old_shape[0] == self._leading_dim
assert contig, "Cannot stack non-contiguous columns."
assert np.prod(new_shape) == self.size
# min for Kron -- make new lda
# max for VStack -- preserve original lda
#new_leading_dim = min(new_shape[0], old_leading_dim) # FIXME: need consistent semantics for reshape
#new_leading_dim = old_leading_dim # works with VStack
#new_leading_dim = new_shape[0] # works with Kron
if new_shape[0] < old_shape[0]:
#assert self.contiguous, "Cannot stack vectors of non-contiguous matrix."
new_leading_dim = new_shape[0]
else:
new_leading_dim = old_leading_dim
return self._backend.dndarray( self._backend,
new_shape, dtype=self.dtype, ld=new_leading_dim, own=False, data=self._arr)
@property
def size(self):
return np.prod(self.shape)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def nbytes(self):
return self.size * np.dtype(self.dtype).itemsize
@property
def ndim(self):
return len(self.shape)
@property
def contiguous(self):
if self.ndim == 1:
return True
else:
return self._leading_dim == self.shape[0]
def copy_from(self, arr):
''' copy from device when both arrays exist '''
assert isinstance(arr, np.ndarray)
if self.size != arr.size:
raise ValueError("size mismatch, expected {} got {}" \
.format(self.shape, arr.shape))
if self.dtype != arr.dtype:
raise TypeError("dtype mismatch, expected {} got {}" \
.format(self.dtype, arr.dtype))
if not arr.flags['F_CONTIGUOUS']:
raise TypeError("order mismatch, expected 'F' got {}" \
.format(arr.flags['F_CONTIGUOUS']))
self._copy_from(arr)
def copy_to(self, arr):
''' copy to device when both arrays exist '''
assert isinstance(arr, np.ndarray)
if self.size != arr.size:
raise ValueError("size mismatch, expected {} got {}" \
.format(self.shape, arr.shape))
if self.dtype != arr.dtype:
raise TypeError("dtype mismatch, expected {} got {}" \
.format(self.dtype, arr.dtype))
self._copy_to(arr)
def to_host(self):
''' copy from device when host array doesnt exist '''
arr = np.ndarray(self.shape, self.dtype, order='F')
self.copy_to(arr)
return arr
@contextmanager
def on_host(self):
arr_h = self.to_host()
yield arr_h
self.copy_from(arr_h)
def copy(self, other=None, name=''):
''' copy array on device'''
if other:
assert isinstance(other, self._backend.dndarray)
self._copy(other)
else:
other = self._backend.zero_array(self.shape, self.dtype, name=name)
other._copy(self)
return other
@classmethod
def to_device(cls, backend, arr, name=''):
''' copy to device when device array doesnt exist '''
arr_f = np.require(arr, requirements='F')
d_arr = cls(backend, arr.shape, arr.dtype, name=name)
d_arr.copy_from(arr_f)
return d_arr
def __del__(self):
""" destructor """
if self._own and hasattr(self, '_arr'):
self._memory.pop( id(self._arr) )
self._free()
def __setitem__(self, slc, other):
#FIXME don't ignore slc
assert not(slc.start or slc.stop), "dndarray setitem cant slice"
self._copy(other)
@abc.abstractmethod
def __getitem__(self, slc):
"""
Slice notation. Slices must be contiguous in memory. Returns a view.
"""
raise NotImplementedError()
@abc.abstractmethod
def _copy_from(self, arr):
""" copy HtoD implementation """
raise NotImplementedError()
@abc.abstractmethod
def _copy_to(self, arr):
""" copy DtoH implementation """
raise NotImplementedError()
@abc.abstractmethod
def _copy(self, arr):
""" copy DtoD implementation """
raise NotImplementedError()
@abc.abstractmethod
def _malloc(self, shape, dtype):
""" malloc implementation """
raise NotImplementedError()
@abc.abstractmethod
def _free(self):
""" malloc implementation """
raise NotImplementedError()
@abc.abstractmethod
def _zero(self):
""" set to zero """
raise NotImplementedError()
@staticmethod
def from_param(obj):
""" convert _arr into ctypes object """
raise NotImplementedError()
def copy_array(self, arr, name=''):
return self.dndarray.to_device(self, arr, name=name)
def zero_array(self, shape, dtype, name=''):
d_arr = self.empty_array(shape, dtype, name=name)
d_arr._zero()
return d_arr
def zeros_like(self, other, name=''):
return self.zero_array(other.shape, other.dtype, name=name)
def empty_array(self, shape, dtype, name=''):
d_arr = self.dndarray(self, shape, dtype, name=name)
return d_arr
def rand_array(self, shape, dtype=np.dtype('complex64'), name=''):
x = np.random.random(shape) + 1j*np.random.random(shape)
x = np.require(x, dtype=np.dtype('complex64'), requirements='F')
x_d = self.copy_array(x, name=name)
return x_d
def get_max_threads(self):
return 1
def barrier(self):
pass
def mem_usage(self):
nbytes = 0
log.info("Memory report:")
table = []
for name, shape, dtype in self.dndarray._memory.values():
n = np.prod(shape) * dtype.itemsize
table.append( (name, n, shape, dtype) )
nbytes += n
for name, n, shape, dtype in sorted(table, key=lambda tup: tup[1]):
if n > 1e6:
log.info(" %40s: % 3.0f MB, %20s, %15s", name, n/1e6, shape, dtype)
return nbytes
@contextmanager
def scratch(self, shape=None, nbytes=None):
assert not (shape is not None and nbytes is not None), \
"Specify either shape or nbytes to backend.scratch()."
if nbytes is not None:
shape = (nbytes//np.dtype('complex64').itemsize,)
size = np.prod(shape)
if hasattr(self, '_scratch'):
pos = self._scratch_pos
total = self._scratch.size
assert pos + size <= total, "Not enough scratch memory (wanted %d MB, but only have %d MB available of %d MB total)." % (size/1e6, (total-pos)/1e6, total/1e6)
mem = self._scratch[pos:pos+size].reshape(shape)
self._scratch_pos += size
yield mem
self._scratch_pos -= size
else:
log.debug("dynamically allocating scratch space in shape %s", shape)
mem = self.zero_array(shape, dtype=np.complex64)
yield mem
del mem
# -----------------------------------------------------------------------
# Operator Building Interface
# -----------------------------------------------------------------------
def SpMatrix(self, M, **kwargs):
""" A := M """
assert isinstance(M, spp.spmatrix)
return op.SpMatrix(self, M, **kwargs)
def DenseMatrix(self, M, **kwargs):
""" A := M """
assert isinstance(M, np.ndarray)
assert M.ndim == 2
return op.DenseMatrix(self, M, **kwargs)
def Diag(self, v, **kwargs):
""" A := diag(v) """
v = np.require(v, requirements='F')
if v.ndim > 1:
v = v.flatten(order='A')
dtype = kwargs.get('dtype', np.dtype('complex64'))
M = spp.diags( v, offsets=0 ).astype(dtype)
return self.SpMatrix(M, **kwargs)
def Adjoint(self, A, **kwargs):
""" C := A^H """
return op.Adjoint(self, A, **kwargs)
def KronI(self, c, B, **kwargs):
""" C := I_c (KRON) B """
I = self.Eye(c)
return op.Kron(self, I, B, **kwargs)
def Kron(self, A, B, **kwargs):
""" C := A (KRON) B """
return op.Kron(self, A, B, **kwargs)
def BlockDiag(self, Ms, **kwargs):
return op.BlockDiag(self, *Ms, **kwargs)
def VStack(self, Ms, **kwargs):
return op.VStack(self, *Ms, **kwargs)
def HStack (self, Ms, **kwargs):
return op.HStack(self, *Ms, **kwargs)
def UnscaledFFT(self, shape, dtype, **kwargs):
""" A := FFT{ . } """
return op.UnscaledFFT(self, shape, dtype, **kwargs)
def Eye(self, n, dtype=np.dtype('complex64'), **kwargs):
""" A := I_n """
return op.Eye(self, n, dtype=dtype, **kwargs)
def One(self, shape, dtype=np.dtype('complex64'), **kwargs):
""" A := [1] (matrix of ones) """
return op.One(self, shape, dtype=dtype, **kwargs)
def CopyIn(self, shape, dtype, **kwargs):
return op.CopyIn(self, shape, dtype)
def CopyOut(self, shape, dtype, **kwargs):
return op.CopyOut(self, shape, dtype)
def FFT(self, shape, dtype, **kwargs):
""" Unitary FFT """
n = np.prod(shape)
s = np.ones(n, order='F', dtype=dtype) / np.sqrt(n)
S = self.Diag(s, name='scale')
F = self.UnscaledFFT(shape, dtype, **kwargs)
return S*F
def FFTc(self, ft_shape, dtype, normalize=True, **kwargs):
""" Centered, Unitary FFT """
mod_slice = [ slice(d) for d in ft_shape ]
idx = np.mgrid[mod_slice]
mod = 0
for i in range(len(ft_shape)):
c = ft_shape[i] // 2
mod += (idx[i] - c / 2.0) * (c / ft_shape[i])
mod = np.exp(1j * 2.0 * np.pi * mod).astype(dtype)
M = self.Diag(mod, name='mod')
if normalize:
F = self.FFT(ft_shape, dtype=dtype, **kwargs)
else:
F = self.UnscaledFFT(ft_shape, dtype=dtype, **kwargs)
return M*F*M
def Zpad(self, M, N, mode='center', dtype=np.dtype('complex64'), **kwargs):
slc = []
if mode == 'center':
for m, n in zip(M, N):
slc += [slice(m // 2 + int(np.ceil(-n / 2)),
m // 2 + int(np.ceil( n / 2))), ]
elif mode == 'edge':
for m, n in zip(M, N):
slc.append(slice(n))
pass
x = np.arange( np.prod(M), dtype=int ).reshape(M, order='F')
rows = x[slc].flatten(order='F')
cols = np.arange(rows.size)
ones = np.ones_like(cols)
shape = np.prod(M), np.prod(N)
M = spp.coo_matrix( (ones, (rows,cols)), shape=shape, dtype=dtype )
return self.SpMatrix(M, **kwargs)
def Crop(self, M, N, dtype=np.dtype('complex64'), **kwargs):
return self.Zpad(N, M, dtype=dtype, **kwargs).H
def Interp(self, N, coord, width, table, dtype=np.dtype('complex64'), **kwargs):
assert len(N) == 3
ndim = coord.shape[0]
npts = np.prod( coord.shape[1:] )
coord = coord.reshape((ndim,-1), order='F')
from indigo.interp import interp_mat
M = interp_mat(npts, N, width, table, coord, 1).astype(dtype)
return self.SpMatrix(M, **kwargs)
def NUFFT(self, M, N, coord, width=3, n=128, oversamp=None, dtype=np.dtype('complex64'), **kwargs):
assert len(M) == 3
assert len(N) == 3
assert M[1:] == coord.shape[1:]
# target 448 x 270 x 640
# 448 x 270 x 640 mkl-batch: 170.83 ms, 237.51 gflop/s back-to-back: 121.76 ms, 333.23 gflop/s
# 1.45 1.30 1.33
# 432 x 280 x 640 mkl-batch: 183.85 ms 220.7 gflop/s back-to-back: 149.62 ms 271.19 gflop/s
# 1.40 1.35 1.33
# 432 x 270 x 640 mkl-batch: 168.62 ms 231.57 gflop/s back-to-back: 118.31 ms 330.05 gflop/s
# 1.40 1.30 1.33
if isinstance(oversamp, tuple):
omin = min(oversamp)
else:
omin = oversamp
oversamp = (omin, omin, omin)
import scipy.signal as signal
from indigo.noncart import rolloff3
ndim = coord.shape[0]
npts = np.prod( coord.shape[1:] )
oN = list(N)
for i in range(3):
oN[i] *= oversamp[i]
oN = tuple(int(on) for on in oN)
Z = self.Zpad(oN, N, dtype=dtype, name='zpad')
F = self.FFTc(oN, dtype=dtype, name='fft')
beta = np.pi * np.sqrt(((width * 2. / omin) * (omin- 0.5)) ** 2 - 0.8)
kb = signal.kaiser(2 * n + 1, beta)[n:]
G = self.Interp(oN, coord, width, kb, dtype=np.float32, name='interp')
r = rolloff3(omin, width, beta, N)
R = self.Diag(r, name='apod')
return G*F*Z*R
def Convolution(self, kernel, normalize=True, name='noname'):
F = self.FFTc(kernel.shape, name='%s.convF' % name, normalize=normalize, dtype=np.complex64)
K = self.Diag(F * kernel, name='%s.convK' % name)
I = self.Eye(F.shape[0])
return F.H * K * F
# -----------------------------------------------------------------------
# BLAS Routines
# -----------------------------------------------------------------------
def axpby(self, beta, y, alpha, x):
""" y = beta * y + alpha * x """
raise NotImplementedError()
def dot(self, x, y):
""" returns x^T * y """
raise NotImplementedError()
def norm2(self, x):
""" returns ||x||_2"""
raise NotImplementedError()
def scale(self, x, alpha):
""" x *= alpha """
raise NotImplementedError()
def pdot(self, x, y, comm):
xHy = self.dot(x, y)
if comm is not None:
xHy = comm.allreduce( xHy )
return xHy
def pnorm2(self, x, comm):
xTx = self.norm2(x)
if comm is not None:
xTx = comm.allreduce( xTx )
return xTx
def cgemm(self, y, M, x, alpha, beta, forward):
"""
Peform a dense matrix-matrix multiplication.
"""
raise NotImplementedError()
def csymm(self, y, M, x, alpha, beta, left=True):
"""
Peform a symmetric dense matrix-matrix multiplication for real symmetric matrices.
"""
raise NotImplementedError()
# -----------------------------------------------------------------------
# FFT Routines
# -----------------------------------------------------------------------
@abc.abstractmethod
def fftn(self, y, x):
"""
Peform an unscaled multidimensional forward FFT on x.
"""
raise NotImplementedError()
@abc.abstractmethod
def ifftn(self, y, x):
"""
Peform an unscaled multidimensional inverse FFT on x.
"""
raise NotImplementedError()
def _fft_workspace_size(self, x_shape):
return 0
@abc.abstractmethod
def ccsrmm(self, y, A_shape, A_indx, A_ptr, A_vals, x, alpha=1, beta=0, adjoint=False, exwrite=False):
"""
Computes Y[:] = A * X.
"""
raise NotImplementedError()
@abc.abstractmethod
def cdiamm(self, y, shape, offsets, data, x, alpha=1.0, beta=0.0, adjoint=True):
"""
Computes Y[:] = A * X.
"""
raise NotImplementedError()
@abc.abstractmethod
def onemm(self, y, x, alpha=1, beta=0):
"""
Computes Y[:] = beta * Y + alpha * [1] * X.
"""
raise NotImplementedError()
class csr_matrix(object):
"""
A device-resident sparse matrix in CSR format.
"""
_index_base = 0
def __init__(self, backend, A, name='mat'):
"""
Create a matrix from the given `scipy.sparse.sppmatrix`.
"""
if not isinstance(A, spp.csr_matrix):
A = A.tocsr()
A = self._type_correct(A)
self._backend = backend
self.rowPtrs = backend.copy_array(A.indptr + self._index_base, name=name+".rowPtrs")
self.colInds = backend.copy_array(A.indices + self._index_base, name=name+".colInds")
self.values = backend.copy_array(A.data, name=name+".data")
self.shape = A.shape
self.dtype = A.dtype
# fraction of nonzero rows/columns
try:
from indigo.backends._customcpu import inspect
nzrow, nzcol, self._exwrite = inspect(A.shape[0], A.shape[1], A.indices, A.indptr)
self._row_frac = nzrow / A.shape[0]
self._col_frac = nzcol / A.shape[1]
log.debug("matrix %s has %2d%% nonzero rows and %2d%% nonzero columns",
name, 100*self._row_frac, 100*self._col_frac)
log.debug("matrix %s supports exwrite: %s", name, self._exwrite)
except ImportError:
self._row_frac = 1.0
self._col_frac = 1.0
log.debug("skipping exwrite inspection. Is CustomCPU backend available?")
def forward(self, y, x, alpha=1, beta=0):
""" y[:] = A * x """
assert x.dtype == np.dtype("complex64"), "Bad dtype: expected compelx64, got %s" % x.dtype
assert y.dtype == np.dtype("complex64"), "Bad dtype: expected compelx64, got %s" % y.dtype
assert self.values.dtype == np.dtype("complex64")
self._backend.ccsrmm(y,
self.shape, self.colInds, self.rowPtrs, self.values,
x, alpha=alpha, beta=beta, adjoint=False, exwrite=True)
def adjoint(self, y, x, alpha=1, beta=0):
""" y[:] = A.H * x """
assert x.dtype == np.dtype("complex64"), "Bad dtype: expected compelx64, got %s" % x.dtype
assert y.dtype == np.dtype("complex64"), "Bad dtype: expected compelx64, got %s" % y.dtype
assert self.values.dtype == np.dtype("complex64")
self._backend.ccsrmm(y,
self.shape, self.colInds, self.rowPtrs, self.values,
x, alpha=alpha, beta=beta, adjoint=True, exwrite=self._exwrite)
@property
def nbytes(self):
return self.rowPtrs.nbytes + self.colInds.nbytes + self.values.nbytes
@property
def nnz(self):
return self.values.size
def _type_correct(self, A):
return A.astype(np.complex64)
class dia_matrix(object):
"""
A device-resident sparse matrix in DIA format.
"""
def __init__(self, backend, A, name='mat'):
"""
Create a matrix from the given `scipy.sparse.sppmatrix`.
"""
assert isinstance(A, spp.dia_matrix)
A = A.astype(np.complex64)
self._backend = backend
self.data = backend.copy_array(A.data.T, name=name+".data")
self.offsets = backend.copy_array(A.offsets, name=name+".data")
self.shape = A.shape
self.dtype = A.dtype
self._row_frac = 1
self._col_frac = 1
def forward(self, y, x, alpha=1, beta=0):
""" y[:] = A * x """
self._backend.cdiamm(y, self.shape, self.offsets, self.data,
x, alpha=alpha, beta=beta, adjoint=False)
def adjoint(self, y, x, alpha=1, beta=0):
""" y[:] = A.H * x """
self._backend.cdiamm(y, self.shape, self.offsets, self.data,
x, alpha=alpha, beta=beta, adjoint=True)
@property
def nbytes(self):
return self.offsets.nbytes + self.data.nbytes
@property
def nnz(self):
return self.data.size
# -----------------------------------------------------------------------
# Algorithms
# -----------------------------------------------------------------------
def cg(self, A, b_h, x_h, lamda=0.0, tol=1e-10, maxiter=100, team=None):
"""
Conjugate gradient. Solves for A x = b, where A is positive semi-definite.
Parameters
----------
A : function to perform A(x)
y : 1D array
x : 1D array, initial solution
maxiter : int, optional
{IterPrint, IterPlot, IterWrite, IterCompare}
"""
x = self.copy_array( x_h, name='x' )
b = self.copy_array( b_h, name='b' )
Ap = x.copy()
# r = b - A(x) - lamda * x
r = b
A.eval(Ap, x)
self.axpby(1, r, -1, Ap)
self.axpby(1, r, -lamda, x)
p = r.copy(name='p')
rr = self.pnorm2(r, team)
r0 = rr
for it in range(maxiter):
profile.extra['it'] = it
with profile("iter"):
A.eval(Ap, p)
self.axpby(1, Ap, lamda, p)
alpha = rr / self.pdot(p, Ap, team)
self.axpby(1, x, alpha, p)
self.axpby(1, r, -alpha, Ap)
r2 = self.pnorm2(r, team)
beta = r2 / rr
self.scale(p, beta)
self.axpby(1, p, 1, r)
rr = r2
resid = np.sqrt(rr / r0)
log.info("iter %d, residual %g", it, resid.real)
if resid < tol:
log.info("cg reached tolerance")
break
else:
log.info("cg reached maxiter")
x.copy_to(x_h)
def apgd(self, gradf, proxg, alpha, x_h, maxiter=100, team=None):
'''Accelerated proximal gradient descent.
Solves for min_x f(x) + g(x)
Parameters
----------
gradf : Gradient of f
proxg : Proximal of g
alpha : Step size
x0 : 1D array, initial solution
maxiter : int, optional
'''
x_k = self.copy_array(x_h)
y_k = x_k.copy()
y_k1 = x_k.copy()
x_k1 = x_k.copy()
gf = x_k.copy()
t_k = 1
for it in range(1,maxiter+1):
profile.extra['it'] = it
with profile("iter"):
gradf(gf, y_k)
self.axpby(1, x_k, -alpha, gf)
proxg(x_k, alpha)
t_k1 = (1.0 + np.sqrt(1.0 + 4.0 * t_k**2)) / 2.0
t_ratio = (t_k - 1) / t_k1
self.axpby(0, y_k1, 1+t_ratio, x_k)
self.axpby(1, y_k1, -t_ratio, x_k1)
x_k1.copy(x_k)
y_k.copy(y_k1)
log.info("iter %d", it)
x_k.copy_to(x_h)
def max(self, val, arr):
""" Computes elementwise maximum: arr[:] = max(arr, val). """
raise NotImplementedError()
| bsd-3-clause | -2,364,064,529,511,427,600 | 33.637228 | 170 | 0.506453 | false | 3.704301 | false | false | false |
yaricom/brainhash | src/experiment_cA5_10_dt_th_al_ah_bl_bh.py | 1 | 2063 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5Hz, wisp, attention, 70, cA 5, delta, theta, alpha low, alpha high, beta low, beta high, batch size = 10 and
balanced data set
@author: yaric
"""
import experiment as ex
import config
from time import time
n_hidden = 5
batch_size = 10
experiment_name = 'cA_%d_%d_dt-th-a_l-a_h-b_l-b_h' % (n_hidden, batch_size) # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'IO_TXT', 'IO_SKY', 'KS_10_2', 'RO_10_2']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = batch_size
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = n_hidden
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l,beta_h'
start = time()
#
# Run analyzer
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
print("Start analysis for noise records: %s" % noise_ids)
ex.runEEGAnalyzerWithIDs(ids_list=noise_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
#
# Run classifiers
#
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
noise_dir = "%s/%s/%s" % (config.analyzer_out_dir, experiment_name, noise_ids[0])
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s\nNoise dir: %s"
% (signal_dir, noise_dir))
ex.runClassifier(signal_dir=signal_dir,
signal_records=signal_ids,
noise_dir=noise_dir,
out_suffix=out_suffix)
print("\n\nExperiment %s took %.2f seconds.\n"
% (experiment_name, time() - start))
| gpl-3.0 | -1,605,927,457,288,513,500 | 31.746032 | 136 | 0.641299 | false | 3.159265 | true | false | false |
thomasorb/orb | orb/utils/fft.py | 1 | 13896 | #!/usr/bin/python
# *-* coding: utf-8 *-*
# Author: Thomas Martin <[email protected]>
# File: fft.py
## Copyright (c) 2010-2020 Thomas Martin <[email protected]>
##
## This file is part of ORB
##
## ORB is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## ORB is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ORB. If not, see <http://www.gnu.org/licenses/>.
import logging
import time
import sys
import numpy as np
import math
import warnings
import scipy
import scipy.special as ss
from scipy import signal, interpolate, optimize
import gvar
import orb.utils.vector
import orb.utils.spectrum
import orb.utils.stats
import orb.utils.filters
import orb.cutils
import orb.constants
def mod2pi(a):
"""Return the smallest signed modulo 2 pi of any angle in radians
"""
return np.arctan2(np.sin(a), np.cos(a))
def clean_phase(ph):
"""Return a cleaned phase vector (which does not depend on an arbitrary modulo pi)
"""
ph = orb.utils.vector.robust_unwrap(np.copy(ph), 2*np.pi)
if np.any(np.isnan(ph)):
ph.fill(np.nan)
else:
# set the first sample at the smallest positive modulo pi
# value (order 0 is modulo pi)
new_orig = np.fmod(ph[0], np.pi)
while new_orig < 0:
new_orig += np.pi
if np.abs(new_orig) > np.abs(new_orig - np.pi):
new_orig -= np.pi
elif np.abs(new_orig) > np.abs(new_orig + np.pi):
new_orig += np.pi
ph -= ph[0]
ph += new_orig
return ph
def next_power_of_two(n):
"""Return the next power of two greater than n.
:param n: The number from which the next power of two has to be
computed. Can be an array of numbers.
"""
return np.array(2.**np.ceil(np.log2(n))).astype(int)
def raw_fft(x, apod=None, inverse=False, return_complex=False,
return_phase=False):
"""
Compute the raw FFT of a vector.
Return the absolute value of the complex vector by default.
:param x: Interferogram.
:param apod: (Optional) Apodization function used. See
:py:meth:`utils.norton_beer_window` (default None)
:param inverse: (Optional) If True compute the inverse FFT
(default False).
:param return_complex: (Optional) If True, the complex vector is
returned (default False).
:param return_phase: (Optional) If True, the phase is
returned.(default False)
"""
x = np.copy(x)
windows = ['1.1', '1.2', '1.3', '1.4', '1.5',
'1.6', '1.7', '1.8', '1.9', '2.0']
N = x.shape[0]
# mean substraction
x -= np.mean(x)
# apodization
if apod in windows:
x *= gaussian_window(apod, N)
elif apod is not None:
raise Exception("Unknown apodization function try %s"%
str(windows))
# zero padding
zv = np.zeros(N*2, dtype=x.dtype)
zv[int(N/2):int(N/2)+N] = x
# zero the centerburst
zv = np.roll(zv, zv.shape[0]/2)
# FFT
if not inverse:
x_fft = (np.fft.fft(zv))[:N]
else:
x_fft = (np.fft.ifft(zv))[:N]
if return_complex:
return x_fft
elif return_phase:
return np.unwrap(np.angle(x_fft))
else:
return np.abs(x_fft)
def cube_raw_fft(x, apod=None):
"""Compute the raw FFT of a cube (the last axis
beeing the interferogram axis)
:param x: Interferogram cube
:param apod: (Optional) Apodization function used. See
:py:meth:`utils.gaussian_window` (default None)
"""
x = np.copy(x)
N = x.shape[-1]
# mean substraction
x = (x.T - np.mean(x, axis=-1)).T
# apodization
if apod is not None:
x *= gaussian_window(apod, N)
# zero padding
zv_shape = np.array(x.shape)
zv_shape[-1] = N*2
zv = np.zeros(zv_shape)
zv[:,int(N/2):int(N/2)+N] = x
# FFT
return np.abs((np.fft.fft(zv))[::,:N])
def norton_beer_window(fwhm='1.6', n=1000):
"""
Return an extended Norton-Beer window function (see [NAY2007]_).
Returned window is symmetrical.
:param fwhm: FWHM relative to the sinc function. Must be: 1.1,
1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9 or 2.0. (default '1.6')
:param n: Number of points (default 1000)
.. note:: Coefficients of the extended Norton-Beer functions
apodizing functions [NAY2007]_ :
==== ======== ========= ======== ======== ======== ========
FWHM C0 C1 C2 C4 C6 C8
---- -------- --------- -------- -------- -------- --------
1.1 0.701551 -0.639244 0.937693 0.000000 0.000000 0.000000
1.2 0.396430 -0.150902 0.754472 0.000000 0.000000 0.000000
1.3 0.237413 -0.065285 0.827872 0.000000 0.000000 0.000000
1.4 0.153945 -0.141765 0.987820 0.000000 0.000000 0.000000
1.5 0.077112 0.000000 0.703371 0.219517 0.000000 0.000000
1.6 0.039234 0.000000 0.630268 0.234934 0.095563 0.000000
1.7 0.020078 0.000000 0.480667 0.386409 0.112845 0.000000
1.8 0.010172 0.000000 0.344429 0.451817 0.193580 0.000000
1.9 0.004773 0.000000 0.232473 0.464562 0.298191 0.000000
2.0 0.002267 0.000000 0.140412 0.487172 0.256200 0.113948
==== ======== ========= ======== ======== ======== ========
.. [NAY2007] Naylor, D. A., & Tahic, M. K. (2007). Apodizing
functions for Fourier transform spectroscopy. Journal of the
Optical Society of America A.
"""
norton_beer_coeffs = [
[1.1, 0.701551, -0.639244, 0.937693, 0., 0., 0., 0., 0., 0.],
[1.2, 0.396430, -0.150902, 0.754472, 0., 0., 0., 0., 0., 0.],
[1.3, 0.237413, -0.065285, 0.827872, 0., 0., 0., 0., 0., 0.],
[1.4, 0.153945, -0.141765, 0.987820, 0., 0., 0., 0., 0., 0.],
[1.5, 0.077112, 0., 0.703371, 0., 0.219517, 0., 0., 0., 0.],
[1.6, 0.039234, 0., 0.630268, 0., 0.234934, 0., 0.095563, 0., 0.],
[1.7, 0.020078, 0., 0.480667, 0., 0.386409, 0., 0.112845, 0., 0.],
[1.8, 0.010172, 0., 0.344429, 0., 0.451817, 0., 0.193580, 0., 0.],
[1.9, 0.004773, 0., 0.232473, 0., 0.464562, 0., 0.298191, 0., 0.],
[2.0, 0.002267, 0., 0.140412, 0., 0.487172, 0., 0.256200, 0., 0.113948]]
fwhm_list = ['1.1', '1.2', '1.3', '1.4', '1.5',
'1.6', '1.7', '1.8', '1.9', '2.0']
if fwhm in fwhm_list:
fwhm_index = fwhm_list.index(fwhm)
else:
raise Exception("Bad extended Norton-Beer window FWHM. Must be in : " + str(fwhm_list))
x = np.linspace(-1., 1., n)
nb = np.zeros_like(x)
for index in range(9):
nb += norton_beer_coeffs[fwhm_index][index+1]*(1. - x**2)**index
return nb
def apod2width(apod):
"""Return the width of the gaussian window for a given apodization level.
:param apod: Apodization level (must be >= 1.)
The apodization level is the broadening factor of the line (an
apodization level of 2 mean that the line fwhm will be 2 times
wider).
"""
if apod < 1.: raise Exception(
'Apodization level (broadening factor) must be > 1')
return apod - 1. + (gvar.erf(math.pi / 2. * gvar.sqrt(apod - 1.))
* orb.constants.FWHM_SINC_COEFF)
def width2apod(width):
"""This is the inverse of apod2width.
As the inverse is at least complicated to compute. This is done via
minimization.
"""
def diff(apod, width):
return apod2width(apod) - width
if width < 0: raise ValueError('width must be a positive float')
fit = optimize.least_squares(diff, 1, args=(width, ))
if fit.success:
return fit.x[0]
else:
raise Exception('error when inverting apod2width: {}'.format(fit.message))
def apod2sigma(apod, fwhm):
"""Return the broadening of the gaussian-sinc function in the
spectrum for a given apodization level. Unit is that of the fwhm.
:param apod: Apodization level (must be >= 1.)
"""
broadening = 2. * (apod2width(apod) / (math.sqrt(2.) * math.pi)
/ orb.utils.spectrum.compute_line_fwhm_pix(
oversampling_ratio=1))
return broadening * fwhm
def sigma2apod(sigma, fwhm):
"""This is the inverse of apod2sigma.
As the inverse is at least complicated to compute. This is done via
minimization.
"""
def diff(apod, sigma, fwhm):
return apod2sigma(apod, fwhm) - sigma
if sigma < 0: raise ValueError('sigma must be a positive float')
if fwhm <= 0: raise ValueError('fwhm must be a strictly positive float')
fit = optimize.least_squares(diff, 1, args=(sigma, fwhm))
if fit.success:
return fit.x[0]
else:
raise Exception('error when inverting apod2sigma: {}'.format(fit.message))
def gaussian_window(coeff, x):
"""Return a Gaussian apodization function for a given broadening
factor.
:param coeff: FWHM relative to the sinc function. Must be a float > 1.
:param x: Must be an axis defined between -1 and 1 inclusively.
x = np.linspace(-1., 1., n) for a symmetrical window.
"""
coeff = float(coeff)
#x = np.linspace(-1., 1., n)
w = apod2width(coeff)
return np.exp(-x**2 * w**2)
def learner95_window(x):
"""Return the apodization function described in Learner et al.,
J. Opt. Soc. Am. A, 12, (1995).
This function is closely related to the minimum four-term
Blackman-Harris window.
:param x: Must be an axis defnined between -1 and 1 inclusively.
x = np.linspace(-1., 1., n) for a symmetrical window.
"""
#
return (0.355766
+ 0.487395 * np.cos(math.pi*x)
+ 0.144234 * np.cos(2.*math.pi*x)
+ 0.012605 * np.cos(3.*math.pi*x))
def border_cut_window(n, coeff=0.2):
"""Return a window function with only the edges cut by a nice
gaussian shape function.
:param n: Window length
:param coeff: Border size in percentage of the total length.
"""
window = np.ones(n)
border_length = int(float(n)*coeff)
if border_length <= 1:
window[0] = 0.
window[-1] = 0.
else:
borders = signal.get_window(("gaussian",border_length/3.),
border_length*2+1)
z = int(float(borders.shape[0])/2.)
window[:z] = borders[:z]
window[-z:] = borders[-z:]
return window
def ndft(a, xk, vj):
"""Non-uniform Discret Fourier Tranform
Compute the spectrum from an interferogram. Note that the axis can
be irregularly sampled.
If the spectral axis (output axis) is irregular the result is
exact. But there is no magic: if the input axis (interferogram
sampling) is irregular the output spectrum is not exact because
the projection basis is not orthonormal.
If the interferogram is the addition of multiple regularly sampled
scans with a opd shift between each scan, the result will be good
as long as there are not too much scans added one after the
other. But if the interferogram steps are randomly distributed, it
will be better to use a classic FFT because the resulting noise
will be much lower.
:param a: 1D interferogram
:param xk: 1D sampling steps of the interferogram. Must have the
same size as a and must be relative to the real step length,
i.e. if the sampling is uniform xk = np.arange(a.size).
:param vj: 1D frequency sampling of the output spectrum.
"""
assert a.ndim == 1, 'a must be a 1d vector'
assert vj.ndim == 1, 'vj must be a 1d vector'
assert a.size == xk.size, 'size of a must equal size of xk'
angle = np.inner((-2.j * np.pi * xk / xk.size)[:,None], vj[:,None])
return np.dot(a, np.exp(angle))
def indft(a, x):
"""Inverse Non-uniform Discret Fourier Transform.
Compute the irregularly sampled interferogram from a regularly
sampled spectrum.
:param a: regularly sampled spectrum.
:param x: positions of the interferogram samples. If x =
range(size(a)), this function is equivalent to an idft or a
ifft. Note that the ifft is of course much faster to
compute. This vector may have any length.
"""
return orb.cutils.indft(a.astype(float), x.astype(float))
def spectrum_mean_energy(spectrum):
"""Return the mean energy of a spectrum by channel.
:param spectrum: a 1D spectrum
"""
return orb.cutils.spectrum_mean_energy(spectrum)
def interf_mean_energy(interf):
"""Return the mean energy of an interferogram by step.
:param interf: an interferogram
.. warning:: The mean of the interferogram is substracted to
compute only the modulation energy. This is the modulation
energy which must be conserved in the resulting spectrum. Note
that the interferogram transformation function (see
:py:meth:`utils.transform_interferogram`) remove the mean of the
interferogram before computing its FFT.
.. note:: NaNs are set to 0.
"""
return orb.cutils.interf_mean_energy(interf)
def phase_model(sigma, sigmaref, p):
"""Phase model
A simple polynomial. Defining a reference wavenumber in the given
axis is important since, if a 0 is not in the axis, the polynomial
fitting is unstable. This reference is defined in the filterfile.
"""
return np.polynomial.polynomial.polyval(sigma - sigmaref, p)
| gpl-3.0 | -8,758,207,435,317,938,000 | 32.565217 | 95 | 0.610463 | false | 3.15603 | false | false | false |
marksweiss/sofine | sofine/runner.py | 1 | 19931 | """
This module is the main driver for calls to plugins from the CLI interface.
It also has all of the scaffolding and wrapper functions required to generically invoke
and run any of the supported plugin methods in the plugin interface for any plugin
using just the plugin name, plugin group and call args.
"""
import sofine.lib.utils.utils as utils
import sofine.lib.utils.conf as conf
from optparse import OptionParser
import sys
def get_data(data, data_source, data_source_group, data_source_args):
"""
* `data` - `dict`. A dict of keys and associated array of dicts of attribute keys and values. May be empty.
Any data collected by this call with append new keys and values to `data`, and append new attribute keys
and values for existing keys into the array of attribute key/attribute value (single-entry) dicts
associated with that key. Also, if this call is invoked from a piped command line call piping to sofine,
that will be detected and `data` will be read from `stdin`, overriding whatever value is passed in for this arg.
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
* `data_source_args` - `list`. The args for the plugin call, in `argv` format with alternating elements
referring to argument names and argument values.
    To have attribute keys prefixed with the plugin group and plugin name (guaranteeing unique attribute 
    key names in the returned data set), call `get_namespaced_data` instead.
Main driver function for retrieving data from a plugin. Calls a plugin's _required_ `get_data` method.
Takes a list of data_sources and a list of argument lists to call when calling each data_source.
Can be called directly or from `main` if this module was instantiated from the command line.
This method operates based on the core data aggregation semantics of the library:
* If this is the first call in the chain, data is empty, so just fill it with the return of this call
    * If there is already data, add any new keys retrieved and add attribute key/value pairs associated 
    with any new or existing keys
* The set of keys on each call is the union of all previously collected keys
* The set of attributes associated with each key is the union of all previously collected attribute/value
pairs collected for that key
Output looks like this:
{"key_1" : [{"attr_name_1" : value}, {"attr_name_2" : value}, {"attr_name_1, value}],
"key_2" : ...
}
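
    Example call as a Python library (a minimal sketch; 'example_plugin', 'example_group' and the 
    args list are hypothetical names, not plugins bundled with sofine):

        import sofine.runner as runner

        data = {}
        data = runner.get_data(data, 'example_plugin', 'example_group',
                               ['-c', 'my_customer_id'])
        # data now maps each key returned by the plugin to a list of
        # single-entry attribute dicts, e.g. {'AAPL' : [{'price' : 100.0}]}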
"""
plugin = utils.load_plugin(data_source, data_source_group)
is_valid, parsed_args = plugin.parse_args(data_source_args)
if not is_valid:
        raise ValueError('Invalid value passed in call to {0}. Args passed: {1}'.format(data_source, data_source_args))
new_data = plugin.get_data(data.keys(), parsed_args)
if len(new_data.keys()) > 0:
for k in new_data.keys():
# Convert returned dict of attributes into a list of individual dicts. This allows all data
            # from all plugins to be added to the output without needing namespacing to prevent attribute
            # keys from overwriting each other. Namespacing can optionally be turned on by calling
            # get_namespaced_data instead.
new_data_list = [{name : val} for name, val in new_data[k].iteritems()]
if k in data:
data[k] += new_data_list
else:
data[k] = new_data_list
return data
def get_namespaced_data(data, data_source, data_source_group, data_source_args):
"""As in `get_data`, but each attribute dict in each array of attribute dicts that is the value of each key
in the data set is prepended with the plugin name and plugin group.
Namespaced output looks like this:
{"key_1" : [{"plugin_group_A::plugin_1::attr_name_1" : value},
{"plugin_group_A::plugin_1::attr_name_2" : value},
{"plugin_group_B::plugin_1::attr_name_1" : value}],
"key_2" : ...
}
"""
data = get_data(data, data_source, data_source_group, data_source_args)
    # Take the data returned and, for each attribute key in each attribute dict in the list of dicts
    # associated with each key, create the namespaced key. Then insert a new attribute dict into the list
    # in place of the old one, with the namespaced key and the same value.
for attrs in data.values():
for j in range(0, len(attrs)):
attr = dict(attrs[j])
attr_key = utils.namespacer(data_source_group, data_source, attr.keys()[0])
attr_val = attr.values()[0]
attrs[j] = {attr_key : attr_val}
return data
def _validate_get_data_batch(data_sources, data_source_groups, data_source_args, fn_name):
if len(data_sources) != len(data_source_args) or \
len(data_sources) != len(data_source_groups) or \
len(data_source_groups) != len(data_source_args):
raise ValueError("""Call to runner.{0}() had different lengths for
data_sources (len == {1}),
data source_groups (len == {2}) and
data_source_args (len == {3)}""".format(fn_name, len(data_sources), len(data_source_groups), len(data_source_args)))
def get_data_batch(data, data_sources, data_source_groups, data_source_args):
"""
* `data` - `dict`. A dict of keys and associated array of dicts of attribute keys and values. May be empty.
    Any data collected by this call will append new keys and values to `data`, and append new attribute keys
and values for existing keys into the dict associated with that key.
    * `data_sources` - `list`. A list of names of plugins being called.
    * `data_source_groups` - `list`. A list of names of plugin groups for the plugins being called.
* `data_source_args` - `list of list`. A list of lists of args for the plugin calls, in argv format with alternating elements
referring to argument names and argument values.
Convenience wrapper for users of sofine as a Python library. This function lets a user pass in
a list of data sources, a list of plugin groups and a list of lists of arguments for each plugin call.
Note that the elements must be in order in each argument: data source name in position 0 must match
data source group in position 0 and the list of args for that call in `data_source_args[0]`.
"""
_validate_get_data_batch(data_sources, data_source_groups, data_source_args, 'get_data_batch')
for j in range(0, len(data_sources)):
data = get_data(data, data_sources[j], data_source_groups[j], data_source_args[j])
return data
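# Sketch of a batch call. Plugin names, groups and args are illustrative placeholders; note that
# position j in each of the three lists describes the j-th call:
#
#   data = get_data_batch({},
#                         ['plugin_a', 'plugin_b'],
#                         ['group_a', 'group_b'],
#                         [['--arg', 'value'], []])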
def get_namespaced_data_batch(data, data_sources, data_source_groups, data_source_args):
"""As in `get_data_batch`, but each attribute dict in each array of attribute dicts that is the value of each key
in the data set is prepended with the plugin name and plugin group. All plugins called in the batch call will
namespace the attributes they contribute to the final data set returned.
Namespaced output looks like this:
{"key_1" : [{"plugin_group_A::plugin_1::attr_name_1" : value},
{"plugin_group_A::plugin_1::attr_name_2" : value},
{"plugin_group_B::plugin_1::attr_name_1" : value}],
"key_2" : ...
}
"""
_validate_get_data_batch(data_sources, data_source_groups, data_source_args, 'get_namespaced_data_batch')
for j in range(0, len(data_sources)):
data = get_namespaced_data(data, data_sources[j], data_source_groups[j], data_source_args[j])
return data
def _get_schema(get_schema_call, parse_args_call, data_source, data_source_group, args):
plugin = utils.load_plugin(data_source, data_source_group)
schema = None
if not args:
schema = get_schema_call()
else:
is_valid, parsed_args = parse_args_call(args)
if not is_valid:
            raise ValueError('Invalid value passed in call to {0}. Args passed: {1}'.format(data_source, args))
schema = get_schema_call(parsed_args)
return {"schema" : schema}
def get_schema(data_source, data_source_group, args=None):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
* `args` - `any`. This is a bit of a hack, but basically there are use cases that could require args in
order to figure out the schema of available fields. Maybe a plugin wraps access to a data store that allows
    arbitrary or varying schemas per document retrieved. Or maybe, like the included `standard.file_source`
plugin, it wraps access to a config that can provide an arbitrary list of fields.
This returns the value for a plugin's _optional_ (but highly recommended) `self.schema` attribute.
This method lets plugin users introspect the plugin to ask what schema fields it provides, that is, what
    set of attribute keys it can add to the attributes dict for each key in data.
Note that the default implementation is provided by `PluginBase` and it returns a properly namespaced list
of attribute keys. All the plugin creator has to do is set the `self.schema` attribute of their plugin to a
list of strings of the attribute keys it can return.
    Not all data sources guarantee they will return all attribute keys for each key in data, and not
all data sources guarantee they will return the same set of attribute keys for each key in data in
one returned data set.
"""
plugin = utils.load_plugin(data_source, data_source_group)
return _get_schema(plugin.get_schema, plugin.parse_args, data_source, data_source_group, args)
def get_namespaced_schema(data_source, data_source_group, args=None):
"""As in `get_schema` except that the schema attribute keys returned are prepended with the `data_source` and
`data_source_group`.
"""
plugin = utils.load_plugin(data_source, data_source_group)
return _get_schema(plugin.get_namespaced_schema, plugin.parse_args, data_source, data_source_group, args)
def parse_args(data_source, data_source_group, data_source_args):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
* `data_source_args` - `list`. The args for the plugin call, in `argv` format with alternating elements
referring to argument names and argument values.
A wrapper which calls a plugin's _required_ `parse_args` method. This method must parse arguments the plugin's `get_data`
call requires, with the arguments in argv format with alternating elements referring to argument
names and argument values.
The method is also responsible for validating arguments and returning a boolean `is_valid` as well as the
parsed (and possibly modified) args.
"""
plugin = utils.load_plugin(data_source, data_source_group)
is_valid, parsed_args = plugin.parse_args(data_source_args)
return {"is_valid" : is_valid, "parsed_args" : parsed_args}
def adds_keys(data_source, data_source_group):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
A wrapper which calls a plugin's _optional_ (but recommended) `adds_keys` method. This introspection method
lets plugin users ask whether a plugin adds its own keys to the `data` output or simply adds key/value
attributes to the dicts being built by sofine for each key in `data`.
"""
plugin = utils.load_plugin(data_source, data_source_group)
adds_keys = plugin.get_adds_keys()
return {"adds_keys" : adds_keys}
def get_plugin_module(data_source, data_source_group):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
Convenience function for clients to get an instance of a plugin module.
This lets plugin implementers expose free functions in the module and have client
code be able to access them.
"""
return utils.load_plugin_module(data_source)
def get_plugin(data_source, data_source_group):
"""
* `data_source` - `string`. The name of the plugin being called.
* `data_source_group` - `string`. The name of the plugin group for the plugin being called.
Convenience function for clients to get an instance of a plugin.
This lets plugin implementers expose free functions in the module and have client
code be able to access them.
"""
return utils.load_plugin(data_source, data_source_group)
def _parse_runner_arg(args, arg_flags):
ret = None
def try_arg_flag(arg_flag):
e = ''
i = -1
try:
i = args.index(arg_flag)
except ValueError:
e = 'Required argument {0} not found in command line argument list passed to runner.main()'.format(arg_flag)
if i == len(args) - 1:
e = 'Value for required argument {0} not found in command line argument list passed to runner.main()'.format(arg_flag)
return e, i
# Try twice if necessary, for each of the two forms of the arg flag
err, idx = try_arg_flag(arg_flags[0])
if err:
err, idx = try_arg_flag(arg_flags[1])
# Flag was found, value for it parsed, and flag and value removed from args
if not err:
ret = args[idx + 1]
del args[idx + 1]
del args[idx]
return err, ret
def _parse_global_call_args(args):
# Default output to JSON
data_format = None
err, data_format = _parse_runner_arg(args, ['--SF-d', '--SF-data-format'])
if err:
data_format = conf.DEFAULT_DATA_FORMAT
return data_format
def _parse_runner_call_args(args):
data_source = None
data_source_group = None
action = None
# Parse for both versions of required flags and raise error if not found
err, data_source = _parse_runner_arg(args, ['--SF-s', '--SF-data-source'])
if err: raise ValueError(err)
err, data_source_group = _parse_runner_arg(args, ['--SF-g','--SF-data-source-group'])
if err: raise ValueError(err)
# For optional argument, don't throw if not found, just set default value
err, action = _parse_runner_arg(args, ['--SF-a', '--SF-action'])
if err:
action = 'get_data'
return data_source, data_source_group, action, args
def _run_action(action, ret, data_source, data_source_group, data_source_args):
if action == 'get_data':
ret = get_data(ret, data_source, data_source_group, data_source_args)
if action == 'get_namespaced_data':
ret = get_namespaced_data(ret, data_source, data_source_group, data_source_args)
elif action == 'get_schema':
ret = get_schema(data_source, data_source_group, data_source_args)
elif action == 'get_namespaced_schema':
ret = get_namespaced_schema(data_source, data_source_group, data_source_args)
elif action == 'adds_keys':
ret = adds_keys(data_source, data_source_group)
elif action == 'parse_args':
ret = parse_args(data_source, data_source_group, data_source_args)
return ret
def main(argv):
"""Entry point if called from the command line. Parses CLI args, validates them and calls run().
The arguments dedicated to this framework are expected to precede the remaining args
(for clarity of reading the entire command) but don't need to. In order to clearly
    separate from the args required for the call being run, they are preceded by `--SF-*`.
There is a short form and long form of each command:
    * `[--SF-d|--SF-data-format]` - The data format to be used for all following piped calls to `get_data`
or `get_namespaced_data`. This argument is optional. It only is evaluated for `get-data`
and `get_namespaced_data`. If it isn't passed the default data format is JSON.
* `[--SF-s|--SF-data-source]` - The name of the data source being called. This is the
name of the plugin module being called. Required.
* `[--SF-g|--SF-data-source-group`] - The plugin group where the plugin lives. This is
the plugins subdirectory where the plugin module is deployed. Required.
    * `[--SF-a|--SF-action]` - The plugin action being called. One of six supported actions that must be part of every plugin:
- `get_data` - retrieves available data for the keys passed to it
- `get_namespaced_data` - retrieves available data for the keys passed to it, with the attribute keys associated with each
key prepended with the plugin name and plugin group
- `adds_keys` - returns a JSON object with the attribute `adds_keys` and a
boolean indicating whether the data source adds keys or just gets data for the keys passed to it
- `get_schema` - returns a JSON object with the attribute `schema` and the schema of attributes which
this data source may add for each key
- `get_namespaced_schema` - returns a JSON object with the attribute `schema` and the schema of attributes which
this data source may add for each key, with each attribute prepended with the plugin name and plugin group
- `parse_args` - returns the values parsed for the arguments passed to the call being
made as a JSON object with an attribute `args` and an array of parsed args,
and an attribute `is_valid` with a boolean indicating whether parsing succeeded.
The `[--SF-a|--SF-action]` argument is Optional. If you don't pass it, `get_data` is assumed.
Calls to `get_data` and `get_namespaced_data` can be piped together. You can mix `get_data` and `get_namespaced_data` calls
in a piped expression.
Calls to `adds_keys` and `get_schema` and `parse_args` cannot be piped.
All calls and their arguments must be enclosed in quotes as shown in the examples below.
The complete interface for a call piping two get_data calls together:
PATH/runner.py \'[--SF-s|--SF-data-source] DATA_SOURCE_1 \\
[--SF-g|--SF-data-source-group] DATA_SOURCE_GROUP_1 \\
ARGS | \\
[--SF-s|--SF-data-source] DATA_SOURCE_2 \\
[--SF-g|--SF-data-source-group] DATA_SOURCE_GROUP_2 \\
ARGS\'
An example call piping two get_data calls together:
PATH/runner.py \'--SF-s fidelity --SF-g examples \\
-c CUSTOMER_ID -p PASSWORD -a ACCOUNT_ID -e EMAIL | \\
--SF-s ystockquotelib --SF-g examples\'
An example get_schema call:
PATH/runner.py \'--SF-s fidelity --SF-g examples --SF-a get_schema \\
-c CUSTOMER_ID -p PASSWORD -a ACCOUNT_ID -e EMAIL\'
"""
ret = {}
# Get any global args and each piped data source and set of args to call it from the CLI
# CLI syntax is split on pipes
calls = ' '.join(argv).split('|')
if len(calls):
# Parse global args, which appear before any calls. Right now only output format
        # is the only global arg, and it will be applied to all actions, even when that makes less sense
global_arg_call = calls[0].strip().split()
data_format = _parse_global_call_args(global_arg_call)
data_format_plugin = utils.load_plugin_module(data_format)
# If input passed from stdin, set initial data in chain of calls to that.
# Thus supports composing sofine piped chains with preceding outer piped
# command line statements that include sofine pipes within them
if utils.has_stdin():
ret = sys.stdin.read()
ret = data_format_plugin.deserialize(ret)
for call in calls:
call = call.strip().split()
data_source, data_source_group, action, data_source_args = \
_parse_runner_call_args(call)
ret = _run_action(action, ret, data_source, data_source_group, data_source_args)
print data_format_plugin.serialize(ret)
if __name__ == '__main__':
# Client passes in a statement of one or more piped calls to
# data sources enclosed in quotes. Convert to list here because
# code in main() and run() expects an argument list
argv = sys.argv[1]
argv = argv.split()
main(argv)
| mit | -5,049,273,920,547,250,000 | 46.007075 | 130 | 0.6956 | false | 3.677985 | false | false | false |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/alpha/trecento/exceldiff.py | 1 | 2662 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from music21.ext import xlrd
#sys.path.append('/mit/cuthbert/www/music21')
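# Example invocations (workbook and sheet names are placeholders). The second sheet name may be
# omitted, in which case the first sheet name is reused for the second workbook:
#   python exceldiff.py book1.xls:Sheet1 book2.xls:Sheet1
#   python exceldiff.py book1.xls:Sheet1 book2.xls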
if len(sys.argv) != 3:
raise Exception("Need two arguments to diff!")
if (sys.argv[1].count(':') == 1):
(book1name, sheetname1) = sys.argv[1].split(':')
if (book1name.count('.xls') == 0):
book1name += ".xls"
else:
raise ("First name must be in form filename:sheetname")
if (sys.argv[2].count(':') == 1):
(book2name, sheetname2) = sys.argv[2].split(':')
else:
(book2name, sheetname2) = (sys.argv[2], sheetname1)
if (book2name.count('.xls') == 0):
book2name += ".xls"
book1 = xlrd.open_workbook(book1name)
book2 = xlrd.open_workbook(book2name)
sheet1 = book1.sheet_by_name(sheetname1)
sheet2 = book2.sheet_by_name(sheetname2)
totalRows1 = sheet1.nrows
totalRows2 = sheet2.nrows
extraRows = 0
longsheet = 0
if (totalRows1 > totalRows2):
longsheet = 1
extraRows = (totalRows1 - totalRows2)
minRows = totalRows2
elif (totalRows1 < totalRows2):
longsheet = 2
extraRows = (totalRows2 - totalRows1)
minRows = totalRows1
else:
    minRows = totalRows1 # doesn't matter which
for i in range(0, minRows):
rowvalues1 = sheet1.row_values(i)
rowvalues2 = sheet2.row_values(i)
longrow = 0
totalCells1 = len(rowvalues1)
totalCells2 = len(rowvalues2)
extraCells = 0
if (totalCells1 > totalCells2):
longrow = 1
extraCells = (totalCells1 - totalCells2)
minCells = totalCells2
    elif (totalCells1 < totalCells2):
longrow = 2
extraCells = (totalCells2 - totalCells1)
minCells = totalCells1
else:
        minCells = totalCells1 # doesn't matter which
for j in range(0, minCells):
if (rowvalues1[j] != rowvalues2[j]):
print("%3d,%2s--%34s : %34s" % (i+1,xlrd.colname(j),
unicode(rowvalues1[j]).encode('utf-8')[:34],
unicode(rowvalues2[j]).encode('utf-8')[:34]))
if (extraCells > 0):
print("%3d extra cells in row %3d in" % (extraCells, i+1),)
if (longrow == 1): print(book1name + ":" + sheetname1)
elif (longrow == 2): print(book2name + ":" + sheetname2)
else: raise Exception("What? longrow was not set!")
if (extraRows > 0):
print("%3d extra rows in" % extraRows,)
if (longsheet == 1): print(book1name + ":" + sheetname1)
elif (longsheet == 2): print(book2name + ":" + sheetname2)
else: raise Exception("What? longsheet was not set!")
#------------------------------------------------------------------------------
# eof
| mit | -3,328,757,419,221,253,000 | 28.910112 | 92 | 0.586777 | false | 3.176611 | false | false | false |
dirkhusemann/rezzme | RezzMe/launchers/linux2.py | 1 | 5118 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
# Copyright (c) Contributors, http://opensimulator.org/
# See CONTRIBUTORS.TXT for a full list of copyright holders.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the OpenSim Project nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import subprocess
import urllib
import PyQt4.QtCore
import RezzMe.exceptions
import RezzMe.launchers.hippo
class PlatformLauncher(object):
def __init__(self):
self._clientsDefault = {'hippo' : 'hippo_opensim_viewer',
'secondlife': 'secondlife'}
self._clients = {}
for c in self._clientsDefault:
for bin in os.environ['PATH'].split(':'):
t = '%s/%s' % (bin, self._clientsDefault[c])
if os.path.exists(t):
self._clients[c] = t
break
def _gClients(self):
return self._clients
Clients = property(fget = _gClients)
def _gClientPattern(self):
return 'client executable (*)'
ClientPattern = property(fget = _gClientPattern)
def HippoDefaultGrids(self, path):
hippoHome = os.path.dirname(os.path.realpath(path))
defaultGrids = '%s/app_settings/default_grids.xml' % hippoHome
if os.path.exists(defaultGrids):
logging.debug("launchers.linux2: found hippo's default_grids.xml at %s", defaultGrids)
return defaultGrids
logging.debug("launchers.linux2: trying to find hippo's default_grids.xml via locate...")
defaultGrids = subprocess.Popen(['locate', 'app_settings/default_grids.xml'], stdout = subprocess.PIPE).communicate()[0].rstrip()
if defaultGrids:
for p in defaultGrids.split():
if 'hippo' in p.lower():
logging.debug("launchers.linux2: found hippo's default_grids.xml at %s", p)
return p
return None
def Launch(self, avatar, password, gridInfo, clientName, client, location, purge):
        # fix ' character appearing in Irish names
avatar = urllib.unquote(avatar)
clientArgs = [ ]
clientArgs += ['-loginuri', gridInfo['login']]
clientArgs += ['-multiple']
keys = gridInfo.keys()
if 'welcome' in keys: clientArgs += ['-loginpage', gridInfo['welcome']]
if 'economy' in keys: clientArgs += ['-helperuri', gridInfo['economy']]
if purge:
clientArgs += ['--purge']
# mirror clientArgs into logArgs to avoid capturing passwords into
# log files
logArgs = clientArgs[:]
if avatar and password:
clientArgs += ['-login']
avatar = avatar.replace('(', '\(')
avatar = avatar.replace(')', '\)')
clientArgs += map(lambda x: "%s" % x, avatar.split())
logArgs = clientArgs[:]
clientArgs += [password]
logArgs += ["'**********'"]
if 'hippo' in clientName.lower() or 'hippo' in client.lower():
userGridXml = os.path.expanduser('~/.hippo_opensim_viewer/user_settings/grid_info.xml')
defaultGridXml = self.HippoDefaultGrids(client)
gridnick = RezzMe.launchers.hippo.HippoGridInfoFix(gridInfo, userGridXml, defaultGridXml)
clientArgs += ['-grid', gridnick]
logArgs += ['-grid', gridnick]
# has to come last
if location:
clientArgs += [location]
logArgs += [location]
# all systems go: start client
logging.debug('launchers.linux2: client %s %s', client, ' '.join(logArgs))
# os.execvp(client, clientArgs)
PyQt4.QtCore.QProcess.startDetached(client, clientArgs)
| bsd-3-clause | 8,639,283,520,196,367,000 | 39.619048 | 137 | 0.635795 | false | 4.134087 | false | false | false |
rueckstiess/mtools | mtools/mlogfilter/filters/fast_filter.py | 1 | 1215 | from .base_filter import BaseFilter
class FastFilter(BaseFilter):
"""
FastFilter class.
Accept only lines that have a duration that is shorter than the specified
parameter in ms.
"""
filterArgs = [
('--fast', {'action': 'store', 'nargs': '?', 'default': False,
'type': int,
'help': ('only output lines with query times shorter '
'than FAST ms (default 1000)')})
]
def __init__(self, mlogfilter):
BaseFilter.__init__(self, mlogfilter)
if ('fast' in self.mlogfilter.args and
self.mlogfilter.args['fast'] is not False):
self.active = True
if self.mlogfilter.args['fast'] is None:
self.fastms = 1000
else:
self.fastms = self.mlogfilter.args['fast']
def accept(self, logevent):
"""
Process line.
Overwrite BaseFilter.accept() and return True if the provided
logevent should be accepted (causing output), or False if not.
"""
if self.active and logevent.duration is not None:
return logevent.duration <= self.fastms
return False
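# Example usage, assuming this filter is invoked through the mlogfilter command line:
#   mlogfilter mongod.log --fast          # keep lines with query times below the 1000 ms default
#   mlogfilter mongod.log --fast 500      # keep lines with query times below 500 ms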
| apache-2.0 | -160,321,035,544,508,300 | 30.973684 | 77 | 0.553909 | false | 4.323843 | false | false | false |
marrow/mongo | marrow/mongo/core/field/oid.py | 1 | 1116 | # encoding: utf-8
from __future__ import unicode_literals
from bson import ObjectId as OID
from collections import MutableMapping
from datetime import datetime, timedelta
from .base import Field
from ....schema import Attribute
from ....schema.compat import unicode
class ObjectId(Field):
__foreign__ = 'objectId'
__disallowed_operators__ = {'#array'}
default = Attribute()
def __fixup__(self, document):
super(ObjectId, self).__fixup__(document)
try: # Assign a default if one has not been given.
self.default
except AttributeError:
if self.__name__ == '_id': # But only if we're actually the primary key.
self.default = lambda: OID() # pylint:disable=unnecessary-lambda
def to_foreign(self, obj, name, value): # pylint:disable=unused-argument
if isinstance(value, OID):
return value
if isinstance(value, datetime):
return OID.from_datetime(value)
if isinstance(value, timedelta):
return OID.from_datetime(datetime.utcnow() + value)
if isinstance(value, MutableMapping) and '_id' in value:
return OID(value['_id'])
return OID(unicode(value))
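# Illustrative conversions performed by to_foreign on an ObjectId field instance (the document and
# field-name arguments are placeholders and are ignored by the conversion itself):
#   an existing bson ObjectId            -> returned unchanged
#   a datetime                           -> OID.from_datetime(value)
#   a timedelta                          -> OID for utcnow() + value
#   a mapping with an '_id' entry        -> OID(value['_id'])
#   anything else, e.g. a hex string     -> OID(unicode(value))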
| mit | 7,631,403,063,071,038,000 | 25.571429 | 76 | 0.701613 | false | 3.588424 | false | false | false |
majdigital/bigworldgraph | backend/tests/mock_nlp.py | 1 | 1055 | class MockTokenizer:
@staticmethod
def tokenize(sentence_data):
return sentence_data.split(" ") # Yes, yes, very sophisticated
class MockTagger:
def __init__(self, naive_tag_rule):
assert callable(naive_tag_rule)
self.naive_tag_rule = naive_tag_rule
def tag(self, tokenized_sentence):
return [self.naive_tag_rule(token.lower(), tokenized_sentence) for token in tokenized_sentence]
class MockParser:
@staticmethod
def raw_parse(sentence_data):
tokens = sentence_data.split(" ")
return {
"root": {
"address": 0
},
"nodes": {
node_id: {
"address": node_id,
"word": "ROOT" if node_id == 0 else tokens[node_id - 1],
"rel": None if node_id == 0 else node_id - 1,
"deps": {
"rel": node_id + 1
}
}
for node_id in range(len(tokens) + 1)
}
}
| mit | 3,103,457,414,177,886,000 | 28.305556 | 103 | 0.483412 | false | 4.073359 | false | false | false |
stoic1979/careermaker | server.py | 1 | 9828 | from flask import Flask, render_template, request, redirect, jsonify, make_response
from models import User, Candidate, \
Company, Vacancy, JobCategory, Skill, db, app
import md5
from flask_pymongo import PyMongo
import traceback
import os
import jwt
import datetime
from functools import wraps
import json
from scraper.config import *
from scraper.db import Mdb
app = Flask(__name__)
mongo = PyMongo(app)
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from bson import ObjectId
mdb = Mdb()
######################################################
# #
# MongoDB's _id of each record was not getting JSON  #
# encoded, so this custom JSONEncoder is needed      #
# #
######################################################
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
return json.JSONEncoder.default(self, o)
# Setup login manager
login_manager = LoginManager()
login_manager.init_app(app)
class ChildView(ModelView):
column_display_pk = True
column_hide_backrefs = False
column_list = ('category', 'title', 'created_at')
admin = Admin(app, name='CareerMaker Admin', template_mode='bootstrap3')
admin.add_view(ModelView(Candidate, db.session))
admin.add_view(ModelView(Company, db.session))
admin.add_view(ModelView(Vacancy, db.session))
admin.add_view(ModelView(JobCategory, db.session))
# admin.add_view(ModelView(Skill, db.session))
# admin.add_view(ChildView(Skill, db.session))
admin.add_view(ModelView(User, db.session))
@app.route("/")
@login_required
def index():
templateData = {'title': 'Home Page'}
return render_template("index.html", **templateData )
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect('/login')
@app.route("/api_demo")
def candidate_form():
templateData = {'title' : 'Home Page'}
return render_template("api_demo.html", **templateData )
@app.route("/find_company_data", methods=['POST'])
def find_company_data():
ret = {"err": 0}
try:
ret = []
print "find_company_data() ::", request.form
login()
cname = request.form['cname']
# ret['Company Name'] = cname
collection_android = mdb.db["job_vacancy_android"]
collection_python = mdb.db["job_vacancy_python"]
result = collection_android.find({"title": cname})
print result
# ret.append(result)
for data in result:
ret.append(data)
result = collection_python.find({"title": cname})
print result
# ret.append(result)
for data in result:
ret.append(data)
# testing code
print JSONEncoder().encode({'job_vacancy %s ': ret})
# mdb.retrieve_data(cname)
except Exception as exp:
print "find_company_data() :: Got exception: %s" % exp
print(traceback.format_exc())
# return json.dumps(ret)
return JSONEncoder().encode({'job_vacancy': ret})
@app.route("/save_candidate", methods=['POST'])
def save_candidate():
try:
print "save_candidate(): :", request.form
user_id = request.form['user_id']
name = request.form['name']
email = request.form['email']
pswd = request.form['pswd']
age = request.form['age']
phone = request.form['phone']
address = request.form['address']
gender = request.form['gender']
encodepassword = md5.new(pswd).hexdigest()
# save candidate in db
candidate = Candidate(user_id, name, email, encodepassword, age, phone, address, gender)
db.session.add(candidate)
db.session.commit()
except Exception as exp:
print "save_candidate(): : Got Exception: %s" % exp
print(traceback.format_exc())
return "Candidate Data Saved"
@app.route("/save_company", methods=['POST'])
def save_company():
try:
print "save_company() :: ", request.form
user_id = request.form['user_id']
name = request.form['name']
website = request.form['website']
email = request.form['email']
pswd = request.form['pswd']
mobile = request.form['mobile']
telno = request.form['telno']
address = request.form['address']
city = request.form['city']
state = request.form['state']
country = request.form['country']
pin = request.form['pin']
encodepswd = md5.new(pswd).hexdigest()
# saving company in db
company = Company(user_id, name, website, email, encodepswd, mobile, telno, address, city, state, country, pin)
db.session.add(company)
db.session.commit()
except Exception as exp:
print "save_company(): : Got Exception: %s" % exp
print (traceback.format_exc())
return "Company Saved"
@app.route("/save_vacancy", methods=['POST'])
def save_vacancy():
try:
comp_id = request.form['comp_id']
cand_id = request.form['cand_id']
post_date = request.form['post_date']
expiry_date = request.form['expiry_date']
sal_min = request.form['sal_min']
sal_max = request.form['sal_max']
fulltime = request.form['fulltime']
# saving vacancy in db
vacancy = Vacancy(comp_id, cand_id, post_date, expiry_date, sal_min, sal_max, fulltime)
db.session.add(vacancy)
db.session.commit()
except Exception as exp:
print "save_vacancy() :: Got Exception: %s" % exp
print (traceback.format_exc())
return "Vacancy saved"
@app.route("/save_JobCategory", methods=['POST'])
def save_JobCategory():
try:
title = request.form['indextitle']
# savin Job Category in db
jbcategory = JobCategory(title)
db.session.add(jbcategory)
db.session.commit()
except Exception as exp:
print "save_JobCategory() :: Got Exception: %s" % exp
print (traceback.format_exc())
return "Save Job Category"
@app.route("/save_skill", methods=['POST'])
def save_skill():
try:
cat_id = request.form['cat_id']
title = request.form['title']
# saving skill in db
skill = Skill(cat_id, title)
db.session.add(skill)
db.session.commit()
except Exception as exp:
print "save_skill() :: Got Excetion: %s" % exp
print(traceback.format_exc())
return "Save Skill"
@app.route("/search", methods=['POST'])
def search():
try:
print "search() :: %s", request.form
except Exception as exp:
print "search() :: Got Exception: %s" % exp
print (traceback.format_exc())
return "Job Search"
@app.route("/user_register", methods=['POST'])
def user_register():
try:
print "user_register() :: ", request.form
username = request.form['username']
pswd = request.form['pswd']
encodepswd = md5.new(pswd).hexdigest()
user = User(username, encodepswd)
db.session.add(user)
db.session.commit()
except Exception as exp:
print "user_register() :: Got Exception: %s" % exp
print(traceback.format_exc())
return "user Register Successfully"
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == "GET":
print "login GET"
templateData = {'title' : 'Login To Career Maker'}
return render_template("index.html", **templateData)
else:
username = request.form['username']
pswd = request.form['pswd']
encodepswd = md5.new(pswd).hexdigest()
user = User.query.filter_by(username=username).filter_by(pswd=encodepswd).first()
if not user:
print "The username and Password is invalid"
return "Invalid Username and Password"
else:
print "login is successfull"
templateData = {'title' : 'Home Page'}
return render_template("index.html", **templateData)
"""
# token authentication
app.config['secretkey'] = 'some-strong+secret#key'
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token = request.args.get('token')
# ensure that token is specified in the request
if not token:
return jsonify({'message': 'Missing token!'})
# ensure that token is valid
try:
data = jwt.decode(token, app.config['secretkey'])
except:
return jsonify({'message': 'Invalid token!'})
return f(*args, **kwargs)
return decorated
@app.route('/unprotected')
def unprotected():
return 'unprotected'
@app.route('/protected')
@token_required
def protected():
return 'protected'
@app.route('/login')
def login():
auth = request.authorization
if auth and auth.password == 'password':
expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=30)
token = jwt.encode({'user': auth.username, 'exp': expiry}, app.config['secretkey'], algorithm='HS256')
return jsonify({'token': token.decode('UTF-8')})
return make_response('Could not verify!', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
"""
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect("/")
#################################################################
# #
# Main Server #
# #
#################################################################
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug=True, threaded=True)
| gpl-3.0 | 804,700,029,827,939,800 | 29.521739 | 119 | 0.597477 | false | 3.803406 | false | false | false |
jastination/software-engineering-excercise-repository | seer_python/interviewstreet/LuckyNumber.py | 1 | 1265 | '''
Created on May 25, 2012
@author: jjhuang
'''
def getAllPrimeNumber(N):
ret = []
for n in range(2, N + 1):
isPrime = True
for i in range(2, n//2 + 1):
if(n % i == 0):
isPrime = False
break
if(isPrime):
ret.append(n)
return ret
def buildCache(N):
table1 = []
table2 = []
for x in range(N):
a = 0
b = 0
while(x > 0):
m = x % 10
a += m
b += m * m
x //= 10
table1.append(a)
table2.append(b)
return table1,table2
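# buildCache precomputes, for every x in [0, N), the digit sum (table1) and the sum of squared
# digits (table2). For example, buildCache(100)[0][47] == 11 (4+7) and
# buildCache(100)[1][47] == 65 (4*4 + 7*7).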
if __name__ == '__main__':
#T = int(input())
primeTable = set(getAllPrimeNumber(1500))
# for t in range(T):
#A,B = [int(x) for x in input().split(" ")]
A,B = 1,1000000000
# cnt = 0
# n = A
# while(n<=B):
# a = 0
# b = 0
# nn = n
# while(nn > 0):
# d = nn % MOD
# a += table1[d]
# b += table2[d]
# nn //= MOD
# if(a in primeTable and b in primeTable):
# cnt += 1
# n += 1
# print(cnt)
| mit | 6,946,488,043,172,524,000 | 16.071429 | 49 | 0.355731 | false | 3.186398 | false | false | false |
steder/maroonmpi | subunit/python/subunit/tests/test_subunit_tags.py | 1 | 2324 | #
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Tests for subunit.tag_stream."""
import unittest
from StringIO import StringIO
import subunit
class TestSubUnitTags(unittest.TestCase):
def setUp(self):
self.original = StringIO()
self.filtered = StringIO()
def test_add_tag(self):
self.original.write("tags: foo\n")
self.original.write("test: test\n")
self.original.write("tags: bar -quux\n")
self.original.write("success: test\n")
self.original.seek(0)
result = subunit.tag_stream(self.original, self.filtered, ["quux"])
self.assertEqual([
"tags: quux",
"tags: foo",
"test: test",
"tags: bar",
"success: test",
],
self.filtered.getvalue().splitlines())
def test_remove_tag(self):
self.original.write("tags: foo\n")
self.original.write("test: test\n")
self.original.write("tags: bar -quux\n")
self.original.write("success: test\n")
self.original.seek(0)
result = subunit.tag_stream(self.original, self.filtered, ["-bar"])
self.assertEqual([
"tags: -bar",
"tags: foo",
"test: test",
"tags: -quux",
"success: test",
],
self.filtered.getvalue().splitlines())
def test_suite():
loader = subunit.tests.TestUtil.TestLoader()
result = loader.loadTestsFromName(__name__)
return result
| gpl-2.0 | -8,039,500,400,970,692,000 | 32.2 | 80 | 0.63296 | false | 3.886288 | true | false | false |
liampauling/flumine | flumine/strategy/runnercontext.py | 1 | 1835 | import logging
import datetime
from typing import Optional
logger = logging.getLogger(__name__)
class RunnerContext:
"""Runner context at strategy level"""
def __init__(self, selection_id: int):
self.selection_id = selection_id
self.invested = False
self.datetime_last_placed = None
self.datetime_last_reset = None
self.trades = []
self.live_trades = []
def place(self, trade_id) -> None:
self.invested = True
self.datetime_last_placed = datetime.datetime.utcnow()
if trade_id not in self.trades:
self.trades.append(trade_id)
if trade_id not in self.live_trades:
self.live_trades.append(trade_id)
def reset(self, trade_id) -> None:
self.datetime_last_reset = datetime.datetime.utcnow()
try:
self.live_trades.remove(trade_id)
except ValueError:
logger.warning(
"Trade '%s' not present in RunnerContext live_trades on reset"
% trade_id
)
@property
def executable_orders(self) -> bool:
if self.live_trades:
return True
else:
return False
@property
def trade_count(self) -> int:
return len(self.trades)
@property
def live_trade_count(self) -> int:
return len(self.live_trades)
@property
def placed_elapsed_seconds(self) -> Optional[float]:
if self.datetime_last_placed:
return (
datetime.datetime.utcnow() - self.datetime_last_placed
).total_seconds()
@property
def reset_elapsed_seconds(self) -> Optional[float]:
if self.datetime_last_reset:
return (
datetime.datetime.utcnow() - self.datetime_last_reset
).total_seconds()
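# Minimal usage sketch (the trade ids below are arbitrary placeholders):
#
#   context = RunnerContext(selection_id=12345)
#   context.place("trade-1")      # marks the runner invested, records the trade as live
#   context.live_trade_count      # -> 1
#   context.reset("trade-1")      # trade finished; removed from live_trades
#   context.executable_orders     # -> False once no live trades remain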
| mit | 4,729,211,881,912,501,000 | 27.671875 | 78 | 0.585286 | false | 3.980477 | false | false | false |
katadh/ngdl | ngdl.py | 1 | 9612 | import re
import nltk
import ngdl_classes
import global_vars
import ngdl_parse
import ngdl_write
def start_dialog(output_file="test.txt"):
if not global_vars.initialized:
global_vars.init()
else:
reset_global_vars()
output = open(output_file, "w")
print "Welcome to the natural language game creation program for general game playing!"
#print "First we'll work on defining the game environment"
board_size_dialog()
player_num_dialog()
game_pieces_dialog()
player_move_dialog()
goal_dialog()
terminal_dialog()
ngdl_write.write_gdl_file(output)
output.close()
def reset_global_vars():
global_vars.write_queue = [["noop", []], ["goals", []], ["terminal", []], ["distinct_cells", []], ["successors", [50]]]
global_vars.game = ngdl_classes.Game()
def board_size_dialog():
in_board_size = raw_input("What size would you like your board to be?: ")
valid_input = re.search("([0-9]+)\s?(by|x|X)\s?([0-9]+)", in_board_size)
while not valid_input:
print "Sorry, I can't understand that input yet, can you try again?"
in_board_size = raw_input("What size would you like your game to be?: ")
valid_input = re.search("([0-9]+)\s?(by|x|X)\s?([0-9]+)", in_board_size)
board_size = (valid_input.group(1), valid_input.group(3))
#confirmation = raw_input("To confirm, there will be " + board_size[0] + " columns and " + board_size[1] + " rows?: ")
global_vars.game.board = ngdl_classes.Board((int(board_size[0]), int(board_size[1])))
global_vars.write_queue.append(["board" , []])
def player_num_dialog():
in_player_num = raw_input("How many players does your game have?: ")
valid_input = re.search("[0-9]+", in_player_num)
while not valid_input:
print "Sorry, I can't understand that input yet, can you try again?"
in_player_num = raw_input("How many players does your game have?: ")
valid_input = re.search("[0-9]+", in_player_num)
num_players = int(valid_input.group())
for p in range(1,num_players+1):
global_vars.game.players.append(ngdl_classes.Player("player" + str(p)))
global_vars.write_queue.append(["players", []])
def game_pieces_dialog():
for player in global_vars.game.players:
in_piece_names = raw_input("What types of pieces does " + player.name + " have?: ")
pieces = re.findall("([0-9]*)\s|^([^\W\d]+)", in_piece_names)
for p in pieces:
global_vars.game.pieces[p[1]] = ngdl_classes.Piece(p[1])
player.pieces.append(p[1])
on_board_response = raw_input("Do any of " + player.name + "'s pieces start on the board?: ")
on_board_response = on_board_response.lower()
if not re.match("[no|n]", on_board_response):
for p in pieces:
if p[0] == "" or int(p[0]) > 1:
p_positions = raw_input("What are the starting positions <col, row> of the " +
p[1] + " that start on the board? (enter to skip): ")
else:
p_positions = raw_input("What is the starting position <col, row> of the " +
p[1] + " if it starts on the board? (enter to skip): ")
positions = re.findall("([0-9]+),\s?([0-9]+)", p_positions)
if positions:
for pos in positions:
                    global_vars.game.board.starting_positions[(int(pos[0]), int(pos[1]))] = player.name + " " + p[1]
def player_move_dialog():
move_conditions = raw_input("What can a player do on their turn?: ")
parse_trees = ngdl_parse.parse(move_conditions, 2)
nltk_tree = parse_trees[0]
tree = translate_tree(nltk_tree)
conditions = process_condition(tree)
action = tree.find_closest_node("ACTION")
while action.children:
index = [child.name for child in action.children].index("ACTION")
action = action[index]
if action.value == "drop":
drop_response = raw_input("By 'drop', do you mean dropping a piece like in Connect-4, or placing a piece like in Shogi?: ")
        drop_response = drop_response.lower()
        if re.match("connect-4|drop", drop_response):
global_vars.write_queue.append(["drop_occupant_conditions", [[conditions]]])
global_vars.write_queue.append(["perpetuate_untouched_cells", [["drop"]]])
else:
global_vars.write_queue.append(["place_occupant_conditions", [[conditions]]])
global_vars.write_queue.append(["perpetuate_untouched_cells", [["place"]]])
elif action.value in ["place", "mark"]:
global_vars.write_queue.append(["place_occupant_conditions", [[conditions]]])
global_vars.write_queue.append(["perpetuate_untouched_cells", [["place"]]])
#def piece_move_dialog():
def goal_dialog():
win_conditions = raw_input("How does a player win?: ")
parse_trees = ngdl_parse.parse(win_conditions, 1)
nltk_tree = parse_trees[0]
tree = translate_tree(nltk_tree)
#result = tree.find_closest_node("RESULT")
conditions_tree = tree.find_closest_node("COND")
conditions = process_condition(conditions_tree)
global_vars.write_queue.append(["win_conditions", [[conditions], ""]])
def terminal_dialog():
game_end_conditions = raw_input("Aside from a player winning, how does the game end?: ")
parse_trees = ngdl_parse.parse(game_end_conditions, 1)
nltk_tree = parse_trees[0]
tree = translate_tree(nltk_tree)
conditions_tree = tree.find_closest_node("COND")
conditions = process_condition(conditions_tree)
global_vars.write_queue.append(["game_end_conditions", [[conditions]]])
def process_result(result):
return
def process_conditions(conds):
conditions = []
if "OR" in [child.name for child in conds.children]:
conditions.append("OR")
for child in conds.children:
if child.name == "COND":
conditions.append(process_condition(child))
elif "AND" in [child.name for child in conds.children]:
conditions.append("AND")
for child in conds.children:
if child.name == "COND":
conditions.append(process_condition(child))
else:
conditions.append("COND")
conditions.append(process_condition(conds))
return conditions
def process_condition(cond_node):
for leaf in cond_node.leaves():
if leaf.value in cond_dictionary:
cond_definition = cond_dictionary[leaf.value]
slot_values = []
for slot in cond_definition[0]:
slot_node = leaf.find_closest_node(slot[0])
if not slot_node:
if len(slot) == 2:
slot_values.append(slot[1])
else:
print "Slot fill error1!"
elif cond_node not in slot_node.ancestors():
if len(slot) == 2:
slot_values.append(slot[1])
else:
print "Slot fill error2!"
elif slot_node.name == "PLAYER":
slot_values.append(process_player(slot_node))
elif slot_node.name == "BOARD_PART":
slot_values.append(process_board_part(slot_node))
elif slot_node.name == "PIECE":
slot_values.append(process_piece(slot_node))
else:
slot_values.append(slot_node.value)
if cond_definition[-1]:
global_vars.write_queue.append([cond_definition[2], slot_values])
else:
global_vars.write_queue.append([cond_definition[2], []])
return cond_definition[1].format(*slot_values)
def process_player(player_node):
return "?player"
def process_board_part(board_part_node):
square_equivalents = ["cell"]
board_part = board_part_node
while board_part.children:
index = [child.name for child in board_part.children].index("BOARD_PART")
board_part = board_part[index]
if board_part.value in square_equivalents:
return "square"
else:
return board_part.value
def process_piece(piece_node):
piece = piece_node
while piece.children:
index = [child.name for child in piece.children].index("PIECE")
piece = piece[index]
if piece.value == "piece":
return "?piece"
else:
return piece.value
def translate_tree(nltk_tree):
if nltk_tree.height() == 2:
tree = ngdl_classes.Tree(nltk_tree.node)
tree.value = nltk_tree[0]
return tree
tree = ngdl_classes.Tree(nltk_tree.node)
for subtree in nltk_tree:
if type(subtree) == str:
tree.value = subtree
else:
tree.children.append(translate_tree(subtree))
for subtree in tree.children:
subtree.parent = tree
return tree
cond_dictionary = {"empty": [[["BOARD_PART"], ["NUM", "?col"], ["NUM", "?row"]], "(empty {0} {1} {2})", "board_part_empty", False],
"open": [[["BOARD_PART"], ["NUM", "?col"], ["NUM", "?row"]], "(open {0} {1} {2})", "board_part_open", False],
"full": [[["BOARD_PART"], ["NUM", "?col"], ["NUM", "?row"]], "(full {0} {1} {2})", "board_part_full", False],
"in-a-row": [[["NUM"], ["PLAYER", "?player"], ["PIECE", "?piece"]], "({0}_in_a_row {1} {2})", "x_in_a_row", True]
}
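# Each cond_dictionary entry maps a condition word to four fields: the slot specs (a parse-tree
# node name plus an optional default), a GDL condition template filled from those slots, the name
# of the write-queue handler to enqueue, and a flag saying whether the slot values are passed to
# that handler. For example, "empty" pulls a BOARD_PART plus column/row numbers (defaulting to
# ?col and ?row), emits "(empty {0} {1} {2})" and queues the "board_part_empty" writer.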
| gpl-3.0 | 578,165,373,975,564,700 | 38.073171 | 139 | 0.577507 | false | 3.636776 | false | false | false |
Dicotomix/DicotomixNewSrv | server.py | 1 | 12287 | import asyncio
import struct
import dictionary
import datetime
import tests
from collections import *
from os import listdir
from os.path import isfile, join
from enum import Enum
from dicotomix import Dicotomix, Direction, NotFoundException, OrderException
import unidecode
import sys
import numpy as np
ENABLE_TESTS = False
ENABLE_NGRAMS_LETTER = True
ENABLE_ELAG = False
grams = {}
spelling_buffer = []
default_letters = []
def _boundPrefix(left, right):
k = 0
for i in range(min(len(left),len(right))):
if left[i] != right[i]:
break
k += 1
return k
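# e.g. _boundPrefix("abcde", "abcxy") == 3: the length of the longest common prefix of the two
# words, later sent to the client as the number of characters that stay fixed.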
class _StateID(Enum):
HEADER = 0
LEN = 1
STR = 2
class _NetworkState:
def __init__(self):
self.header = None
self.len = None
self.str = None
def state(self):
if self.header == None:
return _StateID.HEADER
elif self.len == None:
return _StateID.LEN
else:
return _StateID.STR
DATA_PATH = "data/"
class Server(asyncio.Protocol):
def __init__(self):
self.dicotomix = None
self.words = None
self.buffer = []
self.state = _NetworkState()
self.spelling = False
self.users = []
self.login = None
self.logFile = None
def _log(self, header, message):
if self.logFile == None:
return
self.logFile.write('{:%Y-%m-%d %H:%M:%S}|{}|{}\n'.format(
datetime.datetime.now(),
header,
message
))
def connection_made(self, transport):
self.transport = transport
self.address = transport.get_extra_info('peername')
print('Connection accepted: {}'.format(*self.address))
def data_received(self, data):
self.buffer += data
while self.consume_buffer():
pass
def consume_buffer(self):
if self.state.state() == _StateID.HEADER and len(self.buffer) >= 1:
self.state.header = self.buffer[0]
self._log('NET', 'header:{}'.format(self.state.header))
return True
elif self.state.state() == _StateID.LEN and len(self.buffer) >= 3:
self.state.len = struct.unpack('>h', bytes(self.buffer[1 : 3]))[0]
self._log('NET', 'len:{}'.format(self.state.len))
return True
elif self.state.state() == _StateID.STR and len(self.buffer) >= 3 + self.state.len:
self.state.str = bytes(self.buffer[3 : 3 + self.state.len]).decode('utf-8')
self._log('NET', 'str:{}'.format(self.state.str))
self.process()
self.buffer = self.buffer[3 + self.state.len : ]
self.state = _NetworkState()
return True
return False
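    # Wire format assumed by consume_buffer: one header byte, a 2-byte big-endian payload length,
    # then that many bytes of UTF-8 payload. For example, header 7 ("get user name") with the
    # payload "alice" arrives as b'\x07\x00\x05alice'.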
def process(self):
global spelling_buffer, grams, default_letters
left = None
word = None
right = None
try:
if self.state.header == 1:
self._log('DIC', 'restart')
left, word, right = self.dicotomix.nextWord(Direction.START, self.spelling)
print("ICI: ",len(self.dicotomix._words))
elif self.state.header == 2:
self._log('DIC', 'go_left')
left, word, right = self.dicotomix.nextWord(Direction.LEFT, self.spelling)
elif self.state.header == 3:
self._log('DIC', 'go_right')
left, word, right = self.dicotomix.nextWord(Direction.RIGHT, self.spelling)
elif self.state.header == 4:
self._log('DIC', 'discard')
left, word, right = self.dicotomix.discard()
elif self.state.header == 5: # spelling mode
self.dicotomix.toggleSpelling()
self.spelling = not self.spelling
spelling_buffer = []
if self.spelling:
default_letters = self.dicotomix._words
self._log('DIC', 'start_spelling')
else:
self.dicotomix._letters = default_letters[:]
self._EPSILON2 = self._FIRST_EPSILON2
                    self._log('DIC', 'stop_spelling')
return
elif self.state.header == 6: # send users list
onlyfiles = [f for f in listdir(DATA_PATH) if isfile(join(DATA_PATH, f))]
for f in onlyfiles:
name, ext = f.split('.')
if ext == 'data':
self.users.append(name)
self.users.append("[new]")
data = '\n'.join(self.users).encode('utf8')
self.transport.write(struct.pack('>h', len(data)))
self.transport.write(struct.pack('>h', 0))
self.transport.write(data)
return
elif self.state.header == 7: # get user name
if self.login != None:
return
if self.state.str not in self.users:
print('Create user ' + self.state.str)
open(DATA_PATH + self.state.str + '.data', 'a').close()
addenda = ''
if ENABLE_ELAG == True:
addenda = '_elag'
self.login = self.state.str
words, letters = dictionary.loadDictionary2(
DATA_PATH + 'new_lexique'+addenda+'.csv',
DATA_PATH + self.login + '.data'
)
self.words = words
self.logFile = open(DATA_PATH + self.login + '.log', 'a')
self._log('DIC', 'connected:{}'.format(self.login))
# extract (cumulative frequency, word) from the whole dictionary
feed_words = dictionary.computeFeed(words)
feed_letters = dictionary.computeFeed(letters)
#for w in feed_words[:100]:
#print(w)
self.dicotomix = Dicotomix(feed_words, feed_letters)
if ENABLE_TESTS:
tests.testAll(Dicotomix(feed_words), feed_words, self.words)
if ENABLE_NGRAMS_LETTER:
grams = tests.ngram_letter(Dicotomix(feed_words), feed_words, self.words)
return
elif self.state.header == 8: # custom word
if self.spelling or len(self.state.str) == 0:
return
self._log('DIC', 'add_word:{}'.format(self.state.str))
freq = 1000.
normalized = dictionary.normalize(self.state.str)
add = False
if normalized not in self.words:
self.words[normalized] = [freq, [self.state.str]]
add = True
elif self.state.str not in self.words[normalized][1]:
self.words[normalized][0] += freq
self.words[normalized][1].append(self.state.str)
add = True
if add:
file = open(DATA_PATH + self.login + '.data', 'a')
file.write('{}|{}|{}\n'.format(
self.state.str,
normalized,
freq
))
file.close()
self.words = OrderedDict(sorted(
self.words.items(),
key = lambda x: x[0]
))
feed_words = dictionary.computeFeed(self.words)
self.dicotomix.reinit(feed_words)
else:
self._log('DIC', 'already_exists')
return
elif self.state.header == 9: #validate letter in spelling mode
spelling_buffer.append(self.state.str)
print(spelling_buffer)
H = 0.0
for (i,w) in enumerate(self.dicotomix._words[1:]):
print(w[1],self.dicotomix._wordLength(i))
H += self.dicotomix._wordLength(i)*np.log(self.dicotomix._wordLength(i))
H /= -np.log(26)
print("Old H: ", H)
the_end = ''.join(spelling_buffer[-4:])
if the_end in grams:
our_distro = grams[the_end]
default_val = 1
print(our_distro)
print(default_val)
new_letters = [[0.0,'a']]
for f,l in self.dicotomix._words[1:]:
if l in our_distro:
new_letters.append([our_distro[l]*1000,l])
else:
new_letters.append([default_val,l])
to_print = new_letters[:]
to_print.sort(reverse=True, key=lambda x: x[0])
for a in to_print:
print(a[1], a[0])
the_sum = 0.0
for i in range(len(new_letters)):
the_sum += new_letters[i][0]
new_letters[i][0] = the_sum
for i in range(len(new_letters)):
new_letters[i][0] /= the_sum
for i in range(len(new_letters)):
new_letters[i] = (new_letters[i][0],new_letters[i][1])
#for f,l in new_letters:
#print(f,l)
self.dicotomix._words = new_letters[:]
H = 0.0
for (i,w) in enumerate(self.dicotomix._words[1:]):
print(w[1],self.dicotomix._wordLength(i))
H += self.dicotomix._wordLength(i)*np.log(self.dicotomix._wordLength(i))
H /= -np.log(26)
self.dicotomix._EPSILON2 = 1-H
print("New H: ", H)
else:
self.dicotomix._words = default_letters[:]
return
except NotFoundException:
self._log('DIC', 'not_found_exception')
if self.spelling:
self._log('DIC', 'auto_restart')
left, word, right = self.dicotomix.nextWord(Direction.START)
else:
self._log('DIC', 'auto_spelling')
dummy = 'a'.encode('utf8')
self.transport.write(struct.pack('>h', len(dummy)))
self.transport.write(struct.pack('>h', -1)) # ask UI to start spelling mode
self.transport.write(dummy)
return
except OrderException:
self._log('NET', 'order_exception')
return
except AttributeError:
self._log('NET', 'attribute_error')
return
self._log('DIC', 'words:{}:{}:{}'.format(left, word, right))
prefix = _boundPrefix(left, right)
self._log('DIC', 'prefix:{}'.format(prefix))
if not self.spelling:
if word != 'a' and word != '.':
words = filter(lambda x: len(x) > 1, self.words[word][1])
else:
words = self.words[word][1]
else:
words = filter(lambda x: x[0] != '[', self.words[word][1])
if self.spelling:
print(spelling_buffer)
to_send = list(words)
canonique = ''
for k in to_send:
if len(k) != 1:
continue
canonique = unidecode.unidecode(k)
break
i_can = 0
for (i,k) in enumerate(to_send):
if k == canonique:
i_can = i
to_send[0],to_send[i_can] = to_send[i_can],to_send[0]
data = '\n'.join(to_send)
data = data.encode('utf8')
self.transport.write(struct.pack('>h', len(data)))
self.transport.write(struct.pack('>h', prefix))
self.transport.write(data)
def connection_lost(self, error):
if self.logFile != None:
self._log('NET', 'disconnected:{}'.format(self.login))
self.logFile.close()
if error:
print('ERROR: {}'.format(error))
else:
print('Closing connection')
super().connection_lost(error)
exit(0)
| mit | -5,168,988,374,299,193,000 | 35.032258 | 96 | 0.482787 | false | 4.098399 | true | false | false |
geodynamics/citcoms | visual/Mayavi2/original_plugins/Citcoms_Hdf2Vtk.py | 1 | 22127 | #!/usr/bin/env python
# Script to generate TVTK files from CitcomS hdf files
# author: Martin Weier
# Copyright (C) 2006 California Institute of Technology
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#import scipy
import sys
from datetime import datetime
from getopt import getopt, GetoptError
from pprint import *
from math import *
import tables #For HDF support
import numpy
import pyvtk
import sys
# defaults
path = "./example0.h5"
vtk_path = "./vtk_output"
vtkfile = "%s.%d.vtk"
initial = 0
timesteps= None
create_topo = False
create_bottom = False
create_surface = False
create_ascii = False
nx = None
ny = None
nz = None
nx_redu=None
ny_redu=None
nz_redu=None
el_nx_redu = None
el_ny_redu = None
el_nz_redu = None
radius_inner = None
radius_outer = None
nproc_surf = None
#Filehandler to the HDF file
f = None
#####################
polygons3d = [] # arrays containing connectivity information
polygons2d = []
counter=0 #Counts iterations of citcom2vtk
def print_help():
print "Program to convert CitcomS HDF to Vtk files.\n"
print "-p, --path [path to hdf] \n\t Specify input file."
print "-o, --output [output filename] \n\t Specify the path to the folder for output files."
print ("-i, --initial [initial timestep] \n\t Specify initial timestep to export. If not \n \
\t specified script starts exporting from timestep 0.")
print "-t, --timestep [max timestep] \n\t Specify to which timestep you want to export. If not\n \
    \t specified export all timesteps starting from the initial timestep."
print "-x, --nx_reduce [nx] \n\t Set new nx to reduce output grid."
print "-y, --ny_reduce [ny] \n\t Set new ny to reduce output grid."
print "-z, --nz_reduce [nz] \n\t Set new nz to reduce output grid."
print "-b, --bottom \n\t Set to export Bottom information to Vtk file."
print "-s, --surface \n\t Set to export Surface information to Vtk file."
print "-c, --createtopo \n\t Set to create topography information in bottom and surface Vtk file."
print "-a, --ascii \n\t Create Vtk ASCII encoded files instead if binary."
print "-h, --help, -? \n\t Print this help."
#Iterator for CitcomDataRepresentation(yxz) to VTK(xyz)
def vtk_iter(nx,ny,nz):
for i in xrange(nx):
for j in xrange(ny):
for k in xrange(nz):
yield k + i * nz + j * nz * nx
#Reduces the CitcomS grid
def reduce_iter(n,nredu):
i=0
n_f=float(n)
nredu_f=float(nredu)
fl=(n_f-1)/nredu_f
redu = 0
for i in xrange(nredu+1):
yield int(round(redu))
redu = redu + fl
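# Illustrative sketch (not part of the original script): reduce_iter yields
# nredu+1 roughly evenly spaced indices along the full axis, e.g.
#   list(reduce_iter(9, 4)) == [0, 2, 4, 6, 8]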
#Transform Vectors in Spherical to Cartesian Coordinates 2d
#def velocity2cart2d(vel_colat, vel_lon,x , y):
# x1 = vel_colat*cos(x)*cos(y)-vel_lon*sin(y)
# y1 = vel_colat*cos(x)*sin(y)+vel_lon*cos(y)
# z1 = -vel_colat*sin(x)
# return x1,y1,z1
#Converts Spherical to Carthesian Coordinates 2d
#def RTF2XYZ2d(vel_colat, vel_lon):
# x1 = sin(vel_colat)*cos(vel_lon)
# y1 = sin(vel_colat)*sin(vel_lon)
# z1 = cos(vel_colat)
# return x1,y1,z1
#Transform Vectors in Spherical to Cartesian Coordinates
def velocity2cart(vel_colat,vel_long,r, x, y, z):
x1 = r*sin(x)*cos(y)+vel_colat*cos(x)*cos(y)-vel_long*sin(y)
y1 = r*sin(x)*sin(y)+vel_colat*cos(x)*sin(y)+vel_long*cos(y)
z1 = r*cos(x)-vel_colat*sin(x)
return x1, y1, z1
#Converts Spherical to Cartesian Coordinates
def RTF2XYZ(thet, phi, r):
x = r * sin(thet) * cos(phi)
y = r * sin(thet) * sin(phi)
z = r * cos(thet)
return x, y, z
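# Quick sanity check (illustrative, not from the original): RTF2XYZ takes a
# colatitude, so the north pole maps onto the +z axis,
#   RTF2XYZ(0.0, 0.0, 1.0) == (0.0, 0.0, 1.0)
# and a point on the equator at lon=0 maps to approximately (r, 0, 0).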
#Reads Citcom Files and creates a VTK File
def citcom2vtk(t):
print "Timestep:",t
benchmarkstr = ""
#Assign create_bottom and create_surface to bottom and surface
#to make them valid in methods namespace
bottom = create_bottom
surface = create_surface
ordered_points = [] #reset Sequences for points
ordered_temperature = []
ordered_velocity = []
ordered_visc = []
#Surface and Bottom Points
#Initialize empty sequences
surf_vec = []
botm_vec = []
surf_topo = []
surf_hflux = []
botm_topo = []
botm_hflux = []
surf_points = []
botm_points = []
for capnr in xrange(nproc_surf):
###Benchmark Point 1 Start##
#start = datetime.now()
############################
print "Processing cap",capnr+1,"of",nproc_surf
cap = f.root._f_getChild("cap%02d" % capnr)
#Information from hdf
#This information needs to be read only once
hdf_coords = cap.coord[:]
hdf_velocity = cap.velocity[t]
hdf_temperature = cap.temperature[t]
hdf_viscosity = cap.viscosity[t]
###Benchmark Point 1 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 2 Start##
#start = datetime.now()
############################
#Create Iterator to change data representation
nx_redu_iter = reduce_iter(nx,nx_redu)
ny_redu_iter = reduce_iter(ny,ny_redu)
nz_redu_iter = reduce_iter(nz,nz_redu)
#vtk_i = vtk_iter(el_nx_redu,el_ny_redu,el_nz_redu)
# read citcom data - zxy (z fastest)
for j in xrange(el_ny_redu):
j_redu = ny_redu_iter.next()
nx_redu_iter = reduce_iter(nx,nx_redu)
for i in xrange(el_nx_redu):
i_redu = nx_redu_iter.next()
nz_redu_iter = reduce_iter(nz,nz_redu)
for k in xrange(el_nz_redu):
k_redu = nz_redu_iter.next()
colat, lon, r = map(float,hdf_coords[i_redu,j_redu,k_redu])
x_coord, y_coord, z_coord = RTF2XYZ(colat,lon,r)
ordered_points.append((x_coord,y_coord,z_coord))
ordered_temperature.append(float(hdf_temperature[i_redu,j_redu,k_redu]))
ordered_visc.append(float(hdf_viscosity[i_redu,j_redu,k_redu]))
vel_colat, vel_lon , vel_r = map(float,hdf_velocity[i_redu,j_redu,k_redu])
x_velo, y_velo, z_velo = velocity2cart(vel_colat,vel_lon,vel_r, colat,lon , r)
ordered_velocity.append((x_velo,y_velo,z_velo))
##Delete Objects for GC
del hdf_coords
del hdf_velocity
del hdf_temperature
del hdf_viscosity
###Benchmark Point 2 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 3 Start##
#start = datetime.now()
############################
#Bottom Information from hdf
if bottom == True:
try:
hdf_bottom_coord = cap.botm.coord[:]
hdf_bottom_heatflux = cap.botm.heatflux[t]
hdf_bottom_topography = cap.botm.topography[t]
hdf_bottom_velocity = cap.botm.velocity[t]
except:
print "\tCould not find bottom information in file.\n \
Set create bottom to false"
bottom = False
#Surface Information from hdf
if surface==True:
try:
hdf_surface_coord = cap.surf.coord[:]
hdf_surface_heatflux = cap.surf.heatflux[t]
hdf_surface_topography = cap.surf.topography[t]
hdf_surface_velocity = cap.surf.velocity[t]
except:
print "\tCould not find surface information in file.\n \
Set create surface to false"
surface = False
###Benchmark Point 3 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 4 Start##
#start = datetime.now()
############################
#Compute surface/bottom topography mean
if create_topo:
surf_mean=0.0
botm_mean=0.0
if surface:
for i in xrange(nx):
surf_mean += numpy.mean(hdf_surface_topography[i])
            surf_mean = surf_mean/nx
if bottom:
for i in xrange(nx):
botm_mean += numpy.mean(hdf_bottom_topography[i])
botm_mean = botm_mean/nx
###Benchmark Point 4 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 5 Start##
#start = datetime.now()
############################
#Read Surface and Bottom Data
if bottom==True or surface == True:
for i in xrange(nx):
for j in xrange(ny):
if bottom==True:
#Bottom Coordinates
if create_topo==True:
colat, lon = hdf_bottom_coord[i,j]
x,y,z = RTF2XYZ(colat,lon,radius_inner+float( (hdf_bottom_topography[i,j]-botm_mean)*(10**21)/(6371000**2/10**(-6))/(3300*10)/1000 ))
botm_points.append((x,y,z))
else:
colat, lon = hdf_bottom_coord[i,j]
x,y,z = RTF2XYZ(colat, lon,radius_inner)
botm_points.append((x,y,z))
#Bottom Heatflux
botm_hflux.append(float(hdf_bottom_heatflux[i,j]))
#Bottom Velocity
vel_colat, vel_lon = map(float,hdf_bottom_velocity[i,j])
x,y,z = velocity2cart(vel_colat,vel_lon, radius_inner, colat, lon, radius_inner)
botm_vec.append((x,y,z))
if surface==True:
#Surface Information
if create_topo==True:
colat,lon = hdf_surface_coord[i,j]
                        #6371000 = Earth radius (m), 33000 = 3300*10 ~ density*gravity
x,y,z = RTF2XYZ(colat,lon,radius_outer+float( (hdf_surface_topography[i,j]-surf_mean)*(10**21)/(6371000**2/10**(-6))/(3300*10)/1000 ))
surf_points.append((x,y,z))
else:
colat, lon = hdf_surface_coord[i,j]
x,y,z = RTF2XYZ(colat, lon,radius_outer)
surf_points.append((x,y,z))
#Surface Heatflux
surf_hflux.append(float(hdf_surface_heatflux[i,j]))
#Surface Velocity
vel_colat, vel_lon = map(float,hdf_surface_velocity[i,j])
x,y,z = velocity2cart(vel_colat,vel_lon, radius_outer, colat, lon, radius_outer)
surf_vec.append((x,y,z))
#del variables for GC
if bottom==True:
del hdf_bottom_coord
del hdf_bottom_heatflux
del hdf_bottom_velocity
if surface==True:
del hdf_surface_coord
del hdf_surface_heatflux
del hdf_surface_velocity
###Benchmark Point 5 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 6 Start##
#start = datetime.now()
############################
##################################################################
#Create Connectivity info
if counter==0:
#For 3d Data
i=1 #Counts X Direction
j=1 #Counts Y Direction
k=1 #Counts Z Direction
for n in xrange((el_nx_redu*el_ny_redu*el_nz_redu)-(el_nz_redu*el_nx_redu)):
if (i%el_nz_redu)==0: #X-Values!!!
j+=1 #Count Y-Values
if (j%el_nx_redu)==0:
k+=1 #Count Z-Values
if i%el_nz_redu!=0 and j%el_nx_redu!=0: #Check if Box can be created
#Get Vertnumbers
n0 = n+(capnr*(el_nx_redu*el_ny_redu*el_nz_redu))
n1 = n0+el_nz_redu
n2 = n1+el_nz_redu*el_nx_redu
n3 = n0+el_nz_redu*el_nx_redu
n4 = n0+1
n5 = n4+el_nz_redu
n6 = n5+el_nz_redu*el_nx_redu
n7 = n4+el_nz_redu*el_nx_redu
#Created Polygon Box
polygons3d.append([n0,n1,n2,n3,n4,n5,n6,n7]) #Hexahedron VTK Representation
i+=1
if bottom==True or surface==True:
#Connectivity for 2d-Data
i=1
for n in xrange((nx)*(ny) - ny):
if i%ny!=0 :
n0 = n+(capnr*((nx)*(ny)))
n1 = n0+1
n2 = n0+ny
n3 = n2+1
polygons2d.append([n0,n1,n2,n3])
i+=1
###Benchmark Point 6 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf\n" % (delta.seconds + float(delta.microseconds)/1e6)
#print benchmarkstr
#################################################################
#Write Data to VTK
#benchmarkstr = "\n\nIO:\n"
###Benchmark Point 7 Start##
#start = datetime.now()
############################
print 'Writing data to vtk...'
#Surface Points
if surface==True:
struct_coords = pyvtk.UnstructuredGrid(surf_points, pixel=polygons2d)
#topo_scal = pyvtk.Scalars(surf_topo,'Surface Topography', lookup_table='default')
hflux_scal = pyvtk.Scalars(surf_hflux,'Surface Heatflux',lookup_table='default')
vel_vec = pyvtk.Vectors(surf_vec,'Surface Velocity Vectors')
##
tempdata = pyvtk.PointData(hflux_scal,vel_vec)
data = pyvtk.VtkData(struct_coords, tempdata,'CitcomS Output %s Timestep %s' % ('surface info',t))
if create_ascii:
data.tofile(vtk_path + (vtkfile % ('surface',t)),)
else:
data.tofile(vtk_path + (vtkfile % ('surface',t)),'binary')
print "Written Surface information to file"
###Benchmark Point 7 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 8 Start##
#start = datetime.now()
############################
if bottom==True:
#Bottom Points
struct_coords = pyvtk.UnstructuredGrid(botm_points, pixel=polygons2d)
#topo_scal = pyvtk.Scalars(botm_topo,'Bottom Topography','default')
hflux_scal = pyvtk.Scalars(botm_hflux,'Bottom Heatflux','default')
vel_vec = pyvtk.Vectors(botm_vec,'Bottom Velocity Vectors')
##
tempdata = pyvtk.PointData(hflux_scal,vel_vec)
data = pyvtk.VtkData(struct_coords, tempdata, 'CitcomS Output %s Timestep %s' % ('Bottom info',t))
if create_ascii:
data.tofile(vtk_path + (vtkfile % ('bottom',t)))
else:
data.tofile(vtk_path + (vtkfile % ('bottom',t)),'binary')
print "Written Bottom information to file"
###Benchmark Point 8 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf," % (delta.seconds + float(delta.microseconds)/1e6)
###Benchmark Point 9 Start##
#start = datetime.now()
#General Data
struct_coords = pyvtk.UnstructuredGrid(ordered_points,hexahedron=polygons3d)
vel_vec = pyvtk.Vectors(ordered_velocity, 'Velocity Vectors')
temp_scal = pyvtk.Scalars(ordered_temperature,'Temperature Scalars','default')
visc_scal = pyvtk.Scalars(ordered_visc,'Viscosity Scalars','default')
##
tempdata = pyvtk.PointData(temp_scal,visc_scal,vel_vec)
data = pyvtk.VtkData(struct_coords, tempdata, 'CitcomS Output %s Timestep:%d NX:%d NY:%d NZ:%d Radius_Inner:%f' % (path,t,el_nx_redu,el_ny_redu,el_nz_redu,radius_inner))
############################
if create_ascii:
data.tofile(vtk_path + (vtkfile % ('general',t)))
else:
data.tofile(vtk_path + (vtkfile % ('general',t)),'binary')
print "Written general data to file"
###Benchmark Point 9 Stop##
#delta = datetime.now() - start
#benchmarkstr += "%.5lf\n" % (delta.seconds + float(delta.microseconds)/1e6)
#print benchmarkstr
#print "\n"
# parse command line parameters
def initialize():
global path
global vtk_path
global initial
global timesteps
global create_topo
global create_bottom
global create_surface
global create_ascii
global nx
global ny
global nz
global nx_redu
global ny_redu
global nz_redu
global el_nx_redu
global el_ny_redu
global el_nz_redu
global radius_inner
global radius_outer
global nproc_surf
global f
try:
opts, args = getopt(sys.argv[1:], "p:o:i:t:x:y:z:bscah?", ['path=','output=','timestep=','x=','y=','z=','bottom','surface','createtopo','ascii', 'help','?'])
except GetoptError, msg:
print "Error: %s" % msg
sys.exit(1)
if len(opts)<=1:
print_help()
sys.exit(0)
for opt,arg in opts:
if opt in ('-p','--path'):
path = arg
if opt in ('-o','--output'):
vtk_path = arg
if opt in ('-i','--initial'):
try:
initial = int(arg)
except ValueError:
print "Initial is not a number."
sys.exit(1)
if opt in ('-t','--timestep'):
try:
timesteps = int(arg)
except ValueError:
print "Timestep is not a number."
sys.exit(1)
if opt in ('-x','--nx_reduce'):
try:
nx_redu = int(arg)
except ValueError:
print "NX is not a number."
if opt in ('-y','--ny_reduce'):
try:
ny_redu = int(arg)
except ValueError:
print "NY is not a number."
if opt in ('-z','--nz_reduce'):
try:
nz_redu = int(arg)
except ValueError:
print "NZ is not a number."
if opt in ('-b','--bottom'):
create_bottom = True
if opt in ('-s','--surface'):
create_surface = True
if opt in ('-c','--createtopo'):
create_topo = True
if opt in ('-a','--ascii'):
create_ascii = True
if opt in ('-h','--help'):
print_help()
sys.exit(0)
if opt == '-?':
print_help()
sys.exit(0)
f = tables.openFile(path,'r')
nx = int(f.root.input._v_attrs.nodex)
ny = int(f.root.input._v_attrs.nodey)
nz = int(f.root.input._v_attrs.nodez)
#If not defined as argument read from hdf
hdf_timesteps = int(f.root.time.nrows)
if timesteps==None or timesteps>hdf_timesteps:
timesteps = hdf_timesteps
if nx_redu==None:
nx_redu = nx-1
if ny_redu==None:
ny_redu = ny-1
if nz_redu==None:
nz_redu = nz-1
if nx_redu>=nx:
nx_redu=nx-1
if ny_redu>=ny:
ny_redu=ny-1
if nz_redu>=nz:
nz_redu=nz-1
el_nx_redu = nx_redu+1
el_ny_redu = ny_redu+1
el_nz_redu = nz_redu+1
radius_inner = float(f.root.input._v_attrs.radius_inner)
radius_outer = float(f.root.input._v_attrs.radius_outer)
nproc_surf = int(f.root.input._v_attrs.nproc_surf)
###############################################################################
def citcoms_hdf2vtk():
global counter
#Call initialize to get and set input params
initialize()
d1 = datetime.now()
print "Converting Hdf to Vtk"
print "Initial:",initial, "Timesteps:",timesteps
print "NX:",el_nx_redu, "NY:",el_ny_redu, "NZ:", el_nz_redu
print "Create Bottom: ",create_bottom, " Create Surface: ", create_surface
print "Create Topography: ", create_topo
for t in xrange(initial,timesteps):
start = datetime.now()
citcom2vtk(t)
counter+=1
delta = datetime.now() - start
print "\t%.3lf sec" % (delta.seconds + float(delta.microseconds)/1e6)
d2 = datetime.now()
f.close()
print "Total: %d seconds" % (d2 - d1).seconds
###############################################################################
if __name__ == '__main__':
citcoms_hdf2vtk()
| gpl-2.0 | -704,908,123,187,786,500 | 34.011076 | 173 | 0.519953 | false | 3.656751 | false | false | false |
inveniosoftware/invenio-communities | tests/records/collections/test_collections.py | 1 | 3019 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2021 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Community module tests."""
import pytest
from flask import url_for
from invenio_accounts.testutils import login_user_via_session
@pytest.mark.skip()
def test_simple_flow(db, es_clear, community, accepted_community_record, client, community_owner):
"""Test basic operations on collections."""
comid, community = community
collections_list_url = url_for(
'invenio_communities_collections.collections_list',
pid_value=comid.pid_value)
# list
resp = client.get(collections_list_url)
assert resp.status_code == 200
assert resp.json == {}
# create
collection_data = {
'id': 'test',
'title': 'Test collection',
'description': 'Test collection description',
}
resp = client.post(collections_list_url, json=collection_data)
assert resp.status_code == 401
login_user_via_session(client, user=community_owner)
resp = client.post(collections_list_url, json=collection_data)
assert resp.status_code == 201
created_resp_json = resp.json
collection_item_url = created_resp_json['links']['self']
assert created_resp_json == {
'title': collection_data['title'],
'description': collection_data['description'],
'links': {
'self': '/communities/{}/collections/test'.format(comid.pid_value)
},
}
# read
resp = client.get(collection_item_url)
assert resp.status_code == 200
assert resp.json == created_resp_json
# update
resp = client.put(collection_item_url, json={
'title': 'New test title',
# NOTE: removes description
})
assert resp.status_code == 200
assert resp.json == {
'title': 'New test title',
'description': None,
'links': {'self': collection_item_url},
}
# get record collections
community_record_url = url_for(
'invenio_communities_collections.records',
pid_value=comid.pid_value,
record_pid=accepted_community_record.record.pid.pid_value
)
resp = client.get(community_record_url)
assert resp.status_code == 200
assert '_collections' not in resp.json
# update record collections
resp = client.put(community_record_url, json={
'collections': [{'id': 'test'}]
})
assert resp.status_code == 200
assert resp.json['_collections'] == [{'id': 'test'}]
# delete
resp = client.delete(collection_item_url)
assert resp.status_code == 204
resp = client.get(collection_item_url)
assert resp.status_code == 404
@pytest.mark.skip()
def test_permissions(db, es_clear, community, accepted_community_record, client, community_owner, authenticated_user, record_owner):
"""Test collection permissions."""
# TODO: write tests for permissions
pass
| mit | 7,395,856,467,491,684,000 | 29.806122 | 132 | 0.650547 | false | 3.850765 | true | false | false |
fanglinfang/myuw | myuw/logger/session_log.py | 1 | 1051 | from myuw.dao.affiliation import get_base_campus
from myuw.dao.enrollment import get_current_quarter_enrollment
from myuw.dao.gws import is_grad_student, is_undergrad_student
import logging
import json
import hashlib
logger = logging.getLogger('session')
def log_session(netid, session_key, request):
if session_key is None:
session_key = ''
session_hash = hashlib.md5(session_key).hexdigest()
log_entry = {'netid': netid,
'session_key': session_hash,
'class_level': None,
'is_grad': None,
'campus': None}
try:
level = get_current_quarter_enrollment(request).class_level
log_entry['class_level'] = level
is_mobile = request.is_mobile or request.is_tablet
log_entry['is_mobile'] = bool(is_mobile)
except AttributeError:
pass
log_entry['is_grad'] = is_grad_student()
log_entry['is_ugrad'] = is_undergrad_student()
log_entry['campus'] = get_base_campus(request)
logger.info(json.dumps(log_entry))
| apache-2.0 | 1,625,579,248,412,522,000 | 31.84375 | 67 | 0.642245 | false | 3.538721 | false | false | false |
shiblon/pytour | 3/tutorials/generators3.py | 1 | 2336 | # vim:tw=50
"""Generators for Refactoring
Now that we know how to make our own generators,
let's do some refactoring to make use of this idea
and clean up the code a bit. We'll start by
splitting out the |clean_lines| function, which
basically just skips blank lines and comments,
stripping unnecessary space.
This notion of converting one iterator into
another is prevalent in Python. As one rather
common example, the |enumerate| builtin converts
an iterable over items into an iterable over
|(index,item)| pairs. You built something similar
earlier.
Generators make refactoring sequence operations
really easy, even operations that need to remember
something about past elements. Without them,
separating functionality like this would be hard
or sometimes even impossible.
Exercises
- Look carefully at "clean_lines" and make sure
you understand how it works.
- Use "enumerate" to get line numbers with the
data, and emit that line number in the
ValueError message. Note that in string
formatting, {0} means "the first argument". You
can put any number in there, so long as it
matches the position of what you pass to
|format|. So, you could use |{2}| for the line
number if you want.
"""
__doc__ = """Refactoring functionality.
Changes: we now clean out comments and blank lines
in a different function, and the error message for
bad dates has the line number in it.
"""
def clean_lines(lines):
for line in lines:
line = line.strip()
if not line or line.startswith('#'):
continue
yield line
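# For illustration only (not part of the exercise solution): clean_lines drops
# blanks and comments, e.g.
#   list(clean_lines(['# header', '', ' 2012-10-10 5.4 '])) == ['2012-10-10 5.4']
# and the enumerate builtin mentioned above produces numbered pairs,
#   list(enumerate(['a', 'b'])) == [(0, 'a'), (1, 'b')]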
def parsed_measurements(lines):
last_date = ""
# TODO:
# Use 'enumerate(clean_lines(lines))' to get
# (number, line) pairs. Use the number in the
# exception message to show on what line the
# error occurred.
for line in clean_lines(lines):
date, measurement = line.split()
if date <= last_date:
raise ValueError("Non-increasing: {0} -> {1}".format(
last_date, date))
last_date = date
yield date, measurement
if __name__ == '__main__':
_assert_equal([('2012-10-10', '5.4'), ('2012-10-11', '5.3')],
list(parsed_measurements(['2012-10-10 5.4',
'2012-10-11 5.3'])))
_assert_raises(ValueError, lambda x: list(parsed_measurements(x)),
['2012-10-10 5.4', '2012-10-09 5.3'])
| apache-2.0 | -6,395,722,228,013,602,000 | 29.736842 | 68 | 0.689212 | false | 3.792208 | false | false | false |
anish/buildbot | master/buildbot/test/unit/test_reporters_notifier.py | 1 | 14563 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import copy
import sys
from mock import Mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot import config
from buildbot.process.results import CANCELLED
from buildbot.process.results import EXCEPTION
from buildbot.process.results import FAILURE
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.reporters.notifier import NotifierBase
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.test.util.misc import TestReactorMixin
from buildbot.test.util.notifier import NotifierTestMixin
py_27 = sys.version_info[0] > 2 or (sys.version_info[0] == 2
and sys.version_info[1] >= 7)
class TestMailNotifier(ConfigErrorsMixin, TestReactorMixin,
unittest.TestCase, NotifierTestMixin):
def setUp(self):
self.setUpTestReactor()
self.master = fakemaster.make_master(self, wantData=True, wantDb=True,
wantMq=True)
@defer.inlineCallbacks
def setupNotifier(self, *args, **kwargs):
mn = NotifierBase(*args, **kwargs)
mn.sendMessage = Mock(spec=mn.sendMessage)
mn.sendMessage.return_value = "<message>"
yield mn.setServiceParent(self.master)
yield mn.startService()
return mn
def test_init_enforces_tags_and_builders_are_mutually_exclusive(self):
with self.assertRaises(config.ConfigErrors):
NotifierBase(tags=['fast', 'slow'], builders=['a', 'b'])
def test_init_warns_notifier_mode_all_in_iter(self):
with self.assertRaisesConfigError(
"mode 'all' is not valid in an iterator and must be passed in as a separate string"):
NotifierBase(mode=['all'])
@defer.inlineCallbacks
def test_buildsetComplete_sends_message(self):
_, builds = yield self.setupBuildResults(SUCCESS)
mn = yield self.setupNotifier(buildSetSummary=True,
mode=("failing", "passing", "warnings"),
builders=["Builder1", "Builder2"])
mn.buildMessage = Mock()
yield mn.buildsetComplete('buildset.98.complete',
dict(bsid=98))
mn.buildMessage.assert_called_with(
"whole buildset",
builds, SUCCESS)
self.assertEqual(mn.buildMessage.call_count, 1)
@defer.inlineCallbacks
def test_buildsetComplete_doesnt_send_message(self):
_, builds = yield self.setupBuildResults(SUCCESS)
# disable passing...
mn = yield self.setupNotifier(buildSetSummary=True,
mode=("failing", "warnings"),
builders=["Builder1", "Builder2"])
mn.buildMessage = Mock()
yield mn.buildsetComplete('buildset.98.complete',
dict(bsid=98))
self.assertFalse(mn.buildMessage.called)
@defer.inlineCallbacks
def test_isMessageNeeded_ignores_unspecified_tags(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
build['builder']['tags'] = ['slow']
mn = yield self.setupNotifier(tags=["fast"])
self.assertFalse(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_tags(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
build['builder']['tags'] = ['fast']
mn = yield self.setupNotifier(tags=["fast"])
self.assertTrue(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_schedulers_sends_mail(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
mn = yield self.setupNotifier(schedulers=['checkin'])
self.assertTrue(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_schedulers_doesnt_send_mail(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
mn = yield self.setupNotifier(schedulers=['some-random-scheduler'])
self.assertFalse(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_branches_sends_mail(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
mn = yield self.setupNotifier(branches=['master'])
self.assertTrue(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def test_isMessageNeeded_branches_doesnt_send_mail(self):
_, builds = yield self.setupBuildResults(SUCCESS)
build = builds[0]
# force tags
mn = yield self.setupNotifier(branches=['some-random-branch'])
self.assertFalse(mn.isMessageNeeded(build))
@defer.inlineCallbacks
def run_simple_test_sends_message_for_mode(self, mode, result, shouldSend=True):
_, builds = yield self.setupBuildResults(result)
mn = yield self.setupNotifier(mode=mode)
self.assertEqual(mn.isMessageNeeded(builds[0]), shouldSend)
def run_simple_test_ignores_message_for_mode(self, mode, result):
return self.run_simple_test_sends_message_for_mode(mode, result, False)
def test_isMessageNeeded_mode_all_for_success(self):
return self.run_simple_test_sends_message_for_mode("all", SUCCESS)
def test_isMessageNeeded_mode_all_for_failure(self):
return self.run_simple_test_sends_message_for_mode("all", FAILURE)
def test_isMessageNeeded_mode_all_for_warnings(self):
return self.run_simple_test_sends_message_for_mode("all", WARNINGS)
def test_isMessageNeeded_mode_all_for_exception(self):
return self.run_simple_test_sends_message_for_mode("all", EXCEPTION)
def test_isMessageNeeded_mode_all_for_cancelled(self):
return self.run_simple_test_sends_message_for_mode("all", CANCELLED)
def test_isMessageNeeded_mode_failing_for_success(self):
return self.run_simple_test_ignores_message_for_mode("failing", SUCCESS)
def test_isMessageNeeded_mode_failing_for_failure(self):
return self.run_simple_test_sends_message_for_mode("failing", FAILURE)
def test_isMessageNeeded_mode_failing_for_warnings(self):
return self.run_simple_test_ignores_message_for_mode("failing", WARNINGS)
def test_isMessageNeeded_mode_failing_for_exception(self):
return self.run_simple_test_ignores_message_for_mode("failing", EXCEPTION)
def test_isMessageNeeded_mode_exception_for_success(self):
return self.run_simple_test_ignores_message_for_mode("exception", SUCCESS)
def test_isMessageNeeded_mode_exception_for_failure(self):
return self.run_simple_test_ignores_message_for_mode("exception", FAILURE)
def test_isMessageNeeded_mode_exception_for_warnings(self):
return self.run_simple_test_ignores_message_for_mode("exception", WARNINGS)
def test_isMessageNeeded_mode_exception_for_exception(self):
return self.run_simple_test_sends_message_for_mode("exception", EXCEPTION)
def test_isMessageNeeded_mode_warnings_for_success(self):
return self.run_simple_test_ignores_message_for_mode("warnings", SUCCESS)
def test_isMessageNeeded_mode_warnings_for_failure(self):
return self.run_simple_test_sends_message_for_mode("warnings", FAILURE)
def test_isMessageNeeded_mode_warnings_for_warnings(self):
return self.run_simple_test_sends_message_for_mode("warnings", WARNINGS)
def test_isMessageNeeded_mode_warnings_for_exception(self):
return self.run_simple_test_ignores_message_for_mode("warnings", EXCEPTION)
def test_isMessageNeeded_mode_passing_for_success(self):
return self.run_simple_test_sends_message_for_mode("passing", SUCCESS)
def test_isMessageNeeded_mode_passing_for_failure(self):
return self.run_simple_test_ignores_message_for_mode("passing", FAILURE)
def test_isMessageNeeded_mode_passing_for_warnings(self):
return self.run_simple_test_ignores_message_for_mode("passing", WARNINGS)
def test_isMessageNeeded_mode_passing_for_exception(self):
return self.run_simple_test_ignores_message_for_mode("passing", EXCEPTION)
@defer.inlineCallbacks
def run_sends_message_for_problems(self, mode, results1, results2, shouldSend=True):
_, builds = yield self.setupBuildResults(results2)
mn = yield self.setupNotifier(mode=mode)
build = builds[0]
if results1 is not None:
build['prev_build'] = copy.deepcopy(builds[0])
build['prev_build']['results'] = results1
else:
build['prev_build'] = None
self.assertEqual(mn.isMessageNeeded(builds[0]), shouldSend)
def test_isMessageNeeded_mode_problem_sends_on_problem(self):
return self.run_sends_message_for_problems("problem", SUCCESS, FAILURE, True)
def test_isMessageNeeded_mode_problem_ignores_successful_build(self):
return self.run_sends_message_for_problems("problem", SUCCESS, SUCCESS, False)
def test_isMessageNeeded_mode_problem_ignores_two_failed_builds_in_sequence(self):
return self.run_sends_message_for_problems("problem", FAILURE, FAILURE, False)
def test_isMessageNeeded_mode_change_sends_on_change(self):
return self.run_sends_message_for_problems("change", FAILURE, SUCCESS, True)
def test_isMessageNeeded_mode_change_sends_on_failure(self):
return self.run_sends_message_for_problems("change", SUCCESS, FAILURE, True)
def test_isMessageNeeded_mode_change_ignores_first_build(self):
return self.run_sends_message_for_problems("change", None, FAILURE, False)
def test_isMessageNeeded_mode_change_ignores_first_build2(self):
return self.run_sends_message_for_problems("change", None, SUCCESS, False)
def test_isMessageNeeded_mode_change_ignores_same_result_in_sequence(self):
return self.run_sends_message_for_problems("change", SUCCESS, SUCCESS, False)
def test_isMessageNeeded_mode_change_ignores_same_result_in_sequence2(self):
return self.run_sends_message_for_problems("change", FAILURE, FAILURE, False)
@defer.inlineCallbacks
def setupBuildMessage(self, **mnKwargs):
_, builds = yield self.setupBuildResults(SUCCESS)
mn = yield self.setupNotifier(**mnKwargs)
mn.messageFormatter = Mock(spec=mn.messageFormatter)
mn.messageFormatter.formatMessageForBuildResults.return_value = {"body": "body", "type": "text",
"subject": "subject"}
yield mn.buildMessage("mybldr", builds, SUCCESS)
return (mn, builds)
@defer.inlineCallbacks
def test_buildMessage_nominal(self):
mn, builds = yield self.setupBuildMessage(mode=("change",))
build = builds[0]
mn.messageFormatter.formatMessageForBuildResults.assert_called_with(
('change',), 'mybldr', build['buildset'], build, self.master,
None, ['me@foo'])
self.assertEqual(mn.sendMessage.call_count, 1)
mn.sendMessage.assert_called_with('body', 'subject', 'text', 'mybldr', SUCCESS, builds,
['me@foo'], [], [])
@defer.inlineCallbacks
def test_buildMessage_addLogs(self):
mn, builds = yield self.setupBuildMessage(mode=("change",), addLogs=True)
self.assertEqual(mn.sendMessage.call_count, 1)
# make sure the logs are send
self.assertEqual(mn.sendMessage.call_args[0][8][0]['logid'], 60)
# make sure the log has content
self.assertIn(
"log with", mn.sendMessage.call_args[0][8][0]['content']['content'])
@defer.inlineCallbacks
def test_buildMessage_addPatch(self):
mn, builds = yield self.setupBuildMessage(mode=("change",), addPatch=True)
self.assertEqual(mn.sendMessage.call_count, 1)
# make sure the patch are sent
self.assertEqual(mn.sendMessage.call_args[0][7],
[{'author': 'him@foo',
'body': b'hello, world',
'comment': 'foo',
'level': 3,
'patchid': 99,
'subdir': '/foo'}])
@defer.inlineCallbacks
def test_buildMessage_addPatchNoPatch(self):
SourceStamp = fakedb.SourceStamp
class NoPatchSourcestamp(SourceStamp):
def __init__(self, id, patchid):
super().__init__(id=id)
self.patch(fakedb, 'SourceStamp', NoPatchSourcestamp)
mn, builds = yield self.setupBuildMessage(mode=("change",), addPatch=True)
self.assertEqual(mn.sendMessage.call_count, 1)
# make sure no patches are sent
self.assertEqual(mn.sendMessage.call_args[0][7], [])
@defer.inlineCallbacks
def test_workerMissingSendMessage(self):
mn = yield self.setupNotifier(watchedWorkers=['myworker'])
yield mn.workerMissing('worker.98.complete',
dict(name='myworker',
notify=["[email protected]"],
workerinfo=dict(admin="myadmin"),
last_connection="yesterday"))
self.assertEqual(mn.sendMessage.call_count, 1)
text = mn.sendMessage.call_args[0][0]
recipients = mn.sendMessage.call_args[1]['users']
self.assertEqual(recipients, ['[email protected]'])
self.assertIn(
b"has noticed that the worker named myworker went away", text)
| gpl-2.0 | 6,238,227,632,631,262,000 | 40.847701 | 104 | 0.661334 | false | 3.90953 | true | false | false |
cavestruz/L500analysis | plotting/profiles/T_Vr_evolution/Vr_evolution/plot_Vrall_nu_binned_r500c.py | 1 | 3207 | from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
import *
from L500analysis.plotting.profiles.tools.select_profiles \
import nu_cut, prune_dict
from L500analysis.utils.constants import linear_rbins
from derived_field_functions import *
color = matplotlib.cm.afmhot_r
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12
aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35]
nu_threshold = {0:[1,1.7],1:[1.7,2.3],2:[2.3, 2.7]} # 1, 1.7, 2.3, 2.7
nu_threshold_key = 2
nu_label = r"%0.1f$\leq\nu_{500c}\leq$%0.1f"%(nu_threshold[nu_threshold_key][0],
nu_threshold[nu_threshold_key][1])
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'
profiles_list = ['r_mid',
'R/R500c',
'vel_gas_rad_avg',
'vel_dark_rad_avg',
'bulk_vel_gas_rad_avg',
'VrVc_ratio_500c',
]
halo_properties_list=['r500c','M_total_500c','nu_500c']
Vratio=r"$\tilde{V}=1-V_r/V_{circ,500c}$"
fVz1=r"$\tilde{V}/\tilde{V}(z=1)$"
pa = PlotAxes(figname='Vall_r500c_nu%01d'%nu_threshold_key,
axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]],
axes_labels=[Vratio,fVz1],
ylog=[False,False],
xlabel=r"$R/R_{500c}$",
xlim=(0.2,5),
ylims=[(.81,2.),(0.6,1.39)])
Vr={}
Vplots = [Vr]
clkeys = ['VrVc_ratio_500c']
linestyles = ['-']
for aexp in aexps :
cldata = GetClusterData(aexp=aexp,db_name=db_name,
db_dir=db_dir,
profiles_list=profiles_list,
halo_properties_list=halo_properties_list)
nu_cut_hids = nu_cut(nu=cldata['nu_500c'], threshold=nu_threshold[nu_threshold_key])
for Vplot, key in zip(Vplots,clkeys) :
pruned_profiles = prune_dict(d=cldata[key],k=nu_cut_hids)
Vplot[aexp] = calculate_profiles_mean_variance(pruned_profiles)
pa.axes[Vratio].plot( linear_rbins, Vr[aexp]['mean'],color=color(aexp),ls='-',
label="$z=%3.1f$" % aexp2redshift(aexp))
for aexp in aexps :
for V,ls in zip(Vplots,linestyles) :
fractional_evolution = get_profiles_division_mean_variance(
mean_profile1=V[aexp]['mean'],
var_profile1=V[aexp]['var'],
mean_profile2=V[0.5]['mean'],
var_profile2=V[0.5]['var'],
)
pa.axes[fVz1].plot( linear_rbins, fractional_evolution['mean'],
color=color(aexp),ls=ls)
pa.axes[Vratio].text(0.2,1.9,nu_label)
pa.axes[Vratio].tick_params(labelsize=12)
pa.axes[Vratio].tick_params(labelsize=12)
pa.axes[fVz1].set_yticks(arange(0.6,1.4,0.2))
pa.set_legend(axes_label=Vratio,ncol=3,loc='lower right', frameon=False)
pa.color_legend_texts(axes_label=Vratio)
pa.savefig()
| mit | 24,674,641,612,658,730 | 35.033708 | 90 | 0.586218 | false | 2.786273 | false | false | false |
goldsborough/ig | ig/colors.py | 1 | 1072 | import random
def random_color(base, variation):
'''
Returns a random, bounded color value.
Args:
base: Some base color component (between 0 and 255)
variation: The degree of variation (around the color)
Returns:
A random color.
'''
color = base + (2 * random.random() - 1) * variation
return max(8, min(int(color), 256))
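# Example behaviour (illustrative, not part of the original module): with zero
# variation the base value passes through unchanged, random_color(128, 0) == 128,
# and any out-of-range result is clamped into the interval [8, 256].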
class Colors(object):
'''
Aggregates information about the color scheme of the visualization.
'''
def __init__(self, base_colors):
'''
Constructor.
Args:
base_colors: The base colors around which to vary
'''
self.base = list(base_colors)
self.variation = None
self.alpha_min = None
def generate(self):
'''
Generates a color.
Returns:
A new RGBA color value.
'''
rgba = [random_color(color, self.variation) for color in self.base]
rgba.append(max(self.alpha_min, random.random()))
return 'rgba({0})'.format(','.join(map(str, rgba)))
| mit | 1,673,187,914,116,669,000 | 23.363636 | 75 | 0.570896 | false | 4.030075 | false | false | false |
NERC-CEH/jules-jasmin | job_runner/job_runner/controllers/jobs.py | 1 | 5375 | """
# Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import logging
from pylons.controllers.util import abort
from pylons.decorators import jsonify
from job_runner.lib.base import BaseController
from job_runner.utils.constants import *
from job_runner.services.job_service import JobService
from job_runner.model.job_status import JobStatus
from job_runner.services.service_exception import ServiceException
log = logging.getLogger(__name__)
def _validate_namelist(namelist):
"""
Validate that the namelist has a name and that the parameters have names
:param namelist: the name list
"""
if not JSON_MODEL_NAMELIST_NAME in namelist or namelist[JSON_MODEL_NAMELIST_NAME].strip() == '':
abort(400, "Invalid name for one of the name lists")
if not JSON_MODEL_PARAMETERS in namelist:
abort(400, "namelist has no parameters in")
for key in namelist[JSON_MODEL_PARAMETERS]:
if key.strip() == '':
abort(400, "A parameter name can not be blank")
def _validate_namelist_file(namelist_file):
"""
Validate that the namelist file has a filename and contains some namelists
:param namelist_file: the name list file
"""
if not JSON_MODEL_NAMELIST_FILE_FILENAME in namelist_file \
or namelist_file[JSON_MODEL_NAMELIST_FILE_FILENAME].strip() == '':
abort(400, "Invalid filename for one of the namelist files")
if not JSON_MODEL_NAMELISTS in namelist_file or len(namelist_file[JSON_MODEL_NAMELISTS]) == 0:
abort(400, "namelist file has no namelists in")
for namelist in namelist_file[JSON_MODEL_NAMELISTS]:
_validate_namelist(namelist)
class JobsController(BaseController):
"""
Controller for jobs
"""
def __init__(self, job_service=JobService()):
"""
:param job_service: the job service
"""
self._job_service = job_service
@jsonify
def new(self):
"""
Create a new job submission.
"""
json = self._get_json_abort_on_error()
log.debug("New Model with parameters %s" % json)
self._check_field_exists_in_json("code version", json, JSON_MODEL_CODE_VERSION)
if json[JSON_MODEL_CODE_VERSION] not in VALID_CODE_VERSIONS:
abort(400, "Invalid code version")
self._check_field_exists_in_json("model run id", json, JSON_MODEL_RUN_ID, is_int=True)
self._check_field_exists_in_json("user id", json, JSON_USER_ID, is_int=True)
self._check_field_exists_in_json("user name", json, JSON_USER_NAME)
self._check_field_exists_in_json("user email address", json, JSON_USER_EMAIL)
self._check_field_exists_in_json("namelist files", json, JSON_MODEL_NAMELIST_FILES)
if len(json[JSON_MODEL_NAMELIST_FILES]) == 0:
abort(400, "Invalid namelist files")
self._check_field_exists_in_json("land cover", json, JSON_LAND_COVER)
namelist = []
for namelist_file in json[JSON_MODEL_NAMELIST_FILES]:
namelist.append(_validate_namelist_file(namelist_file))
try:
return self._job_service.submit(json)
except ServiceException, ex:
abort(400, ex.message)
@jsonify
def status(self):
"""
Return the statuses of the jobs requested
"""
json = self._get_json_abort_on_error()
log.debug("Status with parameters %s" % json)
queued_jobs_status = self._job_service.queued_jobs_status()
job_statuses = []
for job_id in json:
try:
job_status = JobStatus(int(job_id))
job_status.check(self._job_service, queued_jobs_status)
job_statuses.append(job_status)
except ValueError:
abort(400, "Job ids must all be integers")
return job_statuses
@jsonify
def delete(self):
"""
Delete a model run directory
"""
json = self._get_json_abort_on_error()
log.debug("Delete with parameters %s" % json)
if JSON_MODEL_RUN_ID not in json:
abort(400, "Model run id must be included")
try:
model_run_id = int(json[JSON_MODEL_RUN_ID])
self._job_service.delete(model_run_id)
except ValueError:
abort(400, "Model run id must be an integer")
except ServiceException, ex:
abort(400, ex.message)
except Exception:
log.exception("Unknown error when trying to delete model run directory")
abort(400, "Unknown error when trying to delete model run directory")
| gpl-2.0 | 8,434,452,970,082,947,000 | 33.455128 | 100 | 0.641302 | false | 3.960943 | false | false | false |
parantapa/pbdset | pbdset.py | 1 | 14786 | # encoding: utf-8
# pylint: disable=too-many-instance-attributes
# pylint: disable=attribute-defined-outside-init
#
# ____________________/\
# \______ \______ )/______
# | ___/| | _// ___/
# | | | | \\___ \
# |____| |______ /____ >
# \/ \/
# ________ __ __
# \______ \ _____ _/ |______ ______ _____/ |_
# | | \\__ \\ __\__ \ / ___// __ \ __\
# | ` \/ __ \| | / __ \_\___ \\ ___/| |
# /_______ (____ /__| (____ /____ >\___ >__|
# \/ \/ \/ \/ \/
"""
Read and write PB's Dataset files.
"""
import sys
import os.path
import struct
import lmdb
# Import comression/decompression functions
from zlib import compress as zlib_comp, decompress as zlib_decomp
from lz4 import compress as _lz4_comp, decompress as lz4_decomp
from backports.lzma import compress as _xz_comp, \
decompress as _xz_decomp, \
CHECK_NONE
from zstd import ZstdCompressor, ZstdDecompressor
def lz4_comp(data, _):
return _lz4_comp(data)
def xz_comp(data, level):
return _xz_comp(data, preset=level, check=CHECK_NONE)
def xz_decomp(data):
return _xz_decomp(data)
_zcomp = {}
_zdecomp = ZstdDecompressor()
def zstd_comp(data, level):
if level not in _zcomp:
_zcomp[level] = ZstdCompressor(level=level,
write_checksum=False,
write_content_size=True)
return _zcomp[level].compress(data)
def zstd_decomp(data):
return _zdecomp.decompress(data)
# We serialize using msgpack
from msgpack import packb as _packb, unpackb as _unpackb
def pack(x):
return _packb(x, use_bin_type=True)
def unpack(x, default=None):
if x is None:
return default
else:
return _unpackb(x, encoding="utf-8")
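# Round-trip sketch (illustrative): unpack(pack({'a': 1})) == {'a': 1}, while a
# missing store entry falls back to the default, unpack(None, default=0) == 0.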
# Setup the checksum function
from zlib import adler32
def checksum(data):
return adler32(data) & 0xffffffff
COMP_TABLE = {
"none": (lambda data, level: data, lambda comp: comp),
"zlib": (zlib_comp, zlib_decomp),
"lz4": (lz4_comp, lz4_decomp),
"xz": (xz_comp, xz_decomp),
"zstd": (zstd_comp, zstd_decomp)
}
VERSION = 0.1
class Closes(object): # pylint: disable=too-few-public-methods
"""
Runs close() on context exiting and garbage collection.
"""
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def close(self):
raise NotImplementedError()
class DataStore(Closes):
"""
An abstraction layer over underlying data store.
"""
META_KEYS = frozenset([
"version", "block_length", "length", "comp_format", "comp_level"
])
DEFAULT_PARAMS = {
"map_size": 2 ** 40,
"subdir": False,
"readonly": True,
"metasync": False,
"sync": False,
"mode": 0o644,
"readahead": False,
"meminit": False,
"max_dbs": 2,
"lock": False,
}
def __init__(self, fname, write=False, create=False):
self.closed = True
self.fname = fname
self.write = write
self.create = create
_exists = os.path.exists(fname)
if create and _exists:
raise IOError("File '%s' already exists" % fname)
if not create and not _exists:
raise IOError("File '%s' doesn't exist" % fname)
params = dict(DataStore.DEFAULT_PARAMS)
params["readonly"] = not write
self.env = lmdb.open(self.fname, **params)
try:
self.meta_db = self.env.open_db("meta", create=create)
self.block_db = self.env.open_db("block", create=create)
self.txn = self.env.begin(write=write, buffers=True)
self.closed = False
except Exception:
self.env.close()
raise
def close(self):
if not self.closed:
self.txn.commit()
if self.write:
self.env.sync(True)
self.env.close()
self.closed = True
def get(self, i):
ib = struct.pack(">I", i)
return self.txn.get(ib, db=self.block_db)
def put(self, i, block):
ib = struct.pack(">I", i)
self.txn.put(ib, block, db=self.block_db)
def __getattr__(self, key):
if key not in DataStore.META_KEYS:
raise AttributeError("Unknown attribute: '%s'" % key)
value = self.txn.get(key, db=self.meta_db)
if value is None:
return None
else:
return unpack(value)
def __setattr__(self, key, value):
if key in DataStore.META_KEYS:
self.txn.put(key, pack(value), db=self.meta_db)
else:
self.__dict__[key] = value
def comp_block(block_raw, comp_fn, comp_level):
"""
Compress the block and add header.
"""
block_chksum = checksum(block_raw)
block_comp = comp_fn(block_raw, comp_level)
header = struct.pack("<II", len(block_raw), block_chksum)
block_hcomp = header + block_comp
return block_hcomp
def decomp_block(block_hcomp, decomp_fn):
"""
Decompress the block.
"""
len_block_raw, stored_chksum = struct.unpack_from("<II", block_hcomp)
block_comp = buffer(block_hcomp, 8, len(block_hcomp) - 8)
block_raw = decomp_fn(block_comp)
block_chksum = checksum(block_raw)
if len(block_raw) != len_block_raw:
raise IOError("Size mismatch: %d != %d"
% (len(block_raw), len_block_raw))
if block_chksum != stored_chksum:
raise IOError("Checksum mismatch: %0x != %0x"
% (block_chksum, stored_chksum))
return block_raw
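# Sanity-check sketch (not part of the original module): comp_block/decomp_block
# are inverses for any registered codec, e.g.
#   decomp_block(comp_block(b'payload', zlib_comp, 6), zlib_decomp) == b'payload'
# decomp_block raises IOError when the stored length or checksum does not match.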
class DatasetReader(Closes):
"""
Read entries from a dataset file.
"""
def __init__(self, fname):
self.closed = True
self.store = DataStore(fname)
try:
if self.store.version != VERSION:
raise IOError("Invalid version: %d" % self.store.version)
self.block_length = self.store.block_length
self.length = self.store.length
self.comp_format = self.store.comp_format
self.comp_level = self.store.comp_level
try:
_, self.decomp_fn = COMP_TABLE[self.comp_format]
except KeyError:
raise IOError("Unknown compression: %s" % self.comp_format)
self.closed = False
except Exception:
self.store.close()
raise
# number of blocks already present in the dataset
self.num_blocks = self.length // self.block_length
self.num_blocks += bool(self.length % self.block_length)
# NOTE: Only used by get_idx
# get_idxs and get_slice use their own local block storage
self.cur_block_idx = -1
self.cur_block = None
def close(self):
if not self.closed:
self.store.close()
self.closed = True
def load_block(self, i):
"""
Load a block from the given file.
"""
block_hcomp = self.store.get(i)
if block_hcomp is None:
raise IOError("Block %d not in store" % i)
try:
block_raw = decomp_block(block_hcomp, self.decomp_fn)
except IOError as e:
raise IOError("Block %d: %s", (i, e)), None, sys.exc_info()[2]
return unpack(block_raw)
def __len__(self):
return self.length
def get_idx(self, n):
"""
Get the value at given idx.
"""
n = (self.length + n) if n < 0 else n
if n < 0 or n >= self.length:
raise IndexError("Index out of range")
i = n // self.block_length
j = n % self.block_length
if self.cur_block_idx != i:
self.cur_block = self.load_block(i)
self.cur_block_idx = i
return unpack(self.cur_block[j])
def get_slice(self, *args):
"""
Return iterable for the given range.
"""
_block_length = self.block_length
start, stop, step = slice(*args).indices(self.length)
# Find the number of items in slice
n = (stop - start) // step
if n <= 0:
return
# Check if begin and end indexes are in range
if start < 0 or start >= self.length:
raise IndexError("Index out of range")
end = start + (n - 1) * step
if end < 0 or end >= self.length:
raise IndexError("Index out of range")
# Do the actual loop
# This doesn't use the class's cur_block
cur_block_idx = -1
cur_block = None
for n in xrange(start, stop, step):
i = n // _block_length
j = n % _block_length
if cur_block_idx != i:
cur_block = self.load_block(i)
cur_block_idx = i
yield unpack(cur_block[j])
def get_idxs(self, ns):
"""
Get the values at given idxs.
NOTE: if the indexes are not sorted,
performance may be really slow.
"""
_block_length = self.block_length
cur_block_idx = -1
cur_block = None
for n in ns:
n = (self.length + n) if n < 0 else n
if n < 0 or n >= self.length:
raise IndexError("Index out of range")
i = n // _block_length
j = n % _block_length
if cur_block_idx != i:
cur_block = self.load_block(i)
cur_block_idx = i
yield unpack(cur_block[j])
def __iter__(self):
for i in xrange(self.num_blocks):
cur_block = self.load_block(i)
for item in cur_block:
yield unpack(item)
def __getitem__(self, key):
if isinstance(key, slice):
return list(self.get_slice(key.start, key.stop, key.step))
elif isinstance(key, (list, tuple)):
return list(self.get_idxs(key))
else:
return self.get_idx(key)
class DatasetWriter(Closes):
"""
Writes a dataset object to a file.
"""
def __init__(self, fname, create=True, block_length=1,
comp_format="lz4", comp_level=6):
self.closed = True
# Check the parameters
block_length = int(block_length)
if block_length < 1:
raise ValueError("Block length must be at-least 1")
if comp_format not in COMP_TABLE:
raise IOError("Unknown compression: %s" % comp_format)
comp_level = int(comp_level)
if not 1 <= comp_level <= 9:
raise ValueError("Invalid compression level: %d" % comp_level)
self.fname = fname
self.store = DataStore(fname, write=True, create=create)
try:
if create:
self.block_length = block_length
self.length = 0
self.comp_format = comp_format
self.comp_level = comp_level
self.write_meta(True)
else:
if self.store.version != VERSION:
raise IOError("Invalid version: %d" % self.store.version)
self.block_length = self.store.block_length
self.length = self.store.length
self.comp_format = self.store.comp_format
self.comp_level = self.store.comp_level
self.comp_fn, self.decomp_fn = COMP_TABLE[self.comp_format]
self.closed = False
except:
self.store.close()
raise
# number of blocks already present in the dataset
self.num_blocks = self.length // self.block_length
self.num_blocks += bool(self.length % self.block_length)
if self.length % self.block_length == 0:
self.cur_block = []
else:
self.cur_block = self.load_block(self.num_blocks -1)
self.num_blocks -= 1
def write_meta(self, full=False):
"""
Write meta information.
"""
if full:
self.store.version = VERSION
self.store.block_length = self.block_length
self.store.comp_format = self.comp_format
self.store.comp_level = self.comp_level
self.store.length = self.length
def load_block(self, i):
"""
Load a block from the given file.
"""
block_hcomp = self.store.get(i)
if block_hcomp is None:
raise IOError("Block %d not in store" % i)
try:
block_raw = decomp_block(block_hcomp, self.decomp_fn)
except IOError as e:
raise IOError("Block %d: %s", (i, e)), None, sys.exc_info()[2]
return unpack(block_raw)
def dump_block(self, i, block):
"""
Write the block to the store.
"""
block_raw = pack(block)
block_hcomp = comp_block(block_raw, self.comp_fn, self.comp_level)
self.store.put(i, block_hcomp)
self.write_meta()
def flush(self, force=False):
"""
Flush the current block to output file.
"""
if len(self.cur_block) != self.block_length and not force:
raise ValueError("Cant flush unfilled block without forcing")
if not self.cur_block:
return
self.dump_block(self.num_blocks, self.cur_block)
self.num_blocks += 1
self.cur_block = []
def close(self):
if not self.closed:
self.flush(force=True)
self.store.close()
self.closed = True
def append(self, obj):
"""
Append the object to database.
"""
self.cur_block.append(pack(obj))
self.length += 1
if len(self.cur_block) == self.block_length:
self.flush()
def extend(self, iterable):
for item in iterable:
self.cur_block.append(pack(item))
self.length += 1
if len(self.cur_block) == self.block_length:
self.flush()
def open(fname, mode="r", block_length=None, comp_format="lz4", comp_level=6):
# pylint: disable=redefined-builtin
"""
Open a dataset for reading or writing.
"""
if mode == "r":
return DatasetReader(fname)
elif mode == "w":
if block_length is None:
raise ValueError("Must specify block_length for write mode")
return DatasetWriter(fname, True, block_length, comp_format, comp_level)
elif mode == "a":
return DatasetWriter(fname, False)
else:
raise ValueError("Invalid mode '%s'" % mode)
| mit | 649,836,582,525,200,100 | 27.822612 | 80 | 0.532328 | false | 3.702053 | false | false | false |
cactusbin/nyt | matplotlib/lib/matplotlib/backend_bases.py | 1 | 106941 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes suh as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
"""
from __future__ import division, print_function
import os
import warnings
import time
import io
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib._pylab_helpers import Gcf
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation
try:
from PIL import Image
_has_pil = True
except ImportError:
_has_pil = False
_backend_d = {}
def register_backend(format, backend_class):
_backend_d[format] = backend_class
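# For example (illustrative only), a backend module can register its canvas class
# for a file format so that ``savefig('figure.xyz')`` can locate it:
#
#   register_backend('xyz', FigureCanvasXYZ)   # FigureCanvasXYZ is hypothetical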
class ShowBase(object):
"""
Simple base class to generate a show() callable in backends.
Subclass must override mainloop() method.
"""
def __call__(self, block=None):
"""
Show all figures. If *block* is not None, then
it is a boolean that overrides all other factors
determining whether show blocks by calling mainloop().
The other factors are:
it does not block if run inside "ipython --pylab";
it does not block in interactive mode.
"""
managers = Gcf.get_all_fig_managers()
if not managers:
return
for manager in managers:
manager.show()
if block is not None:
if block:
self.mainloop()
return
else:
return
# Hack: determine at runtime whether we are
# inside ipython in pylab mode.
from matplotlib import pyplot
try:
ipython_pylab = not pyplot.show._needmain
# IPython versions >= 0.10 tack the _needmain
# attribute onto pyplot.show, and always set
# it to False, when in --pylab mode.
ipython_pylab = ipython_pylab and get_backend() != 'WebAgg'
# TODO: The above is a hack to get the WebAgg backend
# working with `ipython --pylab` until proper integration
# is implemented.
except AttributeError:
ipython_pylab = False
# Leave the following as a separate step in case we
# want to control this behavior with an rcParam.
if ipython_pylab:
return
if not is_interactive() or get_backend() == 'WebAgg':
self.mainloop()
def mainloop(self):
pass
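    # Illustrative sketch: an interactive backend typically exposes a
    # module-level ``show`` by subclassing ShowBase and starting its native
    # event loop in mainloop() (``mygui`` is a hypothetical toolkit):
    #
    #     class Show(ShowBase):
    #         def mainloop(self):
    #             mygui.run_main_loop()
    #     show = Show()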
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
self._text2path = textpath.TextToPath()
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`.
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans +
transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
"""
Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
:meth:`draw_path`. Some backends may want to override this in
order to render each set of path data only once, and then
reference that path multiple times with the different offsets,
colors, styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_ids, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
path, transform = path_id
transform = transforms.Affine2D(
transform.get_matrix()).translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if edgecolors is None:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], np.float_)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangle(self, gc, points, colors, transform):
"""
Draw a Gouraud-shaded triangle.
*points* is a 3x2 array of (x, y) points for the triangle.
*colors* is a 3x4 array of RGBA colors for each point of the
triangle.
*transform* is an affine transform to apply to the points.
"""
raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draws a series of Gouraud triangles.
        *triangles_array* is an Nx3x2 array of (x, y) points for the
        triangles.
        *colors_array* is an Nx3x4 array of RGBA colors for each point of
        the triangles.
*transform* is an affine transform to apply to the points.
"""
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, gc, master_transform, all_transforms,
path_ids, offsets, offsetTrans, facecolors,
edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Ntransforms = len(all_transforms)
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc0 = self.new_gc()
gc0.copy_properties(gc)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if offset_position == 'data':
if Ntransforms:
transform = (all_transforms[i % Ntransforms] +
master_transform)
else:
transform = master_transform
xo, yo = transform.transform_point((xo, yo))
xp, yp = transform.transform_point((0, 0))
xo = -(xp - xo)
yo = -(yp - yo)
if not (np.isfinite(xo) and np.isfinite(yo)):
continue
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
if Nlinewidths:
gc0.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc0.set_dashes(*linestyles[i % Nlinestyles])
fg = edgecolors[i % Nedgecolors]
if len(fg) == 4:
if fg[3] == 0.0:
gc0.set_linewidth(0)
else:
gc0.set_foreground(fg)
else:
gc0.set_foreground(fg)
if rgbFace is not None and len(rgbFace) == 4:
if rgbFace[3] == 0:
rgbFace = None
gc0.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc0.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc0, rgbFace
gc0.restore()
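    # Illustrative sketch of how a backend might use the two generators
    # above; _make_native_path and _draw_native_path are hypothetical
    # backend helpers, not real matplotlib API:
    #
    #     def draw_path_collection(self, gc, master_transform, paths,
    #                              all_transforms, offsets, offsetTrans,
    #                              facecolors, edgecolors, linewidths,
    #                              linestyles, antialiaseds, urls,
    #                              offset_position):
    #         path_ids = [self._make_native_path(path, transform)
    #                     for path, transform in
    #                     self._iter_collection_raw_paths(
    #                         master_transform, paths, all_transforms)]
    #         for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
    #                 gc, master_transform, all_transforms, path_ids,
    #                 offsets, offsetTrans, facecolors, edgecolors,
    #                 linewidths, linestyles, antialiaseds, urls,
    #                 offset_position):
    #             self._draw_native_path(path_id, xo, yo, gc0, rgbFace)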
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im):
"""
Draw the image instance into the current axes;
*gc*
a GraphicsContext containing clipping information
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
override this method for renderers that do not necessarily
want to rescale and composite raster images. (like SVG)
"""
return False
def option_scale_image(self):
"""
override this method for renderers that support arbitrary
scaling of image (most of the vector backend).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text baseline in display coords
*s*
the text string
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
*mtext*
a :class:`matplotlib.text.Text` instance
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be plotted along with
your text.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False,
usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
usetex=False)
path = Path(verts, codes)
        angle = angle / 180. * np.pi  # degrees to radians
if self.flipy():
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, self.height - y)
else:
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
draw the text by converting them to paths using textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
if ismath == 'TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
        Return true if y small numbers are at the top for the renderer.
        Is used for drawing text (:mod:`matplotlib.text`) and images
        (:mod:`matplotlib.image`) only.
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
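    # Illustrative sketch: a raster backend that knows its dpi would
    # typically override this (``self.dpi`` is assumed to exist on that
    # renderer):
    #
    #     def points_to_pixels(self, points):
    #         return points * self.dpi / 72.0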
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
"""
Used in MixedModeRenderer. Switch to the raster renderer.
"""
pass
def stop_rasterizing(self):
"""
Used in MixedModeRenderer. Switch back to the vector renderer
and draw the contents of the raster renderer as an image on
the vector renderer.
"""
pass
def start_filter(self):
"""
Used in AggRenderer. Switch to a temporary renderer for image
filtering effects.
"""
pass
def stop_filter(self, filter_func):
"""
Used in AggRenderer. Switch back to the original renderer.
The contents of the temporary renderer is processed with the
*filter_func* and is drawn on the original renderer as an
image.
"""
pass
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid': (None, None),
'dashed': (0, (6.0, 6.0)),
'dashdot': (0, (3.0, 5.0, 1.0, 5.0)),
'dotted': (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._forced_alpha = False # if True, _alpha overrides A from RGBA
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'round'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0, 1.0)
self._orig_color = (0.0, 0.0, 0.0, 1.0)
self._hatch = None
self._url = None
self._gid = None
self._snap = None
self._sketch = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._forced_alpha = gc._forced_alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._orig_color = gc._orig_color
self._hatch = gc._hatch
self._url = gc._url
self._gid = gc._gid
self._snap = gc._snap
self._sketch = gc._sketch
def restore(self):
"""
Restore the graphics context from the stack - needed only
for backends that save graphics contexts on a stack
"""
pass
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
        The dash list is an even-size list that gives the ink on, ink
        off in pixels.
        See p. 107 of the PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_forced_alpha(self):
"""
Return whether the value given by get_alpha() should be used to
override any other alpha-channel values.
"""
return self._forced_alpha
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three or four floats from 0-1.
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_gid(self):
"""
Return the object identifier if one is set, None otherwise.
"""
return self._gid
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
If ``alpha=None`` (the default), the alpha components of the
foreground and fill colors will be used to set their respective
transparencies (where applicable); otherwise, ``alpha`` will override
them.
"""
if alpha is not None:
self._alpha = alpha
self._forced_alpha = True
else:
self._alpha = 1.0
self._forced_alpha = False
self.set_foreground(self._orig_color)
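    # Illustrative sketch of the alpha override behaviour documented above
    # (uses only this base class, no backend required):
    #
    #     gc = GraphicsContextBase()
    #     gc.set_foreground((1.0, 0.0, 0.0, 0.8))  # RGBA kept as given
    #     gc.set_alpha(0.3)                        # force alpha to 0.3
    #     gc.get_rgb()                             # -> (1.0, 0.0, 0.0, 0.3)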
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b:
self._antialiased = 1
else:
self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points.
``(None, None)`` specifies a solid line
"""
if dash_list is not None:
dl = np.asarray(dash_list)
if np.any(dl <= 0.0):
raise ValueError("All values in the dash list must be positive")
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGBA=False):
"""
Set the foreground color. fg can be a MATLAB format string, a
html hex color string, an rgb or rgba unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
If you know fg is rgba, set ``isRGBA=True`` for efficiency.
"""
self._orig_color = fg
if self._forced_alpha:
self._rgb = colors.colorConverter.to_rgba(fg, self._alpha)
elif isRGBA:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._orig_color = frac
self._rgb = (frac, frac, frac, self._alpha)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted'). One may specify customized dash styles by providing
        a tuple of (offset, dash pairs). For example, the predefined
        linestyles have the following values::
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
"""
if style in self.dashd:
offset, dashes = self.dashd[style]
elif isinstance(style, tuple):
offset, dashes = style
else:
raise ValueError('Unrecognized linestyle: %s' % str(style))
self._linestyle = style
self.set_dashes(offset, dashes)
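    # Illustrative sketch: besides the named styles, a custom dash pattern
    # may be given as an (offset, on/off sequence) tuple:
    #
    #     gc.set_linestyle('dashdot')        # predefined style
    #     gc.set_linestyle((0, (8.0, 2.0)))  # 8 pt on, 2 pt off, no offset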
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_gid(self, id):
"""
Sets the id.
"""
self._gid = id
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
def get_hatch_path(self, density=6.0):
"""
Returns a Path for the current hatch.
"""
if self._hatch is None:
return None
return Path.hatch(self._hatch, density)
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
        Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
class TimerBase(object):
'''
    A base class for providing timer events, useful for things like animations.
Backends need to implement a few specific methods in order to use their
own timing mechanisms so that the timer events are integrated into their
event loops.
Mandatory functions that must be implemented:
* `_timer_start`: Contains backend-specific code for starting
the timer
* `_timer_stop`: Contains backend-specific code for stopping
the timer
Optional overrides:
* `_timer_set_single_shot`: Code for setting the timer to
single shot operating mode, if supported by the timer
object. If not, the `Timer` class itself will store the flag
and the `_on_timer` method should be overridden to support
such behavior.
* `_timer_set_interval`: Code for setting the interval on the
timer, if there is a method for doing so on the timer
object.
* `_on_timer`: This is the internal function that any timer
object should call, which will handle the task of running
all callbacks that have been set.
Attributes:
* `interval`: The time between timer events in
milliseconds. Default is 1000 ms.
* `single_shot`: Boolean flag indicating whether this timer
should operate as single shot (run once and then
stop). Defaults to `False`.
* `callbacks`: Stores list of (func, args) tuples that will be
called upon timer events. This list can be manipulated
directly, or the functions `add_callback` and
`remove_callback` can be used.
'''
def __init__(self, interval=None, callbacks=None):
        # Initialize empty callbacks list and set up default settings if necessary
if callbacks is None:
self.callbacks = []
else:
self.callbacks = callbacks[:] # Create a copy
if interval is None:
self._interval = 1000
else:
self._interval = interval
self._single = False
# Default attribute for holding the GUI-specific timer object
self._timer = None
def __del__(self):
'Need to stop timer and possibly disconnect timer.'
self._timer_stop()
def start(self, interval=None):
'''
Start the timer object. `interval` is optional and will be used
to reset the timer interval first if provided.
'''
if interval is not None:
self._set_interval(interval)
self._timer_start()
def stop(self):
'''
Stop the timer.
'''
self._timer_stop()
def _timer_start(self):
pass
def _timer_stop(self):
pass
def _get_interval(self):
return self._interval
def _set_interval(self, interval):
# Force to int since none of the backends actually support fractional
# milliseconds, and some error or give warnings.
interval = int(interval)
self._interval = interval
self._timer_set_interval()
interval = property(_get_interval, _set_interval)
def _get_single_shot(self):
return self._single
def _set_single_shot(self, ss=True):
self._single = ss
self._timer_set_single_shot()
single_shot = property(_get_single_shot, _set_single_shot)
def add_callback(self, func, *args, **kwargs):
'''
Register `func` to be called by timer when the event fires. Any
additional arguments provided will be passed to `func`.
'''
self.callbacks.append((func, args, kwargs))
def remove_callback(self, func, *args, **kwargs):
'''
Remove `func` from list of callbacks. `args` and `kwargs` are optional
and used to distinguish between copies of the same function registered
to be called with different arguments.
'''
if args or kwargs:
self.callbacks.remove((func, args, kwargs))
else:
funcs = [c[0] for c in self.callbacks]
if func in funcs:
self.callbacks.pop(funcs.index(func))
def _timer_set_interval(self):
'Used to set interval on underlying timer object.'
pass
def _timer_set_single_shot(self):
'Used to set single shot on underlying timer object.'
pass
def _on_timer(self):
'''
        Runs all functions that have been registered as callbacks. Functions
can return False (or 0) if they should not be called any more. If there
are no callbacks, the timer is automatically stopped.
'''
for func, args, kwargs in self.callbacks:
ret = func(*args, **kwargs)
# docstring above explains why we use `if ret == False` here,
# instead of `if not ret`.
if ret == False:
self.callbacks.remove((func, args, kwargs))
if len(self.callbacks) == 0:
self.stop()
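# Illustrative sketch: a callback may remove itself by returning False, as
# described in _on_timer above (``fig`` is assumed to be an existing Figure
# on an interactive backend):
#
#     counter = {'n': 0}
#     def tick():
#         counter['n'] += 1
#         print('tick', counter['n'])
#         return counter['n'] < 5      # False on the 5th call -> removed
#     timer = fig.canvas.new_timer(interval=1000)
#     timer.add_callback(tick)
#     timer.start()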
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas, guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
"""
An event triggered by a figure being closed
In addition to the :class:`Event` attributes, the following event
attributes are defined:
"""
def __init__(self, name, canvas, guiEvent=None):
Event.__init__(self, name, canvas, guiEvent)
class LocationEvent(Event):
"""
An event that has a screen location
The following additional attributes are defined and shown with
their default values.
In addition to the :class:`Event` attributes, the following
event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y, guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas, guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
if self.canvas.mouse_grabber is None:
axes_list = [a for a in self.canvas.figure.get_axes()
if a.in_axes(self)]
else:
axes_list = [self.canvas.mouse_grabber]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axes_list.sort(key=lambda x: x.zorder)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
trans = self.inaxes.transData.inverted()
xdata, ydata = trans.transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes != self.inaxes:
# process axes enter/leave events
try:
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
except:
pass
# See ticket 2901582.
# I think this is a valid exception to the rule
# against catching all exceptions; if anything goes
# wrong, we simply want to move on and process the
# current event.
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events)
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
dblclick = None # whether or not the event is the result of a double click
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, dblclick=False, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
self.dblclick = dblclick
def __str__(self):
return ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%d " +
"dblclick=%s inaxes=%s") % (self.x, self.y, self.xdata,
self.ydata, self.button,
self.dblclick, self.inaxes)
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print('on pick line:', zip(xdata[ind], ydata[ind]))
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist,
guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
the key(s) pressed. Could be **None**, a single case sensitive ascii
character ("g", "G", "#", etc.), a special key
("control", "shift", "f1", "up", etc.) or a
combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").
.. note::
Modifier keys will be prefixed to the pressed key and will be in the
order "ctrl", "alt", "super". The exception to this rule is when the
pressed key is itself a modifier key, therefore "ctrl+alt" and
"alt+control" can both be valid key values.
Example usage::
def on_key(event):
print('you pressed', event.key, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
supports_blit = True
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry()
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
self.mouse_grabber = None # the axes currently grabbing mouse
self.toolbar = None # NavigationToolbar2 will set me
self._is_saving = False
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event', self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def is_saving(self):
"""
Returns `True` when the renderer is in the process of saving
to a file, rather than rendering for an on-screen buffer.
"""
return self._is_saving
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
        under the cursor.  Connect this to the 'button_press_event'
        using::
            canvas.mpl_connect('button_press_event', canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [(h.zorder, h) for h in artists]
L.sort()
return [h for zorder, h in L]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under:
h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self, '_active'):
self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
#print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a, 'get_color'):
a.set_color(self._active[a])
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a, 'get_color'):
self._active[a] = a.get_color()
elif hasattr(a, 'get_edgecolor'):
self._active[a] = (a.get_edgecolor(), a.get_facecolor())
else:
self._active[a] = None
for a in enter:
if hasattr(a, 'get_color'):
a.set_color('red')
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else:
self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def close_event(self, guiEvent=None):
"""
        This method will call all functions connected to the
'close_event' with a :class:`CloseEvent`
"""
s = 'close_event'
try:
event = CloseEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
except (TypeError, AttributeError):
pass
# Suppress the TypeError when the python session is being killed.
# It may be that a better solution would be a mechanism to
# disconnect all callbacks upon shutdown.
# AttributeError occurs on OSX with qt4agg upon exiting
# with an open window; 'callbacks' attribute no longer exists.
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
        fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key,
dblclick=dblclick, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
self._lastx, self._lasty = None, None
def enter_notify_event(self, guiEvent=None, xy=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
*xy*
the coordinate location of the pointer when the canvas is
entered
"""
if xy is not None:
x, y = xy
self._lastx, self._lasty = x, y
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
"""Called when GUI is idle."""
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
            raise RuntimeError('two different axes attempted to grab mouse input')
self.mouse_grabber = ax
def release_mouse(self, ax):
"""
Release the mouse grab held by the axes, ax.
Usually called by the widgets.
        It is ok to call this even if *ax* doesn't have the mouse
grab currently.
"""
if self.mouse_grabber is ax:
self.mouse_grabber = None
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
Return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'LaTeX PGF Figure',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
    # TODO: these print_* throw ImportError when called from
# compare_images_decorator (decorators.py line 112)
# if the backend has not already been loaded earlier on. Simple trigger:
# >>> import matplotlib.tests.test_spines
# >>> list(matplotlib.tests.test_spines.test_spines_axes_positions())[0][0]()
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_pgf(self, *args, **kwargs):
from backends.backend_pgf import FigureCanvasPgf # lazy import
pgf = self.switch_backends(FigureCanvasPgf)
return pgf.print_pgf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgba = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
if _has_pil:
filetypes['jpg'] = 'Joint Photographic Experts Group'
filetypes['jpeg'] = filetypes['jpg']
def print_jpg(self, filename_or_obj, *args, **kwargs):
"""
Supported kwargs:
*quality*: The image quality, on a scale from 1 (worst) to
95 (best). The default is 95, if not given in the
matplotlibrc file in the savefig.jpeg_quality parameter.
Values above 95 should be avoided; 100 completely
disables the JPEG quantization stage.
*optimize*: If present, indicates that the encoder should
make an extra pass over the image in order to select
optimal encoder settings.
*progressive*: If present, indicates that this image
should be stored as a progressive JPEG file.
"""
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
buf, size = agg.print_to_buffer()
if kwargs.pop("dryrun", False):
return
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
options = cbook.restrict_dict(kwargs, ['quality', 'optimize',
'progressive'])
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
return image.save(filename_or_obj, format='jpeg', **options)
print_jpeg = print_jpg
filetypes['tif'] = filetypes['tiff'] = 'Tagged Image File Format'
def print_tif(self, filename_or_obj, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
buf, size = agg.print_to_buffer()
if kwargs.pop("dryrun", False):
return
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
dpi = (self.figure.dpi, self.figure.dpi)
return image.save(filename_or_obj, format='tiff',
dpi=dpi)
print_tiff = print_tif
def get_supported_filetypes(self):
"""Return dict of savefig file formats supported by this backend"""
return self.filetypes
def get_supported_filetypes_grouped(self):
"""Return a dict of savefig file formats supported by this backend,
where the keys are a file type name, such as 'Joint Photographic
Experts Group', and the values are a list of filename extensions used
for that filetype, such as ['jpg', 'jpeg']."""
groupings = {}
for ext, name in self.filetypes.iteritems():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
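    # Illustrative sketch of the grouped mapping returned above; the exact
    # contents depend on the backend and on whether PIL is installed:
    #
    #     {'Portable Network Graphics': ['png'],
    #      'Joint Photographic Experts Group': ['jpeg', 'jpg'],
    #      'Scalable Vector Graphics': ['svg', 'svgz'],
    #      ...}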
def _get_print_method(self, format):
method_name = 'print_%s' % format
# check for registered backends
if format in _backend_d:
backend_class = _backend_d[format]
def _print_method(*args, **kwargs):
backend = self.switch_backends(backend_class)
print_method = getattr(backend, method_name)
return print_method(*args, **kwargs)
return _print_method
formats = self.get_supported_filetypes()
if (format not in formats or not hasattr(self, method_name)):
formats = sorted(formats)
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
return getattr(self, method_name)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
*orientation*
            'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
*bbox_inches*
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure. If None, use savefig.bbox
*pad_inches*
Amount of padding around the figure when bbox_inches is
'tight'. If None, use savefig.pad_inches
*bbox_extra_artists*
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
if format is None:
# get format from filename, or from backend's default filetype
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
print_method = self._get_print_method(format)
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
bbox_inches = kwargs.pop("bbox_inches", None)
if bbox_inches is None:
bbox_inches = rcParams['savefig.bbox']
if bbox_inches:
# call adjust_bbox to save only the given area
if bbox_inches == "tight":
# when bbox_inches == "tight", it saves the figure
# twice. The first save command is just to estimate
# the bounding box of the figure. A stringIO object is
# used as a temporary file object, but it causes a
# problem for some backends (ps backend with
# usetex=True) if they expect a filename, not a
# file-like object. As I think it is best to change
            # the backend to support file-like object, I'm going
# to leave it as it is. However, a better solution
# than stringIO seems to be needed. -JJL
#result = getattr(self, method_name)
result = print_method(
io.BytesIO(),
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
dryrun=True,
**kwargs)
renderer = self.figure._cachedRenderer
bbox_inches = self.figure.get_tightbbox(renderer)
bbox_artists = kwargs.pop("bbox_extra_artists", None)
if bbox_artists is None:
bbox_artists = self.figure.get_default_bbox_extra_artists()
bbox_filtered = []
for a in bbox_artists:
bbox = a.get_window_extent(renderer)
if a.get_clip_on():
clip_box = a.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = a.get_clip_path()
if clip_path is not None and bbox is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox,
clip_path.get_extents())
if bbox is not None and (bbox.width != 0 or
bbox.height != 0):
bbox_filtered.append(bbox)
if bbox_filtered:
_bbox = Bbox.union(bbox_filtered)
trans = Affine2D().scale(1.0 / self.figure.dpi)
bbox_extra = TransformedBbox(_bbox, trans)
bbox_inches = Bbox.union([bbox_inches, bbox_extra])
pad = kwargs.pop("pad_inches", None)
if pad is None:
pad = rcParams['savefig.pad_inches']
bbox_inches = bbox_inches.padded(pad)
restore_bbox = tight_bbox.adjust_bbox(self.figure, format,
bbox_inches)
_bbox_inches_restore = (bbox_inches, restore_bbox)
else:
_bbox_inches_restore = None
self._is_saving = True
try:
#result = getattr(self, method_name)(
result = print_method(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
bbox_inches_restore=_bbox_inches_restore,
**kwargs)
finally:
if bbox_inches and restore_bbox:
restore_bbox()
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
self._is_saving = False
#self.figure.canvas.draw() ## seems superfluous
return result
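    # Illustrative usage sketch (``fig`` is assumed to be an existing
    # Figure); this is essentially what Figure.savefig delegates to:
    #
    #     fig.canvas.print_figure('plot.png', dpi=150,
    #                             bbox_inches='tight', pad_inches=0.1)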
def get_default_filetype(self):
"""
Get the default savefig file format as specified in rcParam
``savefig.format``. Returned string excludes period. Overridden
in backends that only support a single file type.
"""
return rcParams['savefig.format']
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
return self.manager.get_window_title()
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def get_default_filename(self):
"""
Return a string, which includes extension, suitable for use as
a default filename.
"""
default_filename = self.get_window_title() or 'image'
default_filename = default_filename.lower().replace(' ', '_')
return default_filename + '.' + self.get_default_filetype()
def switch_backends(self, FigureCanvasClass):
"""
Instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
newCanvas._is_saving = self._is_saving
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
- 'figure_enter_event',
- 'figure_leave_event',
- 'axes_enter_event',
- 'axes_leave_event'
- 'close_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
        set to the :class:`~matplotlib.axes.Axes` the event occurred
        over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
Disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only for
backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
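        A hedged example (``fig`` and ``update`` are assumed names, not part
        of this API)::
            def update():
                print('timer fired')
            timer = fig.canvas.new_timer(interval=1000,
                                         callbacks=[(update, [], {})])
            timer.start()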
"""
return TimerBase(*args, **kwargs)
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self, timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
        such, it throws a deprecation warning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
        msg = "Using default event loop until function specific"
        msg += " to this GUI is implemented"
        warnings.warn(msg, mplDeprecation)
if timeout <= 0:
timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter * timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
def key_press_handler(event, canvas, toolbar=None):
"""
Implement the default mpl key bindings for the canvas and toolbar
described at :ref:`key-event-handling`
*event*
a :class:`KeyEvent` instance
*canvas*
a :class:`FigureCanvasBase` instance
*toolbar*
a :class:`NavigationToolbar2` instance
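    Example usage (``canvas`` and ``toolbar`` are assumed to be existing
    instances)::
        def on_key_press(event):
            key_press_handler(event, canvas, toolbar)
        cid = canvas.mpl_connect('key_press_event', on_key_press)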
"""
# these bindings happen whether you are over an axes or not
if event.key is None:
return
# Load key-mappings from your matplotlibrc file.
fullscreen_keys = rcParams['keymap.fullscreen']
home_keys = rcParams['keymap.home']
back_keys = rcParams['keymap.back']
forward_keys = rcParams['keymap.forward']
pan_keys = rcParams['keymap.pan']
zoom_keys = rcParams['keymap.zoom']
save_keys = rcParams['keymap.save']
quit_keys = rcParams['keymap.quit']
grid_keys = rcParams['keymap.grid']
toggle_yscale_keys = rcParams['keymap.yscale']
toggle_xscale_keys = rcParams['keymap.xscale']
all = rcParams['keymap.all_axes']
# toggle fullscreen mode (default key 'f')
if event.key in fullscreen_keys:
canvas.manager.full_screen_toggle()
    # quit the figure (default key 'ctrl+w')
if event.key in quit_keys:
Gcf.destroy_fig(canvas.figure)
if toolbar is not None:
# home or reset mnemonic (default key 'h', 'home' and 'r')
if event.key in home_keys:
toolbar.home()
# forward / backward keys to enable left handed quick navigation
# (default key for backward: 'left', 'backspace' and 'c')
elif event.key in back_keys:
toolbar.back()
# (default key for forward: 'right' and 'v')
elif event.key in forward_keys:
toolbar.forward()
# pan mnemonic (default key 'p')
elif event.key in pan_keys:
toolbar.pan()
# zoom mnemonic (default key 'o')
elif event.key in zoom_keys:
toolbar.zoom()
# saving current figure (default key 's')
elif event.key in save_keys:
toolbar.save_figure()
if event.inaxes is None:
return
# these bindings require the mouse to be over an axes to trigger
# switching on/off a grid in current axes (default key 'g')
if event.key in grid_keys:
event.inaxes.grid()
canvas.draw()
# toggle scaling of y-axes between 'log and 'linear' (default key 'l')
elif event.key in toggle_yscale_keys:
ax = event.inaxes
scale = ax.get_yscale()
if scale == 'log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale == 'linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
# toggle scaling of x-axes between 'log and 'linear' (default key 'k')
elif event.key in toggle_xscale_keys:
ax = event.inaxes
scalex = ax.get_xscale()
if scalex == 'log':
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif scalex == 'linear':
ax.set_xscale('log')
ax.figure.canvas.draw()
elif (event.key.isdigit() and event.key != '0') or event.key in all:
        # keys in the 'all' list enable all axes (default key 'a'),
# otherwise if key is a number only enable this particular axes
# if it was the axes, where the event was raised
if not (event.key in all):
n = int(event.key) - 1
for i, a in enumerate(canvas.figure.get_axes()):
# consider axes, in which the event was raised
# FIXME: Why only this axes?
if event.x is not None and event.y is not None \
and a.in_axes(event):
if event.key in all:
a.set_navigate(True)
else:
a.set_navigate(i == n)
class NonGuiException(Exception):
pass
class FigureManagerBase:
"""
Helper class for pyplot mode, wraps everything up into a neat bundle
    Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.key_press_handler_id = self.canvas.mpl_connect('key_press_event',
self.key_press)
"""
The returned id from connecting the default key handler via
        :meth:`FigureCanvasBase.mpl_connect`.
To disable default key press handling::
manager, canvas = figure.canvas.manager, figure.canvas
canvas.mpl_disconnect(manager.key_press_handler_id)
"""
def show(self):
"""
For GUI backends, show the figure window and redraw.
For non-GUI backends, raise an exception to be caught
by :meth:`~matplotlib.figure.Figure.show`, for an
optional warning.
"""
raise NonGuiException()
def destroy(self):
pass
def full_screen_toggle(self):
pass
    def resize(self, w, h):
        """For GUI backends, resize the window (in pixels)."""
pass
def key_press(self, event):
"""
Implement the default mpl key bindings defined at
:ref:`key-event-handling`
"""
key_press_handler(event, self.canvas, self.canvas.toolbar)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None for non-GUI backends (eg, a PS backend).
"""
return 'image'
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect for non-GUI backends (eg, a PS backend).
"""
pass
class Cursors:
# this class is only used as a simple namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2(object):
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
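    A minimal, hedged sketch of a concrete subclass (the class name is
    illustrative and the toolkit-specific widget code is omitted)::
        class NavigationToolbar2MyGUI(NavigationToolbar2):
            def _init_toolbar(self):
                # create toolkit buttons here and bind them to self.home,
                # self.back, self.forward, self.pan, self.zoom and
                # self.save_figure
                pass
            def save_figure(self, *args):
                fname = self.canvas.get_default_filename()
                self.canvas.print_figure(fname)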
"""
# list of toolitems to add to the toolbar, format is:
# (
# text, # the text of the button (often not visible to users)
# tooltip_text, # the tooltip shown on hover (where possible)
# image_file, # name of the image for the button (without the extension)
# name_of_method, # name of the method in NavigationToolbar2 to call
# )
toolitems = (
('Home', 'Reset original view', 'home', 'home'),
('Back', 'Back to previous view', 'back', 'back'),
('Forward', 'Forward to next view', 'forward', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
('Save', 'Save the figure', 'filesave', 'save_figure'),
)
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time
# of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
self._ids_zoom = []
self._zoom_mode = None
self._button_pressed = None # determined by the button pressed
# at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
"""Display a message on toolbar or in status bar"""
pass
def back(self, *args):
"""move back up the view lim stack"""
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
"""Draw a rectangle rubberband to indicate zoom limits"""
pass
def forward(self, *args):
"""Move forward in the view lim stack"""
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
"""Restore the original view"""
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active == 'ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
elif (self._active == 'PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else:
self.set_message(self.mode)
def pan(self, *args):
"""Activate the pan/zoom tool. pan with left button, zoom with right"""
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
    def press(self, event):
        """Called whenever a mouse button is pressed."""
pass
def press_pan(self, event):
"""the press mouse button in pan/zoom mode callback"""
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect('motion_notify_event',
self.drag_pan)
self.press(event)
def press_zoom(self, event):
"""the press mouse button in zoom to rect mode callback"""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self.release(event)
self.draw()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a.viewLim.frozen(),
a.transData.frozen()))
id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
id2 = self.canvas.mpl_connect('key_press_event',
self._switch_on_zoom_mode)
id3 = self.canvas.mpl_connect('key_release_event',
self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
self.press(event)
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self.mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self.mouse_move(event)
def push_current(self):
"""push the current view limits and position onto the stack"""
lims = []
pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append((xmin, xmax, ymin, ymax))
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
"""this will be called whenever mouse button is released"""
pass
def release_pan(self, event):
"""the release mouse button callback in pan/zoom mode"""
if self._button_pressed is None:
return
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress:
return
self._xypress = []
self._button_pressed = None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
"""the drag callback in pan/zoom mode"""
for a, ind in self._xypress:
            # Safer to use the button recorded at the press than the current
            # button: multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def drag_zoom(self, event):
"""the drag callback in zoom mode"""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
# adjust x, last, y, last
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.draw_rubberband(event, x, y, lastx, lasty)
def release_zoom(self, event):
"""the release mouse button callback in zoom to rect mode"""
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
if not self._xypress:
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x - lastx) < 5 or abs(y - lasty) < 5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point((lastx, lasty))
x, y = inverse.transform_point((x, y))
Xmin, Xmax = a.get_xlim()
Ymin, Ymax = a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x < lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 < Xmin:
x0 = Xmin
if x1 > Xmax:
x1 = Xmax
else:
if x > lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 > Xmin:
x0 = Xmin
if x1 < Xmax:
x1 = Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y < lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 < Ymin:
y0 = Ymin
if y1 > Ymax:
y1 = Ymax
else:
if y > lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 > Ymin:
y0 = Ymin
if y1 < Ymax:
y1 = Ymax
if self._button_pressed == 1:
if self._zoom_mode == "x":
a.set_xlim((x0, x1))
elif self._zoom_mode == "y":
a.set_ylim((y0, y1))
else:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale() == 'log':
alpha = np.log(Xmax / Xmin) / np.log(x1 / x0)
rx1 = pow(Xmin / x0, alpha) * Xmin
rx2 = pow(Xmax / x0, alpha) * Xmin
else:
alpha = (Xmax - Xmin) / (x1 - x0)
rx1 = alpha * (Xmin - x0) + Xmin
rx2 = alpha * (Xmax - x0) + Xmin
if a.get_yscale() == 'log':
alpha = np.log(Ymax / Ymin) / np.log(y1 / y0)
ry1 = pow(Ymin / y0, alpha) * Ymin
ry2 = pow(Ymax / y0, alpha) * Ymin
else:
alpha = (Ymax - Ymin) / (y1 - y0)
ry1 = alpha * (Ymin - y0) + Ymin
ry2 = alpha * (Ymax - y0) + Ymin
if self._zoom_mode == "x":
a.set_xlim((rx1, rx2))
elif self._zoom_mode == "y":
a.set_ylim((ry1, ry2))
else:
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self._zoom_mode = None
self.push_current()
self.release(event)
def draw(self):
"""Redraw the canvases, update the locators"""
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
"""Update the viewlim and position from the view and
position stack for each axes
"""
lims = self._views()
if lims is None:
return
pos = self._positions()
if pos is None:
return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.draw()
def save_figure(self, *args):
"""Save the current figure"""
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
"""Reset the axes stack"""
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
"""Activate zoom to rect mode"""
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event',
self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event',
self.release_zoom)
self.mode = 'zoom rect'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
"""Enable or disable back/forward button"""
pass
| unlicense | 6,469,178,272,311,308,000 | 32.419063 | 81 | 0.562338 | false | 4.196068 | false | false | false |
jeeftor/alfredToday | src/lib/pyexchange/exchange2010/soap_request.py | 1 | 19212 | """
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");?you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software?distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from lxml.builder import ElementMaker
from ..utils import convert_datetime_to_utc
from ..compat import _unicode
MSG_NS = u'http://schemas.microsoft.com/exchange/services/2006/messages'
TYPE_NS = u'http://schemas.microsoft.com/exchange/services/2006/types'
SOAP_NS = u'http://schemas.xmlsoap.org/soap/envelope/'
NAMESPACES = {u'm': MSG_NS, u't': TYPE_NS, u's': SOAP_NS}
M = ElementMaker(namespace=MSG_NS, nsmap=NAMESPACES)
T = ElementMaker(namespace=TYPE_NS, nsmap=NAMESPACES)
EXCHANGE_DATETIME_FORMAT = u"%Y-%m-%dT%H:%M:%SZ"
EXCHANGE_DATE_FORMAT = u"%Y-%m-%d"
DISTINGUISHED_IDS = (
'calendar', 'contacts', 'deleteditems', 'drafts', 'inbox', 'journal', 'notes', 'outbox', 'sentitems',
'tasks', 'msgfolderroot', 'root', 'junkemail', 'searchfolders', 'voicemail', 'recoverableitemsroot',
'recoverableitemsdeletions', 'recoverableitemsversions', 'recoverableitemspurges', 'archiveroot',
'archivemsgfolderroot', 'archivedeleteditems', 'archiverecoverableitemsroot',
'Archiverecoverableitemsdeletions', 'Archiverecoverableitemsversions', 'Archiverecoverableitemspurges',
)
def exchange_header():
return T.RequestServerVersion({u'Version': u'Exchange2010'})
def resource_node(element, resources):
"""
Helper function to generate a person/conference room node from an email address
<t:OptionalAttendees>
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
</t:OptionalAttendees>
"""
for attendee in resources:
element.append(
T.Attendee(
T.Mailbox(
T.EmailAddress(attendee.email)
)
)
)
return element
def delete_field(field_uri):
"""
Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of
appending.
<t:DeleteItemField>
<t:FieldURI FieldURI="calendar:Resources"/>
</t:DeleteItemField>
"""
root = T.DeleteItemField(
T.FieldURI(FieldURI=field_uri)
)
return root
def get_item(exchange_id, format=u"Default"):
"""
Requests a calendar item from the store.
exchange_id is the id for this event in the Exchange store.
  format controls how much data you get back from Exchange. Full docs are here, but acceptable values
are IdOnly, Default, and AllProperties.
http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
<m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:ItemShape>
<t:BaseShape>{format}</t:BaseShape>
</m:ItemShape>
<m:ItemIds>
<t:ItemId Id="{exchange_id}"/>
</m:ItemIds>
</m:GetItem>
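  A hedged Python usage sketch (the id below is a placeholder, not a real
  Exchange item id)::
    from lxml import etree
    request_xml = get_item(u'<exchange-item-id>', format=u"AllProperties")
    print(etree.tostring(request_xml, pretty_print=True))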
"""
elements = list()
if type(exchange_id) == list:
for item in exchange_id:
elements.append(T.ItemId(Id=item))
else:
elements = [T.ItemId(Id=exchange_id)]
root = M.GetItem(
M.ItemShape(
T.BaseShape(format)
),
M.ItemIds(
*elements
)
)
return root
def get_calendar_items(format=u"Default", start=None, end=None, max_entries=999999):
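  """Build a FindItem request for the calendar folder covering the
  [start, end] window, returning at most max_entries items."""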
start = start.strftime(EXCHANGE_DATETIME_FORMAT)
end = end.strftime(EXCHANGE_DATETIME_FORMAT)
root = M.FindItem(
{u'Traversal': u'Shallow'},
M.ItemShape(
T.BaseShape(format)
),
M.CalendarView({
u'MaxEntriesReturned': _unicode(max_entries),
u'StartDate': start,
u'EndDate': end,
}),
M.ParentFolderIds(T.DistinguishedFolderId(Id=u"calendar")),
)
return root
def get_master(exchange_id, format=u"Default"):
"""
Requests a calendar item from the store.
exchange_id is the id for this event in the Exchange store.
  format controls how much data you get back from Exchange. Full docs are here, but acceptable values
are IdOnly, Default, and AllProperties.
http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
<m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:ItemShape>
<t:BaseShape>{format}</t:BaseShape>
</m:ItemShape>
<m:ItemIds>
<t:RecurringMasterItemId OccurrenceId="{exchange_id}"/>
</m:ItemIds>
</m:GetItem>
"""
root = M.GetItem(
M.ItemShape(
T.BaseShape(format)
),
M.ItemIds(
T.RecurringMasterItemId(OccurrenceId=exchange_id)
)
)
return root
def get_occurrence(exchange_id, instance_index, format=u"Default"):
"""
Requests one or more calendar items from the store matching the master & index.
exchange_id is the id for the master event in the Exchange store.
  format controls how much data you get back from Exchange. Full docs are here, but acceptable values
are IdOnly, Default, and AllProperties.
GetItem Doc:
http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
OccurrenceItemId Doc:
http://msdn.microsoft.com/en-us/library/office/aa580744(v=exchg.150).aspx
<m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:ItemShape>
<t:BaseShape>{format}</t:BaseShape>
</m:ItemShape>
<m:ItemIds>
{% for index in instance_index %}
<t:OccurrenceItemId RecurringMasterId="{exchange_id}" InstanceIndex="{{ index }}"/>
{% endfor %}
</m:ItemIds>
</m:GetItem>
"""
root = M.GetItem(
M.ItemShape(
T.BaseShape(format)
),
M.ItemIds()
)
items_node = root.xpath("//m:ItemIds", namespaces=NAMESPACES)[0]
for index in instance_index:
items_node.append(T.OccurrenceItemId(RecurringMasterId=exchange_id, InstanceIndex=str(index)))
return root
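# The folder helpers below map well-known folder names (e.g. 'calendar',
# 'inbox') to DistinguishedFolderId elements and any other id to a plain
# FolderId element.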
def get_folder(folder_id, format=u"Default"):
id = T.DistinguishedFolderId(Id=folder_id) if folder_id in DISTINGUISHED_IDS else T.FolderId(Id=folder_id)
root = M.GetFolder(
M.FolderShape(
T.BaseShape(format)
),
M.FolderIds(id)
)
return root
def new_folder(folder):
id = T.DistinguishedFolderId(Id=folder.parent_id) if folder.parent_id in DISTINGUISHED_IDS else T.FolderId(Id=folder.parent_id)
if folder.folder_type == u'Folder':
folder_node = T.Folder(T.DisplayName(folder.display_name))
elif folder.folder_type == u'CalendarFolder':
folder_node = T.CalendarFolder(T.DisplayName(folder.display_name))
root = M.CreateFolder(
M.ParentFolderId(id),
M.Folders(folder_node)
)
return root
def find_folder(parent_id, format=u"Default"):
id = T.DistinguishedFolderId(Id=parent_id) if parent_id in DISTINGUISHED_IDS else T.FolderId(Id=parent_id)
root = M.FindFolder(
{u'Traversal': u'Shallow'},
M.FolderShape(
T.BaseShape(format)
),
M.ParentFolderIds(id)
)
return root
def delete_folder(folder):
root = M.DeleteFolder(
{u'DeleteType': 'HardDelete'},
M.FolderIds(
T.FolderId(Id=folder.id)
)
)
return root
def new_event(event):
"""
Requests a new event be created in the store.
http://msdn.microsoft.com/en-us/library/aa564690(v=exchg.140).aspx
<m:CreateItem SendMeetingInvitations="SendToAllAndSaveCopy"
xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:SavedItemFolderId>
<t:DistinguishedFolderId Id="calendar"/>
</m:SavedItemFolderId>
<m:Items>
<t:CalendarItem>
<t:Subject>{event.subject}</t:Subject>
<t:Body BodyType="HTML">{event.subject}</t:Body>
<t:Start></t:Start>
<t:End></t:End>
<t:Location></t:Location>
<t:RequiredAttendees>
{% for attendee_email in meeting.required_attendees %}
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
                {% endfor %}
</t:RequiredAttendees>
{% if meeting.optional_attendees %}
<t:OptionalAttendees>
{% for attendee_email in meeting.optional_attendees %}
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
{% endfor %}
</t:OptionalAttendees>
{% endif %}
{% if meeting.conference_room %}
<t:Resources>
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ meeting.conference_room.email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
</t:Resources>
{% endif %}
</t:CalendarItem>
</m:Items>
</m:CreateItem>
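  A hedged usage sketch (``event`` stands for any calendar-event object
  exposing the attributes read below, such as calendar_id, subject, body,
  start and end)::
    request_xml = new_event(event)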
"""
id = T.DistinguishedFolderId(Id=event.calendar_id) if event.calendar_id in DISTINGUISHED_IDS else T.FolderId(Id=event.calendar_id)
start = convert_datetime_to_utc(event.start)
end = convert_datetime_to_utc(event.end)
root = M.CreateItem(
M.SavedItemFolderId(id),
M.Items(
T.CalendarItem(
T.Subject(event.subject),
T.Body(event.body or u'', BodyType="HTML"),
)
),
SendMeetingInvitations="SendToAllAndSaveCopy"
)
calendar_node = root.xpath(u'/m:CreateItem/m:Items/t:CalendarItem', namespaces=NAMESPACES)[0]
if event.reminder_minutes_before_start:
calendar_node.append(T.ReminderIsSet('true'))
calendar_node.append(T.ReminderMinutesBeforeStart(str(event.reminder_minutes_before_start)))
else:
calendar_node.append(T.ReminderIsSet('false'))
calendar_node.append(T.Start(start.strftime(EXCHANGE_DATETIME_FORMAT)))
calendar_node.append(T.End(end.strftime(EXCHANGE_DATETIME_FORMAT)))
if event.is_all_day:
calendar_node.append(T.IsAllDayEvent('true'))
calendar_node.append(T.Location(event.location or u''))
if event.required_attendees:
calendar_node.append(resource_node(element=T.RequiredAttendees(), resources=event.required_attendees))
if event.optional_attendees:
calendar_node.append(resource_node(element=T.OptionalAttendees(), resources=event.optional_attendees))
if event.resources:
calendar_node.append(resource_node(element=T.Resources(), resources=event.resources))
if event.recurrence:
if event.recurrence == u'daily':
recurrence = T.DailyRecurrence(
T.Interval(str(event.recurrence_interval)),
)
elif event.recurrence == u'weekly':
recurrence = T.WeeklyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DaysOfWeek(event.recurrence_days),
)
elif event.recurrence == u'monthly':
recurrence = T.AbsoluteMonthlyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DayOfMonth(str(event.start.day)),
)
elif event.recurrence == u'yearly':
recurrence = T.AbsoluteYearlyRecurrence(
T.DayOfMonth(str(event.start.day)),
T.Month(event.start.strftime("%B")),
)
calendar_node.append(
T.Recurrence(
recurrence,
T.EndDateRecurrence(
T.StartDate(event.start.strftime(EXCHANGE_DATE_FORMAT)),
T.EndDate(event.recurrence_end_date.strftime(EXCHANGE_DATE_FORMAT)),
)
)
)
return root
def delete_event(event):
"""
Requests an item be deleted from the store.
<DeleteItem
xmlns="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"
DeleteType="HardDelete"
SendMeetingCancellations="SendToAllAndSaveCopy"
AffectedTaskOccurrences="AllOccurrences">
<ItemIds>
<t:ItemId Id="{{ id }}" ChangeKey="{{ change_key }}"/>
</ItemIds>
</DeleteItem>
"""
root = M.DeleteItem(
M.ItemIds(
T.ItemId(Id=event.id, ChangeKey=event.change_key)
),
DeleteType="HardDelete",
SendMeetingCancellations="SendToAllAndSaveCopy",
AffectedTaskOccurrences="AllOccurrences"
)
return root
def move_event(event, folder_id):
id = T.DistinguishedFolderId(Id=folder_id) if folder_id in DISTINGUISHED_IDS else T.FolderId(Id=folder_id)
root = M.MoveItem(
M.ToFolderId(id),
M.ItemIds(
T.ItemId(Id=event.id, ChangeKey=event.change_key)
)
)
return root
def move_folder(folder, folder_id):
id = T.DistinguishedFolderId(Id=folder_id) if folder_id in DISTINGUISHED_IDS else T.FolderId(Id=folder_id)
root = M.MoveFolder(
M.ToFolderId(id),
M.FolderIds(
T.FolderId(Id=folder.id)
)
)
return root
def update_property_node(node_to_insert, field_uri):
""" Helper function - generates a SetItemField which tells Exchange you want to overwrite the contents of a field."""
root = T.SetItemField(
T.FieldURI(FieldURI=field_uri),
T.CalendarItem(node_to_insert)
)
return root
def update_item(event, updated_attributes, calendar_item_update_operation_type):
""" Saves updates to an event in the store. Only request changes for attributes that have actually changed."""
root = M.UpdateItem(
M.ItemChanges(
T.ItemChange(
T.ItemId(Id=event.id, ChangeKey=event.change_key),
T.Updates()
)
),
ConflictResolution=u"AlwaysOverwrite",
MessageDisposition=u"SendAndSaveCopy",
SendMeetingInvitationsOrCancellations=calendar_item_update_operation_type
)
update_node = root.xpath(u'/m:UpdateItem/m:ItemChanges/t:ItemChange/t:Updates', namespaces=NAMESPACES)[0]
# if not send_only_to_changed_attendees:
# # We want to resend invites, which you do by setting an attribute to the same value it has. Right now, events
# # are always scheduled as Busy time, so we just set that again.
# update_node.append(
# update_property_node(field_uri="calendar:LegacyFreeBusyStatus", node_to_insert=T.LegacyFreeBusyStatus("Busy"))
# )
if u'html_body' in updated_attributes:
update_node.append(
update_property_node(field_uri="item:Body", node_to_insert=T.Body(event.html_body, BodyType="HTML"))
)
if u'text_body' in updated_attributes:
update_node.append(
update_property_node(field_uri="item:Body", node_to_insert=T.Body(event.text_body, BodyType="Text"))
)
if u'subject' in updated_attributes:
update_node.append(
update_property_node(field_uri="item:Subject", node_to_insert=T.Subject(event.subject))
)
if u'start' in updated_attributes:
start = convert_datetime_to_utc(event.start)
update_node.append(
update_property_node(field_uri="calendar:Start", node_to_insert=T.Start(start.strftime(EXCHANGE_DATETIME_FORMAT)))
)
if u'end' in updated_attributes:
end = convert_datetime_to_utc(event.end)
update_node.append(
update_property_node(field_uri="calendar:End", node_to_insert=T.End(end.strftime(EXCHANGE_DATETIME_FORMAT)))
)
if u'location' in updated_attributes:
update_node.append(
update_property_node(field_uri="calendar:Location", node_to_insert=T.Location(event.location))
)
if u'online_meeting' in updated_attributes:
    print("Not yet Implemented")
pass
if u'attendees' in updated_attributes:
if event.required_attendees:
required = resource_node(element=T.RequiredAttendees(), resources=event.required_attendees)
update_node.append(
update_property_node(field_uri="calendar:RequiredAttendees", node_to_insert=required)
)
else:
update_node.append(delete_field(field_uri="calendar:RequiredAttendees"))
if event.optional_attendees:
optional = resource_node(element=T.OptionalAttendees(), resources=event.optional_attendees)
update_node.append(
update_property_node(field_uri="calendar:OptionalAttendees", node_to_insert=optional)
)
else:
update_node.append(delete_field(field_uri="calendar:OptionalAttendees"))
if u'resources' in updated_attributes:
if event.resources:
resources = resource_node(element=T.Resources(), resources=event.resources)
update_node.append(
update_property_node(field_uri="calendar:Resources", node_to_insert=resources)
)
else:
update_node.append(delete_field(field_uri="calendar:Resources"))
if u'reminder_minutes_before_start' in updated_attributes:
if event.reminder_minutes_before_start:
update_node.append(
update_property_node(field_uri="item:ReminderIsSet", node_to_insert=T.ReminderIsSet('true'))
)
update_node.append(
update_property_node(
field_uri="item:ReminderMinutesBeforeStart",
node_to_insert=T.ReminderMinutesBeforeStart(str(event.reminder_minutes_before_start))
)
)
else:
update_node.append(
update_property_node(field_uri="item:ReminderIsSet", node_to_insert=T.ReminderIsSet('false'))
)
if u'is_all_day' in updated_attributes:
update_node.append(
update_property_node(field_uri="calendar:IsAllDayEvent", node_to_insert=T.IsAllDayEvent(str(event.is_all_day).lower()))
)
for attr in event.RECURRENCE_ATTRIBUTES:
if attr in updated_attributes:
recurrence_node = T.Recurrence()
if event.recurrence == 'daily':
recurrence_node.append(
T.DailyRecurrence(
T.Interval(str(event.recurrence_interval)),
)
)
elif event.recurrence == 'weekly':
recurrence_node.append(
T.WeeklyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DaysOfWeek(event.recurrence_days),
)
)
elif event.recurrence == 'monthly':
recurrence_node.append(
T.AbsoluteMonthlyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DayOfMonth(str(event.start.day)),
)
)
elif event.recurrence == 'yearly':
recurrence_node.append(
T.AbsoluteYearlyRecurrence(
T.DayOfMonth(str(event.start.day)),
T.Month(event.start.strftime("%B")),
)
)
recurrence_node.append(
T.EndDateRecurrence(
T.StartDate(event.start.strftime(EXCHANGE_DATE_FORMAT)),
T.EndDate(event.recurrence_end_date.strftime(EXCHANGE_DATE_FORMAT)),
)
)
update_node.append(
update_property_node(field_uri="calendar:Recurrence", node_to_insert=recurrence_node)
)
return root
| mit | 973,815,431,249,674,600 | 30.087379 | 212 | 0.656725 | false | 3.440544 | false | false | false |
remap/fountainhead | src/fountain_script.py | 1 | 1679 | # -*- Mode:python c-file-style:"gnu" indent-tabs-mode:nil -*- */
#
# Copyright (C) 2014-2015 Regents of the University of California.
# Author: Zhehao Wang <wangzhehao410305gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# A copy of the GNU General Public License is in the file COPYING.
# This module defines the FountainScript class, which takes a file name and
# constructs the element structure by calling the parser. A script contains an
# element array and a title-elements dictionary; both are used by the HTML
# generator.
# Ported to Python from objc in nyousefi/Fountain repository
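# A hedged usage sketch (the file name is illustrative):
#
#   script = FountainScript('screenplay.fountain')
#   for element in script._elements:
#       print(element)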
from fountain_parser import Parser, ParserVersion
class FountainScript(object):
def __init__(self, fileName = '', parserVersion = ParserVersion.DEFAULT):
if (fileName == ''):
return
self._fileName = fileName
# This parser is not optimized
parser = Parser(parserVersion)
self._elements = parser.parseBodyOfFile(self._fileName)
self._titlePageContents = parser.parseTitlePageOfFile(self._fileName)
return
| gpl-3.0 | -16,898,439,618,260,620 | 41 | 82 | 0.723049 | false | 4.095122 | false | false | false |
tumi8/sKnock | common/modules/Utils.py | 1 | 1624 | import errno
import stat
import pwd
import os
from OpenSSL import crypto
def convertDERtoPEM(key):
return crypto.dump_publickey(crypto.FILETYPE_PEM, crypto.load_publickey(crypto.FILETYPE_ASN1, key))
def convertPEMtoDER(key):
return crypto.dump_publickey(crypto.FILETYPE_ASN1, crypto.load_publickey(crypto.FILETYPE_PEM, key))
def touch(path):
"""
Creates a file at the given path.
If the directories in the given path are not existing, they are created
recursively with the permissions on each of them deriving from the umask,
but with an execute permission for others. The created file will be owned
by `nobody`
If the path already exists then the ownership is changed to `nobody`.
    Raises OSError if the given path is a directory, or if there is
    insufficient disk space.
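    Example (hedged; the path is illustrative and the chown to `nobody`
    normally requires root privileges)::
        touch('/var/lib/sknock/port_state')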
"""
f_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
try:
mode = os.stat(path).st_mode
except os.error as e:
if errno.ENOENT != e.errno:
raise e
mask = os.umask(0)
os.umask(mask ^ 1) # enable dir access for others
try:
os.makedirs(os.path.dirname(path))
except os.error as e:
if errno.EEXIST != e.errno:
raise e
finally:
os.umask(mask)
f = os.open(path, os.O_CREAT, f_mode)
os.close(f)
else:
f_mode = f_mode | mode & 0o777
os.chmod(path, f_mode)
# File will either be created or already existing by now change the
# ownership of the file to nobody
user = pwd.getpwnam('nobody')
os.chown(path, user.pw_uid, -1)
| gpl-3.0 | 235,730,516,789,654,750 | 30.230769 | 103 | 0.644089 | false | 3.569231 | false | false | false |
hasadna/open-shot | polyorg/migrations/0001_initial.py | 1 | 9185 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CandidateList'
db.create_table(u'polyorg_candidatelist', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=80)),
('ballot', self.gf('django.db.models.fields.CharField')(max_length=4)),
('number_of_seats', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('surplus_partner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['polyorg.CandidateList'], null=True, blank=True)),
('mpg_html_report', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('img_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('youtube_user', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
('wikipedia_page', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
('twitter_account', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
('facebook_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('platform', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal(u'polyorg', ['CandidateList'])
# Adding model 'Party'
db.create_table(u'polyorg_party', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('accepts_memberships', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'polyorg', ['Party'])
# Adding model 'Candidate'
db.create_table(u'polyorg_candidate', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('candidate_list', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['polyorg.CandidateList'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('ordinal', self.gf('django.db.models.fields.IntegerField')()),
('party', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['polyorg.Party'], null=True, blank=True)),
('votes', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='S', max_length=1)),
))
db.send_create_signal(u'polyorg', ['Candidate'])
def backwards(self, orm):
# Deleting model 'CandidateList'
db.delete_table(u'polyorg_candidatelist')
# Deleting model 'Party'
db.delete_table(u'polyorg_party')
# Deleting model 'Candidate'
db.delete_table(u'polyorg_candidate')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'polyorg.candidate': {
'Meta': {'ordering': "('ordinal',)", 'object_name': 'Candidate'},
'candidate_list': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polyorg.CandidateList']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polyorg.Party']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'S'", 'max_length': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'polyorg.candidatelist': {
'Meta': {'object_name': 'CandidateList'},
'ballot': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'candidates': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'through': u"orm['polyorg.Candidate']", 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mpg_html_report': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'platform': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'surplus_partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polyorg.CandidateList']", 'null': 'True', 'blank': 'True'}),
'twitter_account': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'wikipedia_page': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'youtube_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'})
},
u'polyorg.party': {
'Meta': {'object_name': 'Party'},
'accepts_memberships': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['polyorg'] | bsd-3-clause | 8,433,865,259,077,570,000 | 69.122137 | 208 | 0.573435 | false | 3.58649 | false | false | false |
christabor/MoAL | MOAL/data_structures/graphs/hypergraph.py | 1 | 1461 | # -*- coding: utf-8 -*-
__author__ = """Chris Tabor ([email protected])"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.display import print_h3
from MOAL.helpers import datamaker as dmkr
from MOAL.data_structures.graphs.graphs import Graph
DEBUG = True if __name__ == '__main__' else False
class HypgerGraph(Graph):
"""
From mathworld.wolfram.com/Hypergraph.html:
"A hypergraph is a graph in which generalized edges (called hyperedges)
may connect more than two nodes."
Also interesting, from en.wikipedia.org/wiki/Hypergraph
"The collection of hypergraphs is a category with hypergraph
homomorphisms as morphisms."
"""
if DEBUG:
with Section('Multi-graph'):
hypergraph = HypgerGraph(dmkr.random_graph(max_edges=10))
print_h3('Random multi-graph')
print(hypergraph)
# Multiple edges pointing to each other
hypergraph2 = HypgerGraph({
0: {'edges': [1, 2, 3], 'val': 'A'},
1: {'edges': [0, 3, 2, 1], 'val': 'B'},
2: {'edges': [0, 1, 3, 2], 'val': 'C'},
3: {'edges': [0, 1, 2, 3], 'val': 'D'},
})
print(hypergraph2)
if raw_input('Save graph images? Y/N: ') == 'Y':
hypergraph.render_graph('hypergraph-test.png')
hypergraph2.render_graph('hypergraph2-test.png')
| apache-2.0 | -2,256,337,737,574,232,800 | 31.466667 | 75 | 0.609856 | false | 3.366359 | false | false | false |
DOAJ/doaj | portality/scripts/sage_update.py | 1 | 4491 | import csv
from openpyxl import load_workbook
from portality.models import Journal
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--infile", help="path to SAGE spreadsheet", required=True)
parser.add_argument("-o", "--out", help="output file path", required=True)
args = parser.parse_args()
with open(args.out, "w", encoding="utf-8") as f:
writer = csv.writer(f)
wb = load_workbook(args.infile)
sheet = wb['sage_journals']
# Loop through all rows of the spreadsheet and update the journals (skipping row 1, the heading row)
for r in range(2, sheet.max_row+1):
j = Journal.pull(sheet.cell(row=r, column=1).value)
if j is not None:
if sheet.cell(row=r, column=1).value != j.id or sheet.cell(row=r, column=2).value != j.bibjson().title:
# if title of the journal in the sheet and in the system do not match - ignore
writer.writerow(["Id of requested journal does not match its title. Id: " +
sheet.cell(row=r, column=1).value + ", journal ignored"])
else:
fulltext_url = sheet.cell(row=r, column=3).value
apc_url = sheet.cell(row=r, column=4).value
submission_url = sheet.cell(row=r, column=5).value
editorial_board_url = sheet.cell(row=r, column=6).value
review_process_url = sheet.cell(row=r, column=7).value
aims_scope_url = sheet.cell(row=r, column=8).value
author_instructions = sheet.cell(row=r, column=9).value
plagiarism_url = sheet.cell(row=r, column=10).value
oa_url = sheet.cell(row=r, column=11).value
license_url = sheet.cell(row=r, column=12).value
jbib = j.bibjson()
if fulltext_url is not None:
jbib.remove_urls("homepage")
jbib.add_url(fulltext_url, "homepage")
if apc_url is not None:
jbib.apc_url = apc_url
if submission_url is not None:
jbib.submission_charges_url = submission_url
if editorial_board_url is not None:
jbib.remove_urls("editorial_board")
jbib.add_url(editorial_board_url, "editorial_board")
if review_process_url is not None:
                        jbib.set_editorial_review(jbib.editorial_review["process"], review_process_url)
if aims_scope_url is not None:
jbib.remove_urls("aims_scope")
jbib.add_url(aims_scope_url, "aims_scope")
if author_instructions is not None:
jbib.remove_urls("author_instructions")
jbib.add_url(author_instructions, "author_instructions")
if plagiarism_url is not None:
jbib.set_plagiarism_detection(plagiarism_url)
if oa_url is not None:
jbib.remove_urls("oa_statement")
jbib.add_url(oa_url, "oa_statement")
if license_url is not None:
current_license = jbib.get_license()
jbib.set_license(license_title=current_license["title"],
license_type=current_license["type"],
url=license_url,
open_access=current_license["open_access"],
by=current_license["BY"],
sa=current_license["SA"],
nc=current_license["NC"],
nd=current_license["ND"],
embedded=current_license["embedded"],
embedded_example_url=current_license["embedded_example_url"])
j.save(blocking=True)
else:
# if journal's id is not found in the system
writer.writerow(["Journal not found: " + sheet.cell(row=r, column=1).value])
# finished
writer.writerow(["Finished."])
| apache-2.0 | 8,343,009,261,149,139,000 | 47.290323 | 119 | 0.498775 | false | 4.322425 | false | false | false |
Outernet-Project/librarian | librarian/routes/firmware.py | 1 | 1624 | import logging
from bottle_utils.i18n import lazy_gettext as _
from streamline import XHRPartialFormRoute, RouteBase
from ..core.contrib.templates.renderer import template
from ..core.exts import ext_container as exts
from ..forms.firmware import FirmwareUpdateForm
from ..helpers.firmware import update_firmware, FIRMWARE_UPDATE_KEY
from ..utils.route_mixins import JSONResponseMixin
class FirmwareUpdate(XHRPartialFormRoute):
name = 'firmware:update'
path = '/firmware/'
template_func = template
template_name = 'firmware/update'
partial_template_name = 'firmware/_update'
form_factory = FirmwareUpdateForm
def get_bound_form(self):
form_factory = self.get_form_factory()
return form_factory(self.request.files)
def form_invalid(self):
return dict(saved=False)
def form_valid(self):
exts.cache.set(FIRMWARE_UPDATE_KEY, 'processing')
firmware = self.form.processed_data['firmware']
try:
path = exts.config['firmware.save_path']
exts.tasks.schedule(update_firmware, args=(firmware, path))
except Exception:
logging.exception('Firmware upload error.')
# Translators, shown when firmware upload failed
return dict(saved=False,
message=_('Firmware upload failed.'))
else:
return dict(saved=True)
class FirmwareUpdateStatus(JSONResponseMixin, RouteBase):
name = 'firmware:status'
path = '/firmware/status/'
def get(self):
status = exts.cache.get(FIRMWARE_UPDATE_KEY)
return dict(status=status)
| gpl-3.0 | 2,844,891,010,243,496,000 | 32.142857 | 71 | 0.678571 | false | 4.10101 | false | false | false |
mhvlug/ical2mailman | ical2mailman.py | 1 | 3412 | #!/usr/bin/env python
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import icalendar
import re
import robobrowser
import time
import urllib2
import yaml
def next_meetings(count=3):
"""Find the next N meetings from our ical.
After getting the ical, run through it looking for Events,
which are in the future, and include 'meetings' in the url,
which means they are Drupal meeting types, and not other
kinds of events like Lunch or Conferences.
    Because we know that in July (7) and August (8) we'll be at
    Lourdes, add an annotation to the events in those months. People
    seem to use the email footer for more info than I'd expect, so
    hopefully this means fewer people getting lost.
"""
f = urllib2.urlopen("https://mhvlug.org/calendar/ical")
ical = f.read()
cal = icalendar.Calendar()
cal = cal.from_ical(ical)
now = datetime.datetime.now()
found = 0
meetings = []
for event in cal.subcomponents:
if found >= count:
break
if type(event) != icalendar.cal.Event:
continue
        # oh time... flatten DTSTART (a date or tz-aware datetime) into a
        # naive datetime so it can be compared against now
dt = event['DTSTART'].dt
then = datetime.datetime.fromtimestamp(time.mktime(dt.utctimetuple()))
if then < now:
continue
if re.search('meetings', event['URL']):
meeting = (" %s - %s" % (
dt.strftime("%b %e"), event['SUMMARY'].title()))
if dt.month == 7:
meeting += " @ Lourdes"
meetings.append(meeting)
found += 1
return meetings
def update_mailman(meetings, passwd=""):
"""Log into mailman and update the footer with meetings.
Using python mechanize log into the mailman admin interface, strip
off the end of the footer and replace it with the updated list of meetings.
The text for this is hardcoded based on our needs, but it's at least
a pretty good example of how to do it.
"""
br = robobrowser.RoboBrowser()
br.open("https://mhvlug.org/cgi-bin/mailman/admin/mhvlug/nondigest")
form = br.get_form(action='/cgi-bin/mailman/admin/mhvlug/nondigest')
form['adminpw'].value = passwd
br.submit_form(form)
# Now we are logged in
br.open("https://mhvlug.org/cgi-bin/mailman/admin/mhvlug/nondigest")
form = br.get_forms()[0]
cur_footer = form['msg_footer'].value.split("Upcoming Meetings")[0]
cur_footer += ("Upcoming Meetings (6pm - 8pm) "
"Vassar College *\n")
for meeting in meetings:
cur_footer += meeting + "\n"
form['msg_footer'].value = cur_footer
br.submit_form(form)
def load_conf():
return yaml.load(open("config.yaml"))
def main():
conf = load_conf()
meetings = next_meetings(int(conf['entries']))
update_mailman(meetings, passwd=conf['pass'])
if __name__ == '__main__':
main()
| apache-2.0 | -8,760,569,111,229,455,000 | 31.807692 | 79 | 0.646249 | false | 3.684665 | false | false | false |
Huyuwei/tvm | rust/runtime/tests/build_model.py | 2 | 2571 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Builds a simple NNVM graph for testing."""
from os import path as osp
import nnvm
from nnvm import sym
from nnvm.compiler import graph_util
from nnvm.testing import init
import numpy as np
import tvm
CWD = osp.dirname(osp.abspath(osp.expanduser(__file__)))
def _get_model(dshape):
data = sym.Variable('data', shape=dshape)
fc1 = sym.dense(data, units=dshape[-1]*2, use_bias=True)
left, right = sym.split(fc1, indices_or_sections=2, axis=1)
return sym.Group(((left + 1), (right - 1)))
def _init_params(graph, input_shapes, initializer=init.Xavier(), seed=10):
if isinstance(graph, sym.Symbol):
graph = nnvm.graph.create(graph)
ishapes, _ = graph_util.infer_shape(graph, **input_shapes)
param_shapes = dict(zip(graph.index.input_names, ishapes))
np.random.seed(seed)
params = {}
for param, shape in param_shapes.items():
if param in {'data', 'label'} or not shape:
continue
init_value = np.empty(shape).astype('float32')
initializer(param, init_value)
params[param] = tvm.nd.array(init_value)
return params
def main():
dshape = (32, 16)
net = _get_model(dshape)
ishape_dict = {'data': dshape}
params = _init_params(net, ishape_dict)
graph, lib, params = nnvm.compiler.build(net, 'llvm',
shape=ishape_dict,
params=params,
dtype='float32')
with open(osp.join(CWD, 'graph.json'), 'w') as f_resnet:
f_resnet.write(graph.json())
with open(osp.join(CWD, 'graph.params'), 'wb') as f_params:
f_params.write(nnvm.compiler.save_param_dict(params))
if __name__ == '__main__':
main()
| apache-2.0 | 7,480,344,979,134,752,000 | 35.211268 | 74 | 0.651886 | false | 3.5758 | false | false | false |
tonyin/optionstg | run.py | 1 | 9422 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import argparse
import json
import os
import shutil
import sys
import time
from main import config
###############################################################################
# Options
###############################################################################
parser = argparse.ArgumentParser()
parser.add_argument(
'-w', '--watch', dest='watch', action='store_true',
help='watch files for changes when running the development web server',
)
parser.add_argument(
'-c', '--clean', dest='clean', action='store_true',
help='''recompiles files when running the development web server, but
obsolete if -s is used''',
)
parser.add_argument(
'-m', '--minify', dest='minify', action='store_true',
help='compiles files into minified version before deploying'
)
parser.add_argument(
'-s', '--start', dest='start', action='store_true',
help='starts the dev_appserver.py with storage_path pointing to temp',
)
parser.add_argument(
'-o', '--host', dest='host', action='store', default='127.0.0.1',
help='the host to start the dev_appserver.py',
)
parser.add_argument(
'-p', '--port', dest='port', action='store', default='8080',
help='the port to start the dev_appserver.py',
)
parser.add_argument(
'-f', '--flush', dest='flush', action='store_true',
help='clears the datastore, blobstore, etc',
)
args = parser.parse_args()
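# Typical invocations (illustrative): "./run.py -w" rebuilds compiled assets on
# change, "./run.py -m" produces the minified build, and "./run.py -s -f"
# starts dev_appserver.py with a freshly flushed datastore.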
###############################################################################
# Directories
###############################################################################
DIR_MAIN = 'main'
DIR_STATIC = 'static'
DIR_SRC = 'src'
DIR_STYLE = 'style'
DIR_SCRIPT = 'script'
DIR_MIN = 'min'
DIR_DST = 'dst'
DIR_LIB = 'lib'
DIR_NODE_MODULES = 'node_modules'
DIR_BIN = '.bin'
DIR_TEMP = 'temp'
DIR_STORAGE = 'storage'
FILE_ZIP = '%s.zip' % DIR_LIB
FILE_COFFEE = 'coffee'
FILE_LESS = 'lessc'
FILE_UGLIFYJS = 'uglifyjs'
dir_static = os.path.join(DIR_MAIN, DIR_STATIC)
dir_src = os.path.join(dir_static, DIR_SRC)
dir_src_script = os.path.join(dir_src, DIR_SCRIPT)
dir_src_style = os.path.join(dir_src, DIR_STYLE)
dir_dst = os.path.join(dir_static, DIR_DST)
dir_dst_style = os.path.join(dir_dst, DIR_STYLE)
dir_dst_script = os.path.join(dir_dst, DIR_SCRIPT)
dir_min = os.path.join(dir_static, DIR_MIN)
dir_min_style = os.path.join(dir_min, DIR_STYLE)
dir_min_script = os.path.join(dir_min, DIR_SCRIPT)
dir_lib = os.path.join(DIR_MAIN, DIR_LIB)
file_lib = os.path.join(DIR_MAIN, FILE_ZIP)
dir_bin = os.path.join(DIR_NODE_MODULES, DIR_BIN)
file_coffee = os.path.join(dir_bin, FILE_COFFEE)
file_less = os.path.join(dir_bin, FILE_LESS)
file_uglifyjs = os.path.join(dir_bin, FILE_UGLIFYJS)
dir_storage = os.path.join(DIR_TEMP, DIR_STORAGE)
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
timestamp = datetime.now().strftime('%H:%M:%S')
if not filename:
filename = '-' * 46
script = script.rjust(12, '-')
print '[%s] %12s %s' % (timestamp, script, filename)
def make_dirs(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def remove_dir(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
def clean_files():
bad_endings = ['pyc', '~']
print_out(
'CLEAN FILES',
'Removing files: %s' % ', '.join(['*%s' % e for e in bad_endings]),
)
for home, dirs, files in os.walk(DIR_MAIN):
for f in files:
for b in bad_endings:
if f.endswith(b):
os.remove(os.path.join(home, f))
def merge_files(source, target):
fout = open(target, 'a')
for line in open(source):
fout.write(line)
fout.close()
def os_execute(executable, args, source, target, append=False):
operator = '>>' if append else '>'
os.system('"%s" %s %s %s %s' % (executable, args, source, operator, target))
def compile_script(source, target_dir):
if not os.path.isfile(source):
print_out('NOT FOUND', source)
return
target = source.replace(dir_src_script, target_dir).replace('.coffee', '.js')
if not is_dirty(source, target):
return
make_dirs(os.path.dirname(target))
if not source.endswith('.coffee'):
print_out('COPYING', source)
shutil.copy(source, target)
return
print_out('COFFEE', source)
os_execute(file_coffee, '-cp', source, target)
def compile_style(source, target_dir, check_modified=False):
if not os.path.isfile(source):
print_out('NOT FOUND', source)
return
target = source.replace(dir_src_style, target_dir).replace('.less', '.css')
minified = ''
if not source.endswith('.less'):
return
if check_modified and not is_style_modified(target):
return
if target_dir == dir_min_style:
minified = '-x'
target = target.replace('.css', '.min.css')
print_out('LESS MIN', source)
else:
print_out('LESS', source)
make_dirs(os.path.dirname(target))
os_execute(file_less, minified, source, target)
def make_lib_zip(force=False):
if force and os.path.isfile(file_lib):
os.remove(file_lib)
if not os.path.isfile(file_lib):
print_out('ZIP', file_lib)
shutil.make_archive(dir_lib, 'zip', dir_lib)
def is_dirty(source, target):
if not os.access(target, os.O_RDONLY):
return True
return os.stat(source).st_mtime - os.stat(target).st_mtime > 0
def is_style_modified(target):
for folder, folders, files in os.walk(dir_src):
for file_ in files:
path = os.path.join(folder, file_)
if path.endswith('.less') and is_dirty(path, target):
return True
return False
def compile_all_dst():
for source in config.STYLES:
compile_style(os.path.join(dir_static, source), dir_dst_style, True)
for module in config.SCRIPTS:
for source in config.SCRIPTS[module]:
compile_script(os.path.join(dir_static, source), dir_dst_script)
def update_path_separators():
def fixit(path):
return path.replace('\\', '/').replace('/', os.sep)
for idx in xrange(len(config.STYLES)):
config.STYLES[idx] = fixit(config.STYLES[idx])
for module in config.SCRIPTS:
for idx in xrange(len(config.SCRIPTS[module])):
config.SCRIPTS[module][idx] = fixit(config.SCRIPTS[module][idx])
def install_dependencies():
missing = False
if not os.path.exists(file_coffee):
missing = True
if not os.path.exists(file_less):
missing = True
if not os.path.exists(file_uglifyjs):
missing = True
if not os.path.exists(os.path.join(DIR_NODE_MODULES, 'grunt')):
missing = True
try:
file_package = os.path.join(DIR_NODE_MODULES, 'uglify-js', 'package.json')
package_json = json.load(open(file_package))
version = package_json['version']
if int(version.split('.')[0]) < 2:
missing = True
except:
missing = True
if missing:
os.system('npm install')
def update_missing_args():
if args.start:
args.clean = True
def uniq(seq):
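    # Order-preserving de-duplication: set.add() returns None, so the
    # "not seen.add(e)" clause records each element while keeping only its
    # first occurrence.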
seen = set()
return [e for e in seq if e not in seen and not seen.add(e)]
###############################################################################
# Main
###############################################################################
os.chdir(os.path.dirname(os.path.realpath(__file__)))
update_path_separators()
install_dependencies()
update_missing_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
if args.clean:
print_out('CLEAN')
clean_files()
make_lib_zip(force=True)
remove_dir(dir_dst)
make_dirs(dir_dst)
compile_all_dst()
print_out('DONE')
if args.minify:
print_out('MINIFY')
clean_files()
make_lib_zip(force=True)
remove_dir(dir_min)
make_dirs(dir_min_script)
for source in config.STYLES:
compile_style(os.path.join(dir_static, source), dir_min_style)
for module in config.SCRIPTS:
scripts = uniq(config.SCRIPTS[module])
coffees = ' '.join([
os.path.join(dir_static, script)
for script in scripts if script.endswith('.coffee')
])
pretty_js = os.path.join(dir_min_script, '%s.js' % module)
ugly_js = os.path.join(dir_min_script, '%s.min.js' % module)
print_out('COFFEE MIN', ugly_js)
if len(coffees):
os_execute(file_coffee, '--join -cp', coffees, pretty_js, append=True)
for script in scripts:
if not script.endswith('.js'):
continue
script_file = os.path.join(dir_static, script)
merge_files(script_file, pretty_js)
os_execute(file_uglifyjs, pretty_js, '-cm', ugly_js)
os.remove(pretty_js)
print_out('DONE')
if args.watch:
print_out('WATCHING')
make_lib_zip()
make_dirs(dir_dst)
compile_all_dst()
print_out('DONE', 'and watching for changes (Ctrl+C to stop)')
while True:
time.sleep(0.5)
reload(config)
update_path_separators()
compile_all_dst()
if args.flush:
remove_dir(dir_storage)
print_out('STORAGE CLEARED')
if args.start:
make_dirs(dir_storage)
clear = 'yes' if args.flush else 'no'
port = int(args.port)
run_command = '''
python ../../../"Program Files (X86)"/Google/google_appengine/dev_appserver.py %s
--host %s
--port %s
--admin_port %s
--storage_path=%s
--clear_datastore=%s
--skip_sdk_update_check
''' % (DIR_MAIN, args.host, port, port + 1, dir_storage, clear)
os.system(run_command.replace('\n', ' '))
| mit | 3,240,041,015,489,999,400 | 26.469388 | 87 | 0.609425 | false | 3.221197 | true | false | false |
igel-kun/pyload | module/plugins/internal/UnTar.py | 1 | 1811 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
import tarfile
from .Extractor import ArchiveError, CRCError, Extractor
from .misc import encode, fsjoin
class UnTar(Extractor):
__name__ = "UnTar"
__type__ = "extractor"
__version__ = "0.05"
__status__ = "stable"
__description__ = """TAR extractor plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
VERSION = "%s.%s.%s" % (sys.version_info[0],
sys.version_info[1],
sys.version_info[2])
@classmethod
def isarchive(cls, filename):
try:
return tarfile.is_tarfile(encode(filename))
except:
return False
@classmethod
def find(cls):
return sys.version_info[:2] >= (2, 5)
def list(self, password=None):
with tarfile.open(self.filename) as t:
self.files = [fsjoin(self.dest, _f) for _f in t.getnames()]
return self.files
def verify(self, password=None):
try:
t = tarfile.open(self.filename, errorlevel=1)
except tarfile.CompressionError, e:
raise CRCError(e)
except (OSError, tarfile.TarError), e:
raise ArchiveError(e)
else:
t.close()
def extract(self, password=None):
self.verify(password)
try:
with tarfile.open(self.filename, errorlevel=2) as t:
t.extractall(self.dest)
self.files = t.getnames()
return self.files
except tarfile.ExtractError, e:
self.log_warning(e)
except tarfile.CompressionError, e:
raise CRCError(e)
except (OSError, tarfile.TarError), e:
raise ArchiveError(e)
| gpl-3.0 | 7,260,781,826,180,916,000 | 24.507042 | 71 | 0.555494 | false | 3.828753 | false | false | false |
kubeflow/testing | py/kubeflow/testing/delete_kf_instance.py | 1 | 2404 | """Delete a kubeflow instance."""
import fire
import json
import logging
import retrying
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from kubeflow.testing import util
@retrying.retry(stop_max_delay=10*60*1000, wait_exponential_max=60*1000,
wait_exponential_multiplier=1000)
def delete_deployment(dm, project, name):
deployments_client = dm.deployments()
try:
op = deployments_client.delete(project=project, deployment=name).execute()
except errors.HttpError as e:
if not e.content:
raise
error_content = json.loads(e.content)
message = error_content.get('error', {}).get('message', "")
logging.info("delete deployment error %s", message)
code = error_content.get('error', {}).get('code', 0)
if code == 404: # pylint: disable=no-else-return
logging.info("Project %s doesn't have deployment %s", project, name)
return
elif code == 409:
logging.info("Conflicting operation in progress")
raise ValueError("Can't delete deployment confliction operation in "
"progress")
raise
zone = None
op = util.wait_for_gcp_operation(dm.operations(), project, zone, op["name"])
logging.info("Final op: %s", op)
class KFDeleter:
def delete_kf(self, project, name):
"""Delete a KF instance with the specified name in the specified project."""
# TODO(jlewi): This is a bit of a hack due to the fact that kfctl
# doesn't properly handle deletion just given the name of a kubeflow
# deployment. Once that's fixed we should just use that.
util.maybe_activate_service_account()
credentials = GoogleCredentials.get_application_default()
dm = discovery.build("deploymentmanager", "v2", credentials=credentials)
for dm_name in [name, name + "-storage"]:
logging.info("Deleting project %s deployment %s", project, dm_name)
delete_deployment(dm, project, dm_name)
# TODO(jlewi): Cleanup other resources like certificates and backends
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
fire.Fire(KFDeleter)
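# Example invocation via python-fire (project and deployment names are
# illustrative):
#   python delete_kf_instance.py delete_kf --project=my-gcp-project --name=my-kf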
| apache-2.0 | 50,440,597,814,513,940 | 37.774194 | 80 | 0.665141 | false | 3.934534 | false | false | false |
morepath/more.jwtauth | more/jwtauth/main.py | 1 | 15396 | """Provides the JWTIdentityPolicy.
The following settings are available:
* master_secret: A secret known only by the server, used for
the default HMAC (HS*) algorithm.
* private_key: An Elliptic Curve or an RSA private_key used for
the EC (EC*) or RSA (PS*/RS*) algorithms.
* private_key_file: A file holding an Elliptic Curve or an RSA encoded
(PEM/DER) private_key.
* public_key: An Elliptic Curve or an RSA public_key used for the EC (EC*)
or RSA (PS*/RS*) algorithms.
* public_key_file: A file holding an Elliptic Curve
or an RSA encoded (PEM/DER) public_key.
* algorithm: The algorithm used to sign the key (defaults to HS256).
* expiration_delta: Time delta from now until the token will expire.
This can either be a datetime.timedelta or the number of seconds.
Default is 30 minutes, set to None to disable expiration.
* leeway: The leeway, which allows you to validate an expiration time
which is in the past, but not very far. To use as a datetime.timedelta
or the number of seconds. Defaults is 0.
* allow_refresh: Enables the token refresh API when True.
Default is False
* refresh_delta: A time delta in which the token can be refreshed
considering the leeway.
This can either be a datetime.timedelta or the number of seconds.
Default is 7 days. When None you can always refresh the token.
* refresh_nonce_handler: Either dotted path to callback function or the
callback function itself, which receives the userid as argument and
returns a nonce which will be validated before refreshing.
When None no nonce will be created or validated for refreshing.
Default is None.
* verify_expiration_on_refresh: If False, expiration_delta for the JWT
token will not be checked during refresh. Otherwise you can refresh
the token only if it's not yet expired. Default is False.
* issuer: This is a string that will be checked against the iss claim of
the token. You can use this e.g. if you have several related apps with
exclusive user audience. Default is None (do not check iss on JWT).
* auth_header_prefix: You can modify the Authorization header value prefix
that is required to be sent together with the token. The default value
is JWT. Another common value used for tokens is Bearer.
* userid_claim: The claim, which contains the user id.
The default claim is 'sub'.
The library takes either a master_secret or private_key/public_key pair.
In the later case the algorithm must be an EC*, PS* or RS* version.
"""
from calendar import timegm
from datetime import datetime, timedelta
import jwt
from morepath import Identity, NO_IDENTITY
from . import (
InvalidTokenError,
DecodeError,
ExpiredSignatureError,
MissingRequiredClaimError,
)
from .utils import handler
class JWTIdentityPolicy:
"""Morepath Identity Policy implementing JWT Access Auth.
This class provides an IdentityPolicy implementation based on
signed requests, using the JSON Web Token Authentication standard.
Reference:
http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html
"""
def __init__(
self,
master_secret=None,
private_key=None,
private_key_file=None,
public_key=None,
public_key_file=None,
algorithm="HS256",
expiration_delta=timedelta(minutes=30),
leeway=0,
allow_refresh=False,
refresh_delta=timedelta(days=7),
refresh_nonce_handler=None,
verify_expiration_on_refresh=False,
issuer=None,
auth_header_prefix="JWT",
userid_claim="sub",
):
"""Initiate the JWTIdentityPolicy with the given settings."""
_public_key = master_secret
if public_key is not None:
_public_key = public_key
if public_key_file is not None:
with open(public_key_file) as key_pub_file:
_public_key = key_pub_file.read()
self.public_key = _public_key
_private_key = master_secret
if private_key is not None:
_private_key = private_key
if private_key_file is not None:
with open(private_key_file) as key_priv_file:
_private_key = key_priv_file.read()
self.private_key = _private_key
self.algorithm = algorithm
if isinstance(expiration_delta, timedelta):
expiration_delta = expiration_delta.total_seconds()
self.expiration_delta = expiration_delta
if leeway is None:
leeway = 0
elif isinstance(leeway, timedelta):
leeway = leeway.total_seconds()
self.leeway = leeway
self.allow_refresh = allow_refresh
if isinstance(refresh_delta, timedelta):
refresh_delta = refresh_delta.total_seconds()
self.refresh_delta = refresh_delta
if isinstance(refresh_nonce_handler, str):
self.refresh_nonce_handler = handler(refresh_nonce_handler)
else:
self.refresh_nonce_handler = refresh_nonce_handler
self.verify_expiration_on_refresh = verify_expiration_on_refresh
self.issuer = issuer
self.auth_header_prefix = auth_header_prefix
self.userid_claim = userid_claim
def identify(self, request):
"""Establish what identity this user claims to have from request.
:param request: Request to extract identity information from.
:type request: :class:`morepath.Request`.
:returns: :class:`morepath.Identity` instance or
:attr:`morepath.NO_IDENTITY` if identity cannot
be established.
"""
token = self.get_jwt(request)
if token is None:
return NO_IDENTITY
try:
claims_set = self.decode_jwt(token)
except (DecodeError, ExpiredSignatureError):
return NO_IDENTITY
userid = self.get_userid(claims_set)
if userid is None:
return NO_IDENTITY
extra_claims = self.get_extra_claims(claims_set)
if extra_claims is not None:
return Identity(userid=userid, **extra_claims)
else:
return Identity(userid=userid)
def remember(self, response, request, identity):
"""Remember identity on response.
Implements ``morepath.App.remember_identity``, which is called
from user login code.
Create a JWT token and return it as the Authorization field of the
response header.
:param response: response object on which to store identity.
:type response: :class:`morepath.Response`
:param request: request object.
:type request: :class:`morepath.Request`
:param identity: identity to remember.
:type identity: :class:`morepath.Identity`
"""
claims = identity.as_dict()
userid = claims.pop("userid")
claims_set = self.create_claims_set(request, userid, claims)
token = self.encode_jwt(claims_set)
response.headers["Authorization"] = "{} {}".format(
self.auth_header_prefix,
token,
)
def forget(self, response, request):
"""Forget identity on response.
Implements ``morepath.App.forget_identity``, which is called from
user logout code.
This is a no-op for this identity policy. The client is supposed to
handle logout and remove the token.
:param response: response object on which to forget identity.
:type response: :class:`morepath.Response`
:param request: request object.
:type request: :class:`morepath.Request`
"""
pass
def decode_jwt(self, token, verify_expiration=True):
"""Decode a JWTAuth token into its claims set.
This method decodes the given JWT to provide the claims set. The JWT
can fail if the token has expired (with appropriate leeway) or if the
token won't validate due to the secret (key) being wrong.
If private_key/public key is set then the public_key will be used
to decode the key.
The leeway and issuer settings will be passed to jwt.decode.
:param token: the JWTAuth token.
:param verify_expiration: if False the expiration time will not
be checked.
"""
options = {
"verify_exp": verify_expiration,
}
return jwt.decode(
token,
self.public_key,
algorithms=[self.algorithm],
options=options,
leeway=self.leeway,
issuer=self.issuer,
)
def create_claims_set(self, request, userid, extra_claims=None):
"""Create the claims set based on the userid of the claimed identity,
the settings and the extra_claims dictionary.
The userid will be stored in settings.jwtauth.userid_claim
(default: "sub").
If settings.jwtauth.expiration_delta is set it will be added
to the current time and stored in the "exp" claim.
If settings.jwtauth.issuer is set, it get stored in the "iss" claim.
If settings.jwtauth.refresh_delta is set it will be added
to the current time and stored in the "refresh_until" claim and
        the return value of settings.jwtauth.refresh_nonce_handler, called with
        the request and userid as arguments, will be stored in the "nonce" claim.
With the extra_claims dictionary you can provide additional claims.
This can be registered claims like "nbf"
(the time before which the token should not be processed) and/or
claims containing extra info
about the identity, which will be stored in the Identity object.
:param request: current request object.
:type request: :class:`morepath.Request`
:param userid: the userid of the claimed identity.
:param extra_claims: dictionary, containing additional claims or None.
"""
claims_set = {self.userid_claim: userid}
now = timegm(datetime.utcnow().utctimetuple())
if self.expiration_delta is not None:
claims_set["exp"] = now + self.expiration_delta
if self.issuer is not None:
claims_set["iss"] = self.issuer
if self.allow_refresh:
if self.refresh_delta is not None:
claims_set["refresh_until"] = now + self.refresh_delta
if self.refresh_nonce_handler is not None:
claims_set["nonce"] = self.refresh_nonce_handler(
request, userid
)
if extra_claims is not None:
claims_set.update(extra_claims)
return claims_set
def encode_jwt(self, claims_set):
"""Encode a JWT token based on the claims_set and the settings.
If available, registry.settings.jwtauth.private_key is used as key.
In this case the algorithm must be an RS* or EC* algorithm.
If registry.settings.jwtauth.private_key is not set,
registry.settings.jwtauth.master_secret is used.
registry.settings.jwtauth.algorithm is used as algorithm.
:param claims_set: set of claims, which will be included in
the created token.
"""
token = jwt.encode(
claims_set,
self.private_key,
self.algorithm,
).decode(encoding="UTF-8")
return token
def get_userid(self, claims_set):
"""Extract the userid from a claims set.
Returns userid or None if there is none.
:param claims_set: set of claims, which was included
in the received token.
"""
if self.userid_claim in claims_set:
return claims_set[self.userid_claim]
else:
return None
def get_extra_claims(self, claims_set):
"""Get claims holding extra identity info from the claims set.
Returns a dictionary of extra claims or None if there are none.
:param claims_set: set of claims, which was included in the received
token.
"""
reserved_claims = (
self.userid_claim,
"iss",
"aud",
"exp",
"nbf",
"iat",
"jti",
"refresh_until",
"nonce",
)
extra_claims = {}
for claim in claims_set:
if claim not in reserved_claims:
extra_claims[claim] = claims_set[claim]
if not extra_claims:
return None
return extra_claims
def get_jwt(self, request):
"""Extract the JWT token from the authorisation header of the request.
Returns the JWT token or None, if the token cannot be extracted.
:param request: request object.
:type request: :class:`morepath.Request`
"""
try:
authorization = request.authorization
except ValueError: # pragma: no cover
return None
if authorization is None:
return None
authtype, token = authorization
if authtype.lower() != self.auth_header_prefix.lower():
return None
return token
def verify_refresh(self, request):
"""
Verify if the request to refresh the token is valid.
If valid it returns the userid which can be used to create
an updated identity with ``remember_identity``.
Otherwise it raises an exception based on InvalidTokenError.
:param request: current request object
:type request: :class:`morepath.Request`
:returns: userid
:raises: InvalidTokenError, ExpiredSignatureError, DecodeError,
MissingRequiredClaimError
"""
if not self.allow_refresh:
raise InvalidTokenError("Token refresh is disabled")
token = self.get_jwt(request)
if token is None:
raise InvalidTokenError("Token not found")
try:
claims_set = self.decode_jwt(
token, self.verify_expiration_on_refresh
)
# reraise the exceptions to change the error messages
except DecodeError:
raise DecodeError("Token could not be decoded")
except ExpiredSignatureError:
raise ExpiredSignatureError("Token has expired")
userid = self.get_userid(claims_set)
if userid is None:
raise MissingRequiredClaimError(self.userid_claim)
if self.refresh_nonce_handler is not None:
if "nonce" not in claims_set:
raise MissingRequiredClaimError("nonce")
if (
self.refresh_nonce_handler(request, userid)
!= claims_set["nonce"]
):
raise InvalidTokenError("Refresh nonce is not valid")
if self.refresh_delta is not None:
if "refresh_until" not in claims_set:
raise MissingRequiredClaimError("refresh_until")
now = timegm(datetime.utcnow().utctimetuple())
refresh_until = int(claims_set["refresh_until"])
if refresh_until < (now - self.leeway):
raise ExpiredSignatureError("Refresh nonce has expired")
return userid
| bsd-3-clause | 8,533,879,541,043,725,000 | 36.009615 | 79 | 0.626137 | false | 4.292166 | false | false | false |
open-rnd/ros3d-dev-controller | ros3ddevcontroller/mqtt/mqttornado.py | 1 | 4038 | #
# Copyright (c) 2015 Open-RnD Sp. z o.o.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""MQTT to Tornado adapter"""
import logging
from tornado.ioloop import IOLoop, PeriodicCallback
# periodic check with MQTT client library to execute misc actions
# (pings, etc.)
MQTT_MISC_PERIOD = 10 * 1000
LOG = logging.getLogger('mqttornado')
class MQTTornadoAdapter(object):
"""Adapter for interfacing MQTT Client with Tornado framework"""
def __init__(self, client, loop=None):
"""Create new adapter for given client instance
:param mqtt.Client client: MQTT client instance
:param tornado.ioloop.IOLoop loop: Tonardo IOLoop instance,
None to use default loop
"""
self._client = client
self._fd = self._client_fd()
self._loop = loop
self._read_events = IOLoop.READ | IOLoop.ERROR
self._all_events = self._read_events | IOLoop.WRITE
if not self._loop:
self._loop = IOLoop.instance()
LOG.debug('setup handlers')
self._loop.add_handler(self._client_fd(),
self._io_clbk,
self._all_events)
self._periodic = PeriodicCallback(self._periodic_clbk,
MQTT_MISC_PERIOD,
io_loop=self._loop)
self._periodic.start()
def stop(self):
"""Stop Adapter
"""
self._loop.remove_handler(self._fd)
self._periodic.stop();
self._periodic = None
def _client_fd(self):
"""Return MQTT client FD if already set otherwise raise an
exception
:rtype: int
:return: MQTT client fd
"""
sock = self._client.socket()
if sock == None:
raise RuntimeError('not connected to broker')
LOG.debug('socket: %s', sock.fileno())
return sock.fileno()
def _io_clbk(self, _, event):
"""IO Callback from Tornado"""
LOG.debug('IO event: 0x%x', event)
if event & IOLoop.READ:
self._client.loop_read()
if event & IOLoop.ERROR:
self._client.loop_read()
if event & IOLoop.WRITE:
self._client.loop_write()
if self.poll_writes() == False:
self._loop.update_handler(self._client_fd(),
self._read_events)
def _periodic_clbk(self):
"""Periodic callback handler"""
# LOG.debug('periodic check')
self._client.loop_misc()
def poll_writes(self):
"""Check if client wants to write anything and schedule write
action
:return: True if client wants to write, False otherwise
:rtype: bool"""
if self._client.want_write():
LOG.debug('want write')
self._loop.update_handler(self._client_fd(),
self._all_events)
return True
return False
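# Example usage (sketch): the adapter expects an already-connected paho-mqtt
# client, since _client_fd() requires client.socket() to return a real socket.
# The broker hostname is illustrative.
#
#   import paho.mqtt.client as mqtt
#   client = mqtt.Client()
#   client.connect('broker.example.org')
#   adapter = MQTTornadoAdapter(client)
#   IOLoop.instance().start()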
| mit | -2,852,210,188,878,344,700 | 32.932773 | 69 | 0.604755 | false | 4.360691 | false | false | false |
ThunderGemios10/The-Super-Duper-Script-Editor | wrd/bin.py | 1 | 5171 | ################################################################################
### Copyright © 2012-2013 BlackDragonHunt
###
### This file is part of the Super Duper Script Editor.
###
### The Super Duper Script Editor is free software: you can redistribute it
### and/or modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation, either version 3 of the License,
### or (at your option) any later version.
###
### The Super Duper Script Editor is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with the Super Duper Script Editor.
### If not, see <http://www.gnu.org/licenses/>.
################################################################################
import bitstring
from bitstring import BitStream, ConstBitStream
from wrd.ops import *
from wrd.parser import parse_command, InvalidWrdHeader
################################################################################
### Converts binary wrd data to a list of commands which can be used in all
### the other functions in this module.
################################################################################
def from_bin(data):
# Eat the header.
parse_command(data)
commands = []
while True:
try:
op, params = parse_command(data)
commands.append((op, params))
except InvalidWrdHeader:
byte = data.read("uint:8")
commands.append((WRD_INVALID, {"val": byte}))
except:
break
return commands
################################################################################
### Converts a list of commands to the binary format used by the game.
################################################################################
def to_bin(commands):
data = BitStream()
lines = 0
for op, params in commands:
if op == WRD_HEADER:
continue
if not op in OP_PARAMS:
# raise Exception("Unknown op: 0x%02X" % op)
print "Unknown op: 0x%02X" % op
continue
param_info = OP_PARAMS[op]
# If it has a custom parsing function, use the equivalent packing function.
if isinstance(param_info, basestring):
command = globals()[OP_FUNCTIONS[op]](**params)
data.append(command)
else:
if op == WRD_SHOW_LINE:
lines += 1
data.append(bitstring.pack("uint:8, uint:8", CMD_MARKER, op))
unnamed_param_id = 0
for param_name, param_type in param_info:
if param_name == None:
data.append(bitstring.pack(param_type, params[param_name][unnamed_param_id]))
unnamed_param_id += 1
else:
data.append(bitstring.pack(param_type, params[param_name]))
return bitstring.pack("uint:8, uint:8, uintle:16", CMD_MARKER, WRD_HEADER, lines) + data
################################################################################
### Special function definitions.
################################################################################
def check_flag_a(flags, flag_ops, fail_label):
# XX XX 00 YY
# * If there are multiple flags (as many as needed)
# -> WW XX XX 00 YY
#
# * When all the flags have been listed.
# -> 70 3C 70 34 ZZ ZZ
#
# * XX XX = Flag group/ID
# * YY = Flag State
# * 00 = Off
# * 01 = On
#
# * WW = Operator
# * 06 = AND
# * 07 = OR (?)
#
# * ZZ ZZ = Label to jump to if check failed.
command = bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_CHECKFLAG_A)
for i, (flag_group, flag_id, flag_state, long_state) in enumerate(flags):
command += bitstring.pack("uint:8, uint:8", flag_group, flag_id)
if long_state:
command += bitstring.pack("uint:16", flag_state)
else:
command += bitstring.pack("uint:8", flag_state)
if i < len(flag_ops):
command += bitstring.pack("uint:8", flag_ops[i])
command += bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_FLAG_CHECK_END)
if not fail_label == None:
command += bitstring.pack("uint:8, uint:8, uint:16", CMD_MARKER, WRD_GOTO_LABEL, fail_label)
return command
def check_flag_b(flags, flag_ops, fail_label):
command = bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_CHECKFLAG_B)
for i, (unk1, unk2, unk3, unk4, unk5) in enumerate(flags):
command += bitstring.pack("uint:8, uint:8, uint:8, uint:8, uint:8", unk1, unk2, unk3, unk4, unk5)
if i < len(flag_ops):
command += bitstring.pack("uint:8", flag_ops[i])
command += bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_FLAG_CHECK_END)
if not fail_label == None:
command += bitstring.pack("uint:8, uint:8, uint:16", CMD_MARKER, WRD_GOTO_LABEL, fail_label)
return command
def wait_frames(frames):
return bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_WAIT_FRAME) * frames
def byte(val):
return bitstring.pack("uint:8", val)
### EOF ###
| gpl-3.0 | -3,351,541,255,566,452,700 | 31.509434 | 101 | 0.555728 | false | 3.742216 | false | false | false |
sdss/marvin | tasks.py | 1 | 6407 | # !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-06-10 16:46:40
# @Last modified by: José Sánchez-Gallego ([email protected])
# @Last modified time: 2018-11-14 19:37:22
from __future__ import absolute_import, division, print_function
import os
from invoke import Collection, task
DIRPATH = '/home/manga/software/git/manga/marvin'
MODULEPATH = '/home/manga/software/git/modulefiles'
@task
def clean_docs(ctx):
''' Cleans up the docs '''
print('Cleaning the docs')
ctx.run("rm -rf docs/sphinx/_build")
@task
def build_docs(ctx, clean=False):
''' Builds the Sphinx docs '''
if clean:
print('Cleaning the docs')
ctx.run("rm -rf docs/sphinx/_build")
print('Building the docs')
os.chdir('docs/sphinx')
ctx.run("make html", pty=True)
@task
def show_docs(ctx):
"""Shows the Sphinx docs"""
print('Showing the docs')
os.chdir('docs/sphinx/_build/html')
ctx.run('open ./index.html')
@task
def clean(ctx):
''' Cleans up the crap '''
print('Cleaning')
# ctx.run("rm -rf docs/sphinx/_build")
ctx.run("rm -rf htmlcov")
ctx.run("rm -rf build")
ctx.run("rm -rf dist")
@task(clean)
def deploy(ctx, repo=None):
''' Deploy to pypi '''
print('Deploying to Pypi!')
rstr = ''
if repo:
rstr = '-r {0}'.format(repo)
ctx.run("python setup.py sdist bdist_wheel --universal")
ctx.run("twine upload {0} dist/*".format(rstr))
@task
def update_default(ctx, path=None, version=None):
''' Updates the default version module file'''
assert version is not None, 'A version is required to update the default version!'
assert path is not None, 'A path must be specified!'
# update default version
f = open('.version', 'r+')
data = f.readlines()
data[1] = 'set ModulesVersion "{0}"\n'.format(version)
f.seek(0, 0)
f.writelines(data)
f.close()
@task
def update_module(ctx, path=None, wrap=None, version=None):
''' Update a module file '''
assert version is not None, 'A version is required to update the module file!'
assert path is not None, 'A path must be specified!'
print('Setting up module files!')
os.chdir(path)
newfile = 'mangawork.marvin_{0}'.format(version) if wrap else version
oldfile = 'mangawork.marvin_2.1.3' if wrap else 'master'
searchline = 'marvin' if wrap else 'version'
ctx.run('cp {0} {1}'.format(oldfile, newfile))
f = open('{0}'.format(newfile), 'r+')
data = f.readlines()
index, line = [(i, line) for i, line in enumerate(data)
if 'set {0}'.format(searchline) in line][0]
data[index] = 'set {0} {1}\n'.format(searchline, version)
f.seek(0, 0)
f.writelines(data)
f.close()
# update the default version
update_default(ctx, path=path, version=newfile)
@task
def update_git(ctx, version=None):
''' Update the git package at Utah '''
assert version is not None, 'A version is required to checkout a new git repo!'
print('Checking out git tag {0}'.format(version))
verpath = os.path.join(DIRPATH, version)
# checkout and setup new git tag
os.chdir(DIRPATH)
ctx.run('git clone https://github.com/sdss/marvin.git {0}'.format(version))
os.chdir(verpath)
ctx.run('git checkout {0}'.format(version))
ctx.run('git submodule update --init --recursive')
# ctx.run('python -c "from get_version import generate_version_py; '
# 'generate_version_py(\'sdss-marvin\', {0}, False)'.format(version))
@task
def update_current(ctx, version=None):
''' Update the current symlink '''
assert version is not None, 'A version is required to update the current symlink!'
# reset the current symlink
os.chdir(DIRPATH)
ctx.run('rm current')
ctx.run('ln -s {0} current'.format(version))
@task
def switch_module(ctx, version=None):
''' Switch to the marvin module of the specified version and start it '''
assert version is not None, 'A version is required to setup Marvin at Utah!'
ctx.run('uwsgi --stop /home/www/sas.sdss.org/mangawork/marvin/pid/uwsgi_marvin.pid')
ctx.run('module unload wrapmarvin')
ctx.run('module load wrapmarvin/mangawork.marvin_{0}'.format(version))
ctx.run('uwsgi /home/manga/software/git/manga/marvin/{0}/python/marvin/web/uwsgi_conf_files/uwsgi_marvin_mangawork.ini'.format(version))
@task
def update_uwsgi(ctx, version=None):
    ''' Reset the uwsgi symlink to the new version and touch the file so the uWSGI Emperor reloads Marvin '''
assert version is not None, 'A version is required to setup Marvin at Utah!'
os.chdir('/etc/uwsgi/vassals')
new_path = '/home/manga/software/git/manga/marvin/{0}/python/marvin/web/uwsgi_conf_files/uwsgi_marvin_mangawork.ini'.format(version)
ctx.run('rm uwsgi_marvin_mangawork.ini')
ctx.run('ln -s {0} uwsgi_marvin_mangawork.ini'.format(new_path))
ctx.run('touch uwsgi_marvin_mangawork.ini')
@task
def setup_utah(ctx, version=None):
''' Setup the package at Utah and update the release '''
assert version is not None, 'A version is required to setup Marvin at Utah!'
# update git
update_git(ctx, version=version)
# update_current
update_current(ctx, version=version)
# update modules
marvin = os.path.join(MODULEPATH, 'marvin')
wrap = os.path.join(MODULEPATH, 'wrapmarvin')
update_module(ctx, path=marvin, version=version)
update_module(ctx, path=wrap, wrap=True, version=version)
# restart the new marvin
# switch_module(ctx, version=version)
update_uwsgi(ctx, version=version)
print('Marvin version {0} is set up!\n'.format(version))
print('Check for the new Marvin version at the bottom of the Marvin Web main page!')
# print('Please run ...\n stopmarvin \n module switch wrapmarvin '
# 'wrapmarvin/mangawork.marvin_{0} \n startmarvin \n'.format(version))
os.chdir(os.path.dirname(__file__))
ns = Collection(clean, deploy, setup_utah)
docs = Collection('docs')
docs.add_task(build_docs, 'build')
docs.add_task(clean_docs, 'clean')
docs.add_task(show_docs, 'show')
ns.add_collection(docs)
updates = Collection('update')
updates.add_task(update_git, 'git')
updates.add_task(update_current, 'current')
updates.add_task(update_module, 'module')
updates.add_task(update_default, 'default')
ns.add_collection(updates)
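# Typical command lines (illustrative; invoke exposes these tasks with
# hyphenated names and collection prefixes):
#   invoke docs.build --clean
#   invoke deploy --repo=testpypi
#   invoke setup-utah --version=2.2.0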
| bsd-3-clause | -28,274,147,638,332,620 | 31.18593 | 140 | 0.667291 | false | 3.16609 | false | false | false |
Kronos3/HTML_PARSER | src/config.py | 1 | 6102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# config.py
#
# Copyright 2016 Andrei Tumbar <atuser@Kronos>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import os, sys
import platform
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('GtkSource', '3.0')
from gi.repository import Gtk, GObject, GLib, GtkSource, Pango, Gdk
os.chdir ( os.path.dirname ( os.path.realpath ( __file__ ) ) )
import filetab, filemanager, builderset, project, configitem, configfile
class Config:
config_file_relative = ""
config_file_full = ""
__file_lines = None
__file = None
notebook = None
open_dialogue = None
var_dict = {}
var_list = []
list_vars = [ "output_files", "input_files" ]
conf_vars = [ "title", "css", "js" ]
variables_box = Gtk.Box ( )
configitems = []
rows = []
row_raw = []
current_file = {}
current = None
def __init__ ( self, curr_dir, config, notebook, open_dialogue ):
self.open_dialogue = open_dialogue
self.dir = curr_dir
self.notebook = notebook
self.new_config ( config )
def remove_config ( self ):
self.input.destroy ( )
self.output.destroy ( )
self.treeview.destroy ( )
self.var_store = None
self.var_rend = None
self.val_rend = None
self.treeview.destroy ( )
self.var_dict = {}
self.var_list = []
self.list_vars = [ "output_files", "input_files" ]
self.conf_vars = [ "title", "css", "js" ]
self.variables_box = Gtk.Box ( )
self.configitems = []
self.current_file = {}
self.current = None
def new_config ( self, config ):
self.config_file_relative = config
self.config_file_full = self.get_path ( config )
self.__file_lines = open ( self.config_file_relative, "r" ).readlines ( )
self.input = configitem.ConfigItem ( )
self.output = configitem.ConfigItem ( )
self.input.connect ( "new_config", self.get_new )
self.output.connect ( "new_config", self.get_new )
self.input.connect ( "remove_item", self.get_remove )
self.output.connect ( "remove_item", self.get_remove )
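        # Parse simple "var = val" lines: lines starting with '#' and blank
        # lines are skipped, and keys named in self.list_vars hold
        # comma-separated file lists.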
for l in self.__file_lines:
if l [ 0 ] == "#" or l == "" or l == "\n":
continue
var, val = l.split ( "=" )
# Remove the whitespace
var = var.strip ( )
val = val.strip ( )
self.var_dict [ var ] = val
self.var_list.append ( var )
if var in self.list_vars:
self.var_dict [ var ] = val.split ( "," )
for var in self.list_vars:
if not var:
continue
buff = self.var_dict [ var ]
exec ( "self.%s.set_notebook ( self.notebook )" % var.replace ( "_files", "" ) )
exec ( "self.%s.set_dialogue ( self.open_dialogue )" % var.replace ( "_files", "" ) )
exec ( "self.%s.add_items ( buff )" % var.replace ( "_files", "" ) )
self.__init_vars__ ( )
for var in self.var_list:
if ( not isinstance ( self.var_dict [ var ], list ) ):
self.add_var ( var )
def get_remove (self, buff_cfg, buff_item):
curr = "output"
if buff_cfg == self.input:
curr = "input"
self.var_dict [ curr + "_files" ].pop ( self.var_dict [ curr + "_files" ].index (buff_item.full_path))
def get_path ( self, _in ):
if self.dir [ -1 ] == "/":
return self.dir + _in
return self.dir + "/" + _in
def get_new ( self, a, confitem ):
if ( confitem == self.input ):
self.current = "input"
else:
self.current = "output"
def add ( self, __files ):
if platform.system () == "Windows":
__files[0] = __files [0][1:]
if ( self.current == "input" ):
self.input.add_items ( __files, remove=False )
self.var_dict ["input_files"].append (__files[0])
else:
self.output.add_items ( __files, remove=False )
self.var_dict ["output_files"].append (__files[0])
def update_file ( self, var, val ):
self.current_file [ var ] = val
def __init_vars__ ( self ):
self.var_store = Gtk.ListStore ( str, str )
self.treeview = Gtk.TreeView.new_with_model ( self.var_store )
self.var_rend = Gtk.CellRendererText ( )
self.val_rend = Gtk.CellRendererText ( )
self.val_rend.set_property('editable', True)
column_1 = Gtk.TreeViewColumn ( "Variables", self.var_rend, text=0 )
column_2 = Gtk.TreeViewColumn ( "Value", self.val_rend, text=1 )
self.treeview.append_column ( column_1 )
self.treeview.append_column ( column_2 )
self.val_rend.connect ( "edited", self.vars_changes )
def vars_changes ( self, renderer, path, new_text ):
self.var_store.set ( self.var_store.get_iter ( path ), 1, new_text )
self.var_dict [ self.var_store.get_value ( self.var_store.get_iter ( path ), 0 ) ] = new_text
def add_var ( self, var, add_to_list=False ):
if ( add_to_list ):
self.var_list.append ( var )
self.var_dict [ var ] = ""
self.var_store.append ( [ var, self.var_dict [ var ] ] )
def open_file ( self, path ):
self.__file_lines = open ( path, "r" ).readlines ( )
self.__file = open ( path, "w" ).readlines ( )
def remove_var ( self ):
model, treeiter = self.treeview.get_selection ( ).get_selected ( )
self.var_dict.pop ( model [ treeiter ] [ 0 ], None )
self.var_list.pop ( self.var_list.index ( model [ treeiter ] [ 0 ] ) )
print (self.var_list)
self.var_store.remove ( treeiter )
def get_conf_out ( self ):
out_buff = []
for x in self.var_list:
buff = self.var_dict [ x ]
if ( isinstance ( self.var_dict [ x ], list ) ):
buff = ",".join ( self.var_dict [ x ] )
buff += ","
out_buff.append ( x + " = " + buff )
return out_buff
| gpl-3.0 | -1,421,159,499,639,127,300 | 28.196172 | 104 | 0.621599 | false | 2.919617 | true | false | false |
intuition-io/intuition | tests/data/test_universe.py | 1 | 2697 | '''
Tests for intuition.data.universe
'''
import os
import unittest
from nose.tools import raises, eq_
import dna.test_utils
import intuition.data.universe as universe
from intuition.errors import LoadMarketSchemeFailed
class MarketTestCase(unittest.TestCase):
def setUp(self):
dna.test_utils.setup_logger(self)
self.default_timezone = 'US/Eastern'
self.default_benchmark = '^GSPC'
self.scheme_path = os.path.expanduser('~/.intuition/data/market.yml')
self.good_universe_description = 'stocks:paris:cac40'
self.bad_universe_description = 'whatever'
def tearDown(self):
dna.test_utils.teardown_logger(self)
# NOTE It also tests market._load_market_scheme()
def test_initialize_market(self):
market = universe.Market()
self.assertIsInstance(market.scheme, dict)
eq_(market.benchmark, self.default_benchmark)
eq_(market.timezone, self.default_timezone)
#eq_(market.open, self.default_open)
#eq_(market.close, self.default_close)
def test_initialize_market_without_scheme(self):
tmp_path = self.scheme_path.replace('market', 'bkp.market')
os.system('mv {} {}'.format(self.scheme_path, tmp_path))
self.assertRaises(LoadMarketSchemeFailed, universe.Market)
os.system('mv {} {}'.format(tmp_path, self.scheme_path))
def test__extract_forex(self):
market = universe.Market()
sids = market._extract_forex()
self.assertGreater(len(sids), 0)
self.assertGreater(sids[0].find('/'), 0)
def test__extract_cac40(self):
market = universe.Market()
sids = market._extract_cac40(['stocks', 'paris', 'cac40'])
self.assertGreater(len(sids), 0)
self.assertGreater(sids[0].find('.pa'), 0)
def test__lookup_sids_no_limit(self):
market = universe.Market()
sids = market._lookup_sids(self.good_universe_description)
self.assertIsInstance(sids, list)
self.assertGreater(len(sids), 0)
def test__lookup_sids_with_limit(self):
limit = 4
market = universe.Market()
sids = market._lookup_sids(self.good_universe_description, limit)
self.assertIsInstance(sids, list)
eq_(len(sids), limit)
@raises(LoadMarketSchemeFailed)
def test__lookup_sids_wrong_market(self):
market = universe.Market()
market._lookup_sids(self.bad_universe_description)
def test_parse_universe(self):
market = universe.Market()
market.parse_universe_description(
self.good_universe_description + ',4')
self.assertIsInstance(market.sids, list)
eq_(len(market.sids), 4)
| apache-2.0 | -2,435,537,797,693,956,600 | 34.486842 | 77 | 0.652948 | false | 3.605615 | true | false | false |