repo_name (string, 5..100 chars) | path (string, 4..299 chars) | copies (string, 990 classes) | size (string, 4..7 chars) | content (string, 666..1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000..9,223,297,778B) | line_mean (float64, 3.17..100) | line_max (int64, 7..1k) | alpha_frac (float64, 0.25..0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
uzh/ceph-tools | cleanup-spurious-images.py | 1 | 6125 | #!/usr/bin/env python
# -*- coding: utf-8 -*-#
#
#
# Copyright (C) 2015, S3IT, University of Zurich. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
"""
__docformat__ = 'reStructuredText'
__author__ = 'Hanieh Rajabi <[email protected]>'
import os
import argparse
import cPickle as pickle
import rados
import rbd
import sys
import re
import sys
import logging
from keystoneclient.auth.identity import v3
from keystoneclient import session
from keystoneclient.v3 import client as keystone_client
from cinderclient import client as cinder_client
import cinderclient.exceptions as cex
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
volume_re = re.compile('^volume-(?P<uuid>\w{8}-\w{4}-\w{4}-\w{4}-\w{12})')
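# Matches RBD image names like 'volume-3f2504e0-4f89-41d3-9a0c-0305e82c3301'
# (hypothetical value) and captures the trailing UUID in the 'uuid' group.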
class EnvDefault(argparse.Action):
    # This is taken from
# http://stackoverflow.com/questions/10551117/setting-options-from-environment-variables-when-using-argparse
def __init__(self, envvar, required=True, default=None, **kwargs):
if envvar and envvar in os.environ:
default = os.environ[envvar]
if required and default:
required = False
super(EnvDefault, self).__init__(default=default, required=required,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
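# Example usage of EnvDefault (hypothetical option name and variable, for illustration):
#   parser.add_argument('--url', action=EnvDefault, envvar='SERVICE_URL',
#                       help='Falls back to $SERVICE_URL when the flag is omitted.')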
def make_session(opts):
"""Create a Keystone session"""
auth = v3.Password(auth_url=opts.os_auth_url,
username=opts.os_username,
password=opts.os_password,
project_name=opts.os_project_name,
user_domain_name=opts.os_user_domain_name,
project_domain_name=opts.os_project_domain_name)
sess = session.Session(auth=auth)
return sess
def cluster_connect(pool, conffile, rados_id):
cluster = rados.Rados(conffile=conffile, rados_id=rados_id)
cluster.connect()
ioctx = cluster.open_ioctx(pool)
return ioctx
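# Illustrative call, mirroring the defaults used below:
#   ioctx = cluster_connect('cinder', '/etc/ceph/ceph.conf', 'cinder')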
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--os-username',
action=EnvDefault,
envvar="OS_USERNAME",
help='OpenStack administrator username. If not supplied, the value of the '
'"OS_USERNAME" environment variable is used.')
parser.add_argument('--os-password',
action=EnvDefault,
envvar="OS_PASSWORD",
help='OpenStack administrator password. If not supplied, the value of the '
'"OS_PASSWORD" environment variable is used.')
parser.add_argument('--os-project-name',
action=EnvDefault,
envvar="OS_PROJECT_NAME",
help='OpenStack administrator project name. If not supplied, the value of the '
'"OS_PROJECT_NAME" environment variable is used.')
parser.add_argument('--os-auth-url',
action=EnvDefault,
envvar="OS_AUTH_URL",
help='OpenStack auth url endpoint. If not supplied, the value of the '
'"OS_AUTH_URL" environment variable is used.')
parser.add_argument('--os-user-domain-name',
action=EnvDefault,
envvar="OS_USER_DOMAIN_NAME",
default='default')
parser.add_argument('--os-project-domain-name',
action=EnvDefault,
envvar="OS_PROJECT_DOMAIN_NAME",
default='default')
parser.add_argument('-p', '--pool',
default='cinder',
help='Ceph pool to use. Default: %(default)s')
parser.add_argument('-c', '--conf', metavar='FILE',
default='/etc/ceph/ceph.conf',
help='Ceph configuration file. '
'Default: %(default)s')
parser.add_argument('-u', '--user',
default='cinder',
help='Ceph user to use to connect. '
'Default: %(default)s')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Increase verbosity')
cfg = parser.parse_args()
# Set verbosity
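    # Each -v lowers the threshold by one stdlib logging level:
    # 0 -> WARNING (30), 1 -> INFO (20), 2 -> DEBUG (10), 3 or more -> 0 (NOTSET)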
verbosity = max(0, 3-cfg.verbose) * 10
log.setLevel(verbosity)
ioctx = cluster_connect(cfg.pool, cfg.conf, cfg.user)
rbd_inst = rbd.RBD()
sess = make_session(cfg)
cclient = cinder_client.Client('2', session=sess)
volumenames = [vol for vol in rbd_inst.list(ioctx) if volume_re.match(vol)]
log.info("Got information about %d volumes", len(volumenames))
    # Initialize the list of cleanup commands
    to_delete = []
for name in volumenames:
uuid = volume_re.search(name).group('uuid')
log.debug("Checking if cinder volume %s exists", uuid)
try:
cclient.volumes.get(uuid)
log.debug("Volume %s exists.", uuid)
except cex.NotFound:
log.debug("This %s rbd image should be deleted", uuid)
to_delete.append("rbd -p %s rm %s" % (cfg.pool, name))
print "This is the list of commnads you should issue"
print str.join('\n', to_delete)
| gpl-3.0 | 4,818,344,919,758,018,000 | 39.562914 | 112 | 0.597878 | false |
alfredgamulo/cloud-custodian | c7n/resources/sar.py | 2 | 2779 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n.actions import Action
from c7n.filters import CrossAccountAccessFilter
from c7n.query import QueryResourceManager, TypeInfo
from c7n.manager import resources
from c7n.utils import type_schema, local_session
@resources.register('serverless-app')
class ServerlessApp(QueryResourceManager):
class resource_type(TypeInfo):
service = 'serverlessrepo'
arn = id = 'ApplicationId'
name = 'Name'
enum_spec = ('list_applications', 'Applications', None)
cfn_type = 'AWS::Serverless::Application'
default_report_fields = [
'ApplicationId', 'Name', 'CreationTime', 'SpdxLicenseId', 'Author']
@ServerlessApp.action_registry.register('delete')
class Delete(Action):
permissions = ('serverlessrepo:DeleteApplication',)
schema = type_schema('delete')
def process(self, resources):
client = local_session(
self.manager.session_factory).client('serverlessrepo')
for r in resources:
self.manager.retry(
client.delete_application,
ApplicationId=r['ApplicationId'])
@ServerlessApp.filter_registry.register('cross-account')
class CrossAccount(CrossAccountAccessFilter):
permissions = ('serverlessrepo:GetApplicationPolicy',)
policy_attribute = 'c7n:Policy'
def process(self, resources, event=None):
client = local_session(
self.manager.session_factory).client('serverlessrepo')
for r in resources:
if self.policy_attribute not in r:
r[self.policy_attribute] = p = client.get_application_policy(
ApplicationId=r['ApplicationId'])
p.pop('ResponseMetadata', None)
self.transform_policy(p)
return super().process(resources)
def transform_policy(self, policy):
"""Serverless Application repositories policies aren't valid iam policies.
Its a service specific spelling that violates basic constraints of the iam
schema. We attempt to normalize it to normal IAM spelling.
"""
policy['Statement'] = policy.pop('Statements')
for s in policy['Statement']:
actions = ['serverlessrepo:%s' % a for a in s['Actions']]
s['Actions'] = actions
if 'Effect' not in s:
s['Effect'] = 'Allow'
if 'Principals' in s:
s['Principal'] = {'AWS': s.pop('Principals')}
if 'PrincipalOrgIDs' in s:
org_ids = s.pop('PrincipalOrgIDs')
if org_ids:
s['Condition'] = {
'StringEquals': {'aws:PrincipalOrgID': org_ids}}
return policy
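    # Illustrative sketch of the normalization above (values are made up):
    #   {'Statements': [{'Actions': ['GetApplication'], 'Principals': ['123456789012']}]}
    # becomes
    #   {'Statement': [{'Actions': ['serverlessrepo:GetApplication'], 'Effect': 'Allow',
    #                   'Principal': {'AWS': ['123456789012']}}]}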
| apache-2.0 | 6,925,232,051,350,601,000 | 35.565789 | 82 | 0.625405 | false |
Belxjander/Kirito | Python-3.5.0-Amiga/Lib/test/test_pow.py | 7 | 4540 | import test.support, unittest
class PowTest(unittest.TestCase):
def powtest(self, type):
if type != float:
for i in range(-1000, 1000):
self.assertEqual(pow(type(i), 0), 1)
self.assertEqual(pow(type(i), 1), type(i))
self.assertEqual(pow(type(0), 1), type(0))
self.assertEqual(pow(type(1), 1), type(1))
for i in range(-100, 100):
self.assertEqual(pow(type(i), 3), i*i*i)
pow2 = 1
for i in range(0, 31):
self.assertEqual(pow(2, i), pow2)
if i != 30 : pow2 = pow2*2
for othertype in (int,):
for i in list(range(-10, 0)) + list(range(1, 10)):
ii = type(i)
for j in range(1, 11):
jj = -othertype(j)
pow(ii, jj)
for othertype in int, float:
for i in range(1, 100):
zero = type(0)
exp = -othertype(i/10.0)
if exp == 0:
continue
self.assertRaises(ZeroDivisionError, pow, zero, exp)
il, ih = -20, 20
jl, jh = -5, 5
kl, kh = -10, 10
asseq = self.assertEqual
if type == float:
il = 1
asseq = self.assertAlmostEqual
elif type == int:
jl = 0
elif type == int:
jl, jh = 0, 15
for i in range(il, ih+1):
for j in range(jl, jh+1):
for k in range(kl, kh+1):
if k != 0:
if type == float or j < 0:
self.assertRaises(TypeError, pow, type(i), j, k)
continue
asseq(
pow(type(i),j,k),
pow(type(i),j)% type(k)
)
def test_powint(self):
self.powtest(int)
def test_powlong(self):
self.powtest(int)
def test_powfloat(self):
self.powtest(float)
def test_other(self):
# Other tests-- not very systematic
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
for i in range(-10, 11):
for j in range(0, 6):
for k in range(-7, 11):
if j >= 0 and k != 0:
self.assertEqual(
pow(i,j) % k,
pow(i,j,k)
)
if j >= 0 and k != 0:
self.assertEqual(
pow(int(i),j) % k,
pow(int(i),j,k)
)
def test_bug643260(self):
class TestRpow:
def __rpow__(self, other):
return None
None ** TestRpow() # Won't fail when __rpow__ invoked. SF bug #643260.
def test_bug705231(self):
# -1.0 raised to an integer should never blow up. It did if the
# platform pow() was buggy, and Python didn't worm around it.
eq = self.assertEqual
a = -1.0
# The next two tests can still fail if the platform floor()
# function doesn't treat all large inputs as integers
# test_math should also fail if that is happening
eq(pow(a, 1.23e167), 1.0)
eq(pow(a, -1.23e167), 1.0)
for b in range(-10, 11):
eq(pow(a, float(b)), b & 1 and -1.0 or 1.0)
for n in range(0, 100):
fiveto = float(5 ** n)
# For small n, fiveto will be odd. Eventually we run out of
            # mantissa bits, though, and thereafter fiveto will be even.
expected = fiveto % 2.0 and -1.0 or 1.0
eq(pow(a, fiveto), expected)
eq(pow(a, -fiveto), expected)
eq(expected, 1.0) # else we didn't push fiveto to evenness
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 4,114,914,832,122,593,300 | 35.031746 | 79 | 0.448678 | false |
frePPLe/frePPLe | freppledb/common/urls.py | 1 | 4543 | #
# Copyright (C) 2007-2017 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.conf.urls import url
from django.views.generic.base import RedirectView
import freppledb.common.views
import freppledb.common.serializers
import freppledb.common.dashboard
from freppledb.common.api.views import APIIndexView
from freppledb.common.registration.views import (
ResetPasswordRequestView,
PasswordResetConfirmView,
)
# Automatically add these URLs when the application is installed
autodiscover = True
urlpatterns = [
# Cockpit screen
url(r"^$", freppledb.common.views.cockpit, name="cockpit"),
# User preferences
url(r"^preferences/$", freppledb.common.views.preferences, name="preferences"),
# Horizon updates
url(r"^horizon/$", freppledb.common.views.horizon, name="horizon"),
# Report settings
url(r"^settings/$", freppledb.common.views.saveSettings),
# Dashboard widgets
url(
r"^widget/(.+)/",
freppledb.common.dashboard.Dashboard.dispatch,
name="dashboard",
),
# Model list reports, which override standard admin screens
url(r"^data/login/$", freppledb.common.views.login),
url(
r"^data/auth/group/$",
freppledb.common.views.GroupList.as_view(),
name="auth_group_changelist",
),
url(
r"^data/common/user/$",
freppledb.common.views.UserList.as_view(),
name="common_user_changelist",
),
url(
r"^data/common/bucket/$",
freppledb.common.views.BucketList.as_view(),
name="common_bucket_changelist",
),
url(
r"^data/common/bucketdetail/$",
freppledb.common.views.BucketDetailList.as_view(),
name="common_bucketdetail_changelist",
),
url(
r"^data/common/parameter/$",
freppledb.common.views.ParameterList.as_view(),
name="common_parameter_changelist",
),
url(
r"^data/common/comment/$",
freppledb.common.views.CommentList.as_view(),
name="common_comment_changelist",
),
# Special case of the next line for user password changes in the user edit screen
url(
r"detail/common/user/(?P<id>.+)/password/$",
RedirectView.as_view(url="/data/common/user/%(id)s/password/"),
),
# Detail URL for an object, which internally redirects to the view for the last opened tab
url(r"^detail/([^/]+)/([^/]+)/(.+)/$", freppledb.common.views.detail),
# REST API framework
url(r"^api/common/bucket/$", freppledb.common.serializers.BucketAPI.as_view()),
url(
r"^api/common/bucketdetail/$",
freppledb.common.serializers.BucketDetailAPI.as_view(),
),
url(
r"^api/common/bucketdetail/$",
freppledb.common.serializers.BucketDetailAPI.as_view(),
),
url(
r"^api/common/parameter/$", freppledb.common.serializers.ParameterAPI.as_view()
),
url(r"^api/common/comment/$", freppledb.common.serializers.CommentAPI.as_view()),
url(
r"^api/common/bucket/(?P<pk>(.+))/$",
freppledb.common.serializers.BucketdetailAPI.as_view(),
),
url(
r"^api/common/bucketdetail/(?P<pk>(.+))/$",
freppledb.common.serializers.BucketDetaildetailAPI.as_view(),
),
url(
r"^api/common/parameter/(?P<pk>(.+))/$",
freppledb.common.serializers.ParameterdetailAPI.as_view(),
),
url(
r"^api/common/comment/(?P<pk>(.+))/$",
freppledb.common.serializers.CommentdetailAPI.as_view(),
),
url(r"^api/$", APIIndexView),
url(r"^about/$", freppledb.common.views.AboutView, name="about"),
# Forgotten password
url(
r"^reset_password_confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$",
PasswordResetConfirmView.as_view(),
name="reset_password_confirm",
),
url(
r"^reset_password/$", ResetPasswordRequestView.as_view(), name="reset_password"
),
]
| agpl-3.0 | 6,669,151,123,050,445,000 | 33.946154 | 94 | 0.654634 | false |
luminousflux/lflux | lfluxproject/limage/migrations/0003_auto__add_field_image_content_type.py | 1 | 2650 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Image.content_type'
db.add_column('limage_image', 'content_type',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Image.content_type'
db.delete_column('limage_image', 'content_type_id')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'limage.image': {
'Meta': {'object_name': 'Image'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
    complete_apps = ['limage']
| mit | -1,360,185,327,364,889,300 | 52.02 | 174 | 0.558113 | false |
msabramo/ansible | lib/ansible/modules/commands/expect.py | 11 | 7754 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Martz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: expect
version_added: 2.0
short_description: Executes a command and responds to prompts
description:
- The C(expect) module executes a command and responds to prompts
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work
options:
command:
description:
- the command module takes command to run.
required: true
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: false
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
required: false
chdir:
description:
- cd into this directory before running the command
required: false
responses:
description:
- Mapping of expected string/regex and string to respond with. If the
response is a list, successive matches return successive
responses. List functionality is new in 2.1.
required: true
timeout:
description:
- Amount of time in seconds to wait for the expected strings
default: 30
echo:
description:
- Whether or not to echo out your response strings
default: false
requirements:
- python >= 2.6
- pexpect >= 3.3
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), etc), you must specify a shell in the command such as
C(/bin/bash -c "/path/to/something | grep else")
- The question, or key, under I(responses) is a python regex match. Case
insensitive searches are indicated with a prefix of C(?i)
  - By default, if a question is encountered multiple times, its string
response will be repeated. If you need different responses for successive
question matches, instead of a string response, use a list of strings as
the response. The list functionality is new in 2.1
author: "Matt Martz (@sivel)"
'''
EXAMPLES = '''
# Case insensitive password string match
- expect:
command: passwd username
responses:
(?i)password: "MySekretPa$$word"
# Generic question with multiple different responses
- expect:
command: /path/to/custom/command
responses:
Question:
- response1
- response2
- response3
'''
import datetime
import os
try:
import pexpect
HAS_PEXPECT = True
except ImportError:
HAS_PEXPECT = False
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
def response_closure(module, question, responses):
resp_gen = (u'%s\n' % to_text(r).rstrip(u'\n') for r in responses)
def wrapped(info):
try:
return resp_gen.next()
except StopIteration:
module.fail_json(msg="No remaining responses for '%s', "
"output was '%s'" %
(question,
info['child_result_list'][-1]))
return wrapped
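# For example (hypothetical values): with responses={'Continue?': ['yes', 'no']},
# the closure built for 'Continue?' returns u'yes\n' on the first prompt match,
# u'no\n' on the second, and fails the module if the prompt appears a third time.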
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(required=True),
chdir=dict(),
creates=dict(),
removes=dict(),
responses=dict(type='dict', required=True),
timeout=dict(type='int', default=30),
echo=dict(type='bool', default=False),
)
)
if not HAS_PEXPECT:
module.fail_json(msg='The pexpect python module is required')
chdir = module.params['chdir']
args = module.params['command']
creates = module.params['creates']
removes = module.params['removes']
responses = module.params['responses']
timeout = module.params['timeout']
echo = module.params['echo']
events = dict()
for key, value in responses.items():
if isinstance(value, list):
response = response_closure(module, key, value)
else:
response = u'%s\n' % to_text(value).rstrip(u'\n')
events[key.decode()] = response
if args.strip() == '':
module.fail_json(rc=256, msg="no command given")
if chdir:
chdir = os.path.abspath(os.path.expanduser(chdir))
os.chdir(chdir)
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
v = os.path.expanduser(creates)
if os.path.exists(v):
module.exit_json(
cmd=args,
stdout="skipped, since %s exists" % v,
changed=False,
rc=0
)
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
v = os.path.expanduser(removes)
if not os.path.exists(v):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % v,
changed=False,
rc=0
)
startd = datetime.datetime.now()
try:
try:
# Prefer pexpect.run from pexpect>=4
out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
events=events, cwd=chdir, echo=echo,
encoding='utf-8')
except TypeError:
# Use pexpect.runu in pexpect>=3.3,<4
out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True,
events=events, cwd=chdir, echo=echo)
except (TypeError, AttributeError):
e = get_exception()
# This should catch all insufficient versions of pexpect
# We deem them insufficient for their lack of ability to specify
# to not echo responses via the run/runu functions, which would
# potentially leak sensentive information
module.fail_json(msg='Insufficient version of pexpect installed '
'(%s), this module requires pexpect>=3.3. '
'Error was %s' % (pexpect.__version__, e))
except pexpect.ExceptionPexpect:
e = get_exception()
module.fail_json(msg='%s' % e)
endd = datetime.datetime.now()
delta = endd - startd
if out is None:
out = ''
ret = dict(
cmd=args,
stdout=out.rstrip('\r\n'),
rc=rc,
start=str(startd),
end=str(endd),
delta=str(delta),
changed=True,
)
if rc is not None:
module.exit_json(**ret)
else:
ret['msg'] = 'command exceeded timeout'
module.fail_json(**ret)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,350,330,766,234,386,000 | 30.778689 | 79 | 0.607042 | false |
jptomo/rpython-lang-scheme | rpython/jit/backend/x86/runner.py | 1 | 5478 | import py
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER
from rpython.rlib import rgc
from rpython.jit.backend.x86.assembler import Assembler386
from rpython.jit.backend.x86.regalloc import gpr_reg_mgr_cls, xmm_reg_mgr_cls
from rpython.jit.backend.x86.profagent import ProfileAgent
from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU
from rpython.jit.backend.x86 import regloc
import sys
from rpython.tool.ansi_print import ansi_log
log = py.log.Producer('jitbackend')
py.log.setconsumer('jitbackend', ansi_log)
class AbstractX86CPU(AbstractLLCPU):
debug = True
supports_floats = True
supports_singlefloats = True
dont_keepalive_stuff = False # for tests
with_threads = False
frame_reg = regloc.ebp
from rpython.jit.backend.x86.arch import JITFRAME_FIXED_SIZE
all_reg_indexes = gpr_reg_mgr_cls.all_reg_indexes
gen_regs = gpr_reg_mgr_cls.all_regs
float_regs = xmm_reg_mgr_cls.all_regs
def __init__(self, rtyper, stats, opts=None, translate_support_code=False,
gcdescr=None):
AbstractLLCPU.__init__(self, rtyper, stats, opts,
translate_support_code, gcdescr)
profile_agent = ProfileAgent()
if rtyper is not None:
config = rtyper.annotator.translator.config
if config.translation.jit_profiler == "oprofile":
from rpython.jit.backend.x86 import oprofile
if not oprofile.OPROFILE_AVAILABLE:
log.WARNING('oprofile support was explicitly enabled, but oprofile headers seem not to be available')
profile_agent = oprofile.OProfileAgent()
self.with_threads = config.translation.thread
self.profile_agent = profile_agent
def set_debug(self, flag):
return self.assembler.set_debug(flag)
def setup(self):
self.assembler = Assembler386(self, self.translate_support_code)
def build_regalloc(self):
''' for tests'''
from rpython.jit.backend.x86.regalloc import RegAlloc
assert self.assembler is not None
return RegAlloc(self.assembler, False)
@rgc.no_release_gil
def setup_once(self):
self.profile_agent.startup()
if self.HAS_CODEMAP:
self.codemap.setup()
self.assembler.setup_once()
@rgc.no_release_gil
def finish_once(self):
self.assembler.finish_once()
self.profile_agent.shutdown()
def dump_loop_token(self, looptoken):
"""
NOT_RPYTHON
"""
from rpython.jit.backend.x86.tool.viewcode import machine_code_dump
data = []
label_list = [(offset, name) for name, offset in
looptoken._x86_ops_offset.iteritems()]
label_list.sort()
addr = looptoken._x86_rawstart
src = rffi.cast(rffi.CCHARP, addr)
for p in range(looptoken._x86_fullsize):
data.append(src[p])
data = ''.join(data)
lines = machine_code_dump(data, addr, self.backend_name, label_list)
print ''.join(lines)
def compile_bridge(self, faildescr, inputargs, operations,
original_loop_token, log=True, logger=None):
clt = original_loop_token.compiled_loop_token
clt.compiling_a_bridge()
return self.assembler.assemble_bridge(faildescr, inputargs, operations,
original_loop_token, log, logger)
def cast_ptr_to_int(x):
adr = llmemory.cast_ptr_to_adr(x)
return CPU386.cast_adr_to_int(adr)
cast_ptr_to_int._annspecialcase_ = 'specialize:arglltype(0)'
cast_ptr_to_int = staticmethod(cast_ptr_to_int)
def redirect_call_assembler(self, oldlooptoken, newlooptoken):
self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken)
def invalidate_loop(self, looptoken):
from rpython.jit.backend.x86 import codebuf
for addr, tgt in looptoken.compiled_loop_token.invalidate_positions:
mc = codebuf.MachineCodeBlockWrapper()
mc.JMP_l(tgt)
assert mc.get_relative_pos() == 5 # [JMP] [tgt 4 bytes]
mc.copy_to_raw_memory(addr - 1)
# positions invalidated
looptoken.compiled_loop_token.invalidate_positions = []
def get_all_loop_runs(self):
l = lltype.malloc(LOOP_RUN_CONTAINER,
len(self.assembler.loop_run_counters))
for i, ll_s in enumerate(self.assembler.loop_run_counters):
l[i].type = ll_s.type
l[i].number = ll_s.number
l[i].counter = ll_s.i
return l
class CPU386(AbstractX86CPU):
backend_name = 'x86'
NUM_REGS = 8
CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.esi, regloc.edi]
supports_longlong = True
IS_64_BIT = False
def __init__(self, *args, **kwargs):
assert sys.maxint == (2**31 - 1)
super(CPU386, self).__init__(*args, **kwargs)
class CPU386_NO_SSE2(CPU386):
supports_floats = False
supports_longlong = False
class CPU_X86_64(AbstractX86CPU):
backend_name = 'x86_64'
NUM_REGS = 16
CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15]
IS_64_BIT = True
HAS_CODEMAP = True
class CPU_X86_64_SSE4(CPU_X86_64):
vector_extension = True
vector_register_size = 16
vector_horizontal_operations = True
CPU = CPU386
| mit | -441,400,426,805,864,260 | 33.89172 | 121 | 0.640745 | false |
eHealthAfrica/onadata | onadata/apps/viewer/tests/test_remongo.py | 2 | 4545 | import os
from django.conf import settings
from django.core.management import call_command
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.apps.viewer.management.commands.remongo import Command
from onadata.libs.utils.common_tags import USERFORM_ID
class TestRemongo(TestBase):
def test_remongo_in_batches(self):
self._publish_transportation_form()
# submit 4 instances
self._make_submissions()
self.assertEqual(ParsedInstance.objects.count(), 4)
# clear mongo
settings.MONGO_DB.instances.drop()
c = Command()
c.handle(batchsize=3)
# mongo db should now have 5 records
count = settings.MONGO_DB.instances.count()
self.assertEqual(count, 4)
def test_remongo_with_username_id_string(self):
self._publish_transportation_form()
# submit 1 instances
s = self.surveys[0]
self._make_submission(os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
# publish and submit for a different user
self._logout()
self._create_user_and_login("harry", "harry")
self._publish_transportation_form()
s = self.surveys[1]
self._make_submission(os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
self.assertEqual(ParsedInstance.objects.count(), 2)
# clear mongo
settings.MONGO_DB.instances.drop()
c = Command()
c.handle(batchsize=3, username=self.user.username,
id_string=self.xform.id_string)
# mongo db should now have 2 records
count = settings.MONGO_DB.instances.count()
self.assertEqual(count, 1)
def test_indexes_exist(self):
"""
Make sure the required indexes are set, _userform_id as of now
"""
call_command('remongo')
# if index exists, ensure index returns None
# list of indexes to check for
index_list = [USERFORM_ID]
# get index info
index_info = settings.MONGO_DB.instances.index_information()
# index_info looks like this - {
# u'_id_': {u'key': [(u'_id', 1)], u'v': 1},
# u'_userform_id_1': {u'key': [(u'_userform_id', 1)], u'v': 1}}
# lets make a list of the indexes
existing_indexes = [v['key'][0][0] for v in index_info.itervalues()
if v['key'][0][1] == 1]
all_indexes_found = True
for index_item in index_list:
if index_item not in existing_indexes:
all_indexes_found = False
break
self.assertTrue(all_indexes_found)
def test_sync_mongo_with_all_option_deletes_existing_records(self):
self._publish_transportation_form()
userform_id = "%s_%s" % (self.user.username, self.xform.id_string)
initial_mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
for i in range(len(self.surveys)):
self._submit_transport_instance(i)
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
# check our mongo count
self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys))
# add dummy instance
settings.MONGO_DB.instances.save(
{"_id": 12345, "_userform_id": userform_id})
# make sure the dummy is returned as part of the forms mongo instances
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys) + 1)
# call sync_mongo WITHOUT the all option
call_command("sync_mongo", remongo=True)
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys) + 1)
# call sync_mongo WITH the all option
call_command("sync_mongo", remongo=True, update_all=True)
# check that we are back to just the submitted set
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys))
| bsd-2-clause | -3,514,362,526,714,842,600 | 43.126214 | 78 | 0.606381 | false |
gis4dis/poster | apps/processing/pmo/management/commands/pmo_import_watercourse_observation.py | 1 | 1400 | import logging
from django.core.management.base import BaseCommand
from apps.processing.pmo.util import util
from datetime import timedelta, datetime
from apps.processing.pmo.util.util import parse_date_range
logger = logging.getLogger(__name__)
#import_watercourse_observation
#import_watercourse_observation 2018-04-09
class Command(BaseCommand):
    help = 'Import data from PMO watercourse stations. Optionally you can pass a date; ' \
           'otherwise it will fetch the last past Monday.'
def add_arguments(self, parser):
parser.add_argument('date_range', nargs='?', type=parse_date_range,
default=[None, None])
def handle(self, *args, **options):
day_from, day_to = options['date_range']
if day_from is None:
now = datetime.now()
#now = parse('2018-03-29')
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
week_start = (today - timedelta(days=now.weekday()))
last_week_start = week_start + timedelta(weeks=-1)
day_from = last_week_start
day_to = last_week_start + timedelta(1)
day = day_from
logger.info('Importing observations of PMO watercourse observation')
while(day < day_to):
logger.info('Importing from file - date: %s', day)
util.load_hod(day)
day += timedelta(1)
| bsd-3-clause | 4,288,616,242,207,365,600 | 39 | 88 | 0.632143 | false |
moniker-dns/debian-beaver | beaver/queue.py | 2 | 3627 | # -*- coding: utf-8 -*-
import Queue
import signal
import sys
import time
from beaver.transports import create_transport
from beaver.transports.exception import TransportException
from unicode_dammit import unicode_dammit
def run_queue(queue, beaver_config, logger=None):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGQUIT, signal.SIG_DFL)
last_update_time = int(time.time())
queue_timeout = beaver_config.get('queue_timeout')
wait_timeout = beaver_config.get('wait_timeout')
transport = None
try:
logger.debug('Logging using the {0} transport'.format(beaver_config.get('transport')))
transport = create_transport(beaver_config, logger=logger)
failure_count = 0
while True:
if not transport.valid():
logger.info('Transport connection issues, stopping queue')
break
if int(time.time()) - last_update_time > queue_timeout:
logger.info('Queue timeout of "{0}" seconds exceeded, stopping queue'.format(queue_timeout))
break
try:
command, data = queue.get(block=True, timeout=wait_timeout)
last_update_time = int(time.time())
logger.debug('Last update time now {0}'.format(last_update_time))
except Queue.Empty:
logger.debug('No data')
continue
if command == 'callback':
if data.get('ignore_empty', False):
logger.debug('removing empty lines')
lines = data['lines']
new_lines = []
for line in lines:
message = unicode_dammit(line)
if len(message) == 0:
continue
new_lines.append(message)
data['lines'] = new_lines
if len(data['lines']) == 0:
logger.debug('0 active lines sent from worker')
continue
while True:
try:
transport.callback(**data)
break
except TransportException:
failure_count = failure_count + 1
if failure_count > beaver_config.get('max_failure'):
failure_count = beaver_config.get('max_failure')
sleep_time = beaver_config.get('respawn_delay') ** failure_count
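                    # Exponential backoff: e.g. respawn_delay=3 waits 3s, 9s, 27s, ...
                    # capped once failure_count reaches max_failure.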
logger.info('Caught transport exception, reconnecting in %d seconds' % sleep_time)
try:
transport.invalidate()
time.sleep(sleep_time)
transport.reconnect()
if transport.valid():
failure_count = 0
logger.info('Reconnected successfully')
except KeyboardInterrupt:
logger.info('User cancelled respawn.')
transport.interrupt()
sys.exit(0)
elif command == 'addglob':
beaver_config.addglob(*data)
transport.addglob(*data)
elif command == 'exit':
break
except KeyboardInterrupt:
logger.debug('Queue Interruped')
if transport is not None:
transport.interrupt()
logger.debug('Queue Shutdown')
| mit | 6,818,338,573,802,138,000 | 38 | 108 | 0.510339 | false |
nicolasmartinelli/PyRssMaker | addons/reddit/crawler.py | 3 | 8345 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from base.db import Article, Feed
from base.crawler import CrawlerBase
import json
import logging
import multiprocessing as mp
import re
from bs4 import BeautifulSoup
from mako.template import Template
l = logging.getLogger(__name__)
def _add_https(src):
if src and not re.match(r'^http', src):
return "https:" + src
else:
return src
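# e.g. _add_https('//i.imgur.com/abc.jpg') -> 'https://i.imgur.com/abc.jpg' (made-up URL);
# sources already starting with http(s) are returned unchanged.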
def _get_full_size_src_mp(
url, link, title, http_session, root_path, addon_path, tmpl_article):
src_list = []
content = ""
img_extensions = ["jpg", "jpeg", "png", "gif"]
################################################################################################
# Get the content depending on the source
################################################################################################
if link.split('.').pop().split('?')[0] in img_extensions or 'i.reddituploads.com' in link:
src_list.append({'src_img': link})
elif link.split('.').pop() == "gifv" and 'imgur.com' in link:
src_list.append({
'video': True,
'src_gif': link[:-1],
'src_mp4': link[:-4] + "mp4",
'src_webm': link[:-4] + "webm",
})
elif 'imgur.com' in link:
soup = BeautifulSoup(http_session.get(link, timeout=30).content, "lxml")
for div in soup.findAll("div", {'class': 'post-image'}):
if div.find("div", {'class': 'video-container'}):
src_mp4 = div.find("source", {'type': 'video/mp4'})
src_mp4 = src_mp4.attrs.get("src") if src_mp4 else ""
src_mp4 = _add_https(src_mp4)
src_webm = div.find("source", {'type': 'video/webm'})
src_webm = src_webm.attrs.get("src") if src_webm else ""
src_webm = _add_https(src_webm)
src_list.append({
'video': True,
'src_mp4': src_mp4,
'src_webm': src_webm,
})
else:
src_img = div.img
src_img = src_img.attrs.get("src") if src_img else ""
if src_img:
src_img = _add_https(src_img)
src_list.append({'src_img': src_img})
elif 'vidble.com' in link:
soup = BeautifulSoup(http_session.get(link, timeout=30).content, "lxml")
for img in soup.findAll("img", {'class': 'img2'}):
src_img = 'https://vidble.com/' + img.attrs.get("src", "").replace('_med', '')
src_list.append({'src_img': src_img})
elif 'gfycat' in link:
soup = BeautifulSoup(http_session.get(link, timeout=30).content, "lxml")
for meta in soup.findAll('meta', {'property': 'og:video:iframe'}):
img_id = meta.attrs.get('content').split('/')[-1]
src_gif = 'https://thumbs.gfycat.com/{}-size_restricted.gif'.format(img_id)
src_mp4 = 'https://giant.gfycat.com/{}.mp4'.format(img_id)
src_webm = 'https://giant.gfycat.com/{}.webm'.format(img_id)
src_list.append({
'video': True,
'src_gif': src_gif,
'src_mp4': src_mp4,
'src_webm': src_webm,
})
elif 'eroshare.com' in link:
soup = BeautifulSoup(http_session.get(link, timeout=30).content, "lxml")
for img in soup.findAll("img", {'class': 'album-image'}):
src_img = _add_https(img.attrs.get("src", ""))
src_list.append({'src_img': src_img})
for video in soup.findAll("video"):
src_mp4 = video.source
src_mp4 = src_mp4.attrs.get("src") if src_mp4 else ""
src_list.append({
'video': True,
'src_mp4': src_mp4,
})
elif 'tumblr.com' in link:
soup = BeautifulSoup(http_session.get(link, timeout=30).content, "lxml")
for section in soup.findAll("section", {'id': 'posts'}):
for figure in section.findAll("figure", {'class': 'post-content'}):
if figure.img:
src_img = _add_https(figure.img.attrs.get("src", ""))
src_list.append({'src_img': src_img})
elif 'pornwebms.com' in link:
name = link.split('/')[-1][:-5]
src_list.append({
'video': True,
'src_mp4': 'http://pornwebms.com/mp4/' + name + '.mp4',
'src_webm': 'http://pornwebms.com/video/' + name + '.webm',
})
elif 'v.redd.it' in link:
playlist = '{}/DASHPlaylist.mpd'.format(link)
soup = BeautifulSoup(http_session.get(playlist, timeout=30).content, "xml")
for media in soup.findAll('BaseURL'):
src_list.append({
'video': True,
'src_mp4': '{}/{}'.format(link, media.text),
})
break
################################################################################################
# Render that shit!
################################################################################################
tmpl = Template(
filename=tmpl_article,
module_directory=root_path + "tmp",
)
content = tmpl.render(alt=title, src_list=src_list, link=link)
return (url, content)
class Crawler(CrawlerBase):
def _login(self):
http_session = super()._login()
if self.conf['login'].get('user') and self.conf['login'].get('pass'):
login_data = {
'user': self.conf['login']['user'],
'passwd': self.conf['login']['pass'],
'api_type': 'json',
}
r = http_session.post(self.conf['login']['url'], data=login_data)
if r.status_code == 200:
j = json.loads(r.content.decode('utf-8'))
http_session.headers['X-Modhash'] = j['json']['data']['modhash']
return http_session
def crawl(self):
# Retrieve content of all feeds
feeds_content = self._get_feeds_content()
# Pool to download full size src
p_full_size = mp.Pool(processes=self.conf['misc']['max_dl'])
p_full_size_lst = []
# Let's start crawling..
articles_saved = 0
for feed in self.feeds.filter_by(refresh_todo=True):
feed.date_updated = self._date_updated
feed.date_updated_rfc3339 = self._date_updated_rfc3339
soup = BeautifulSoup(feeds_content[feed.url], "xml")
for article in soup.findAll("entry"):
article_url = article.link.attrs.get("href", "")
# Article already retrieved
if self._is_article_in_db(feed, article_url):
continue
# New article
article_link = ""
for link in BeautifulSoup(article.content.text, "lxml").findAll("a"):
if link.text == "[link]":
article_link = link.attrs.get("href")
if not article_link:
continue
self._add_article_in_db({
'feed': feed,
'url': article_url,
'title': article.title.string,
'content': "src_to_get",
})
articles_saved += 1
# Download full size src
p_full_size_lst.append(
p_full_size.apply_async(
_get_full_size_src_mp, args=(
article_url,
article_link,
article.title.string.encode('utf-8').decode('utf-8'),
self.http_session,
self.conf['misc']['root_path'],
self.conf['misc']['addon_path'],
self.conf['rss']['template']['article'],
)
)
)
# Get full size src
p_full_size.close()
p_full_size.join()
for p in p_full_size_lst:
url, content = p.get()
self.db_session.query(Article).filter_by(url=url).first().content = content
# Finalize transaction
self.db_session.commit()
l.info("%s article(s) saved", articles_saved)
| mit | -4,024,817,007,599,314,400 | 38.178404 | 100 | 0.475374 | false |
Brett55/moto | moto/iotdata/models.py | 8 | 6953 | from __future__ import unicode_literals
import json
import time
import boto3
import jsondiff
from moto.core import BaseBackend, BaseModel
from moto.iot import iot_backends
from .exceptions import (
ResourceNotFoundException,
InvalidRequestException
)
class FakeShadow(BaseModel):
"""See the specification:
http://docs.aws.amazon.com/iot/latest/developerguide/thing-shadow-document-syntax.html
"""
def __init__(self, desired, reported, requested_payload, version, deleted=False):
self.desired = desired
self.reported = reported
self.requested_payload = requested_payload
self.version = version
self.timestamp = int(time.time())
self.deleted = deleted
self.metadata_desired = self._create_metadata_from_state(self.desired, self.timestamp)
self.metadata_reported = self._create_metadata_from_state(self.reported, self.timestamp)
@classmethod
def create_from_previous_version(cls, previous_shadow, payload):
"""
        Pass None as the payload when you want to delete the shadow.
"""
version, previous_payload = (previous_shadow.version + 1, previous_shadow.to_dict(include_delta=False)) if previous_shadow else (1, {})
if payload is None:
# if given payload is None, delete existing payload
# this means the request was delete_thing_shadow
shadow = FakeShadow(None, None, None, version, deleted=True)
return shadow
# we can make sure that payload has 'state' key
desired = payload['state'].get(
'desired',
previous_payload.get('state', {}).get('desired', None)
)
reported = payload['state'].get(
'reported',
previous_payload.get('state', {}).get('reported', None)
)
shadow = FakeShadow(desired, reported, payload, version)
return shadow
@classmethod
def parse_payload(cls, desired, reported):
if desired is None:
delta = reported
elif reported is None:
delta = desired
else:
delta = jsondiff.diff(desired, reported)
return delta
def _create_metadata_from_state(self, state, ts):
"""
        state must be a desired- or reported-style dict object;
        replaces primitive types with {"timestamp": ts} in the dict
"""
if state is None:
return None
def _f(elem, ts):
if isinstance(elem, dict):
return {_: _f(elem[_], ts) for _ in elem.keys()}
if isinstance(elem, list):
return [_f(_, ts) for _ in elem]
return {"timestamp": ts}
return _f(state, ts)
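        # For example, with ts=1 the state {"color": "red", "dims": [1, 2]} becomes
        # {"color": {"timestamp": 1}, "dims": [{"timestamp": 1}, {"timestamp": 1}]}.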
def to_response_dict(self):
desired = self.requested_payload['state'].get('desired', None)
reported = self.requested_payload['state'].get('reported', None)
payload = {}
if desired is not None:
payload['desired'] = desired
if reported is not None:
payload['reported'] = reported
metadata = {}
if desired is not None:
metadata['desired'] = self._create_metadata_from_state(desired, self.timestamp)
if reported is not None:
metadata['reported'] = self._create_metadata_from_state(reported, self.timestamp)
return {
'state': payload,
'metadata': metadata,
'timestamp': self.timestamp,
'version': self.version
}
def to_dict(self, include_delta=True):
"""returning nothing except for just top-level keys for now.
"""
if self.deleted:
return {
'timestamp': self.timestamp,
'version': self.version
}
delta = self.parse_payload(self.desired, self.reported)
payload = {}
if self.desired is not None:
payload['desired'] = self.desired
if self.reported is not None:
payload['reported'] = self.reported
if include_delta and (delta is not None and len(delta.keys()) != 0):
payload['delta'] = delta
metadata = {}
if self.metadata_desired is not None:
metadata['desired'] = self.metadata_desired
if self.metadata_reported is not None:
metadata['reported'] = self.metadata_reported
return {
'state': payload,
'metadata': metadata,
'timestamp': self.timestamp,
'version': self.version
}
class IoTDataPlaneBackend(BaseBackend):
def __init__(self, region_name=None):
super(IoTDataPlaneBackend, self).__init__()
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def update_thing_shadow(self, thing_name, payload):
"""
spec of payload:
- need node `state`
- state node must be an Object
- State contains an invalid node: 'foo'
"""
thing = iot_backends[self.region_name].describe_thing(thing_name)
# validate
try:
payload = json.loads(payload)
except ValueError:
raise InvalidRequestException('invalid json')
if 'state' not in payload:
raise InvalidRequestException('need node `state`')
if not isinstance(payload['state'], dict):
raise InvalidRequestException('state node must be an Object')
if any(_ for _ in payload['state'].keys() if _ not in ['desired', 'reported']):
raise InvalidRequestException('State contains an invalid node')
new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)
thing.thing_shadow = new_shadow
return thing.thing_shadow
def get_thing_shadow(self, thing_name):
thing = iot_backends[self.region_name].describe_thing(thing_name)
if thing.thing_shadow is None or thing.thing_shadow.deleted:
raise ResourceNotFoundException()
return thing.thing_shadow
def delete_thing_shadow(self, thing_name):
"""after deleting, get_thing_shadow will raise ResourceNotFound.
But version of the shadow keep increasing...
"""
thing = iot_backends[self.region_name].describe_thing(thing_name)
if thing.thing_shadow is None:
raise ResourceNotFoundException()
payload = None
new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)
thing.thing_shadow = new_shadow
return thing.thing_shadow
def publish(self, topic, qos, payload):
# do nothing because client won't know about the result
return None
available_regions = boto3.session.Session().get_available_regions("iot-data")
iotdata_backends = {region: IoTDataPlaneBackend(region) for region in available_regions}
| apache-2.0 | 8,142,476,949,199,819,000 | 35.025907 | 143 | 0.606932 | false |
vladryk/horizon | openstack_dashboard/dashboards/project/volumes/backups/views.py | 22 | 4331 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.volumes.backups \
import forms as backup_forms
from openstack_dashboard.dashboards.project.volumes.backups \
import tables as backup_tables
from openstack_dashboard.dashboards.project.volumes.backups \
import tabs as backup_tabs
class CreateBackupView(forms.ModalFormView):
form_class = backup_forms.CreateBackupForm
modal_header = _("Create Volume Backup")
template_name = 'project/volumes/backups/create_backup.html'
submit_label = _("Create Volume Backup")
submit_url = "horizon:project:volumes:volumes:create_backup"
success_url = reverse_lazy("horizon:project:volumes:backups_tab")
page_title = _("Create a Volume Backup")
def get_context_data(self, **kwargs):
context = super(CreateBackupView, self).get_context_data(**kwargs)
context['volume_id'] = self.kwargs['volume_id']
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
return {"volume_id": self.kwargs["volume_id"]}
class BackupDetailView(tabs.TabView):
tab_group_class = backup_tabs.BackupDetailTabs
template_name = 'project/volumes/backups/detail.html'
page_title = _("Volume Backup Details: {{ backup.name }}")
def get_context_data(self, **kwargs):
context = super(BackupDetailView, self).get_context_data(**kwargs)
backup = self.get_data()
table = backup_tables.BackupsTable(self.request)
context["backup"] = backup
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(backup)
return context
@memoized.memoized_method
def get_data(self):
try:
backup_id = self.kwargs['backup_id']
backup = api.cinder.volume_backup_get(self.request,
backup_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve backup details.'),
redirect=self.get_redirect_url())
return backup
def get_tabs(self, request, *args, **kwargs):
backup = self.get_data()
return self.tab_group_class(request, backup=backup, **kwargs)
@staticmethod
def get_redirect_url():
return reverse('horizon:project:volumes:index')
class RestoreBackupView(forms.ModalFormView):
form_class = backup_forms.RestoreBackupForm
modal_header = _("Restore Volume Backup")
template_name = 'project/volumes/backups/restore_backup.html'
submit_label = _("Restore Backup to Volume")
submit_url = "horizon:project:volumes:backups:restore"
success_url = reverse_lazy('horizon:project:volumes:index')
page_title = _("Restore a Volume Backup")
def get_context_data(self, **kwargs):
context = super(RestoreBackupView, self).get_context_data(**kwargs)
context['backup_id'] = self.kwargs['backup_id']
args = (self.kwargs['backup_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
backup_id = self.kwargs['backup_id']
backup_name = self.request.GET.get('backup_name')
volume_id = self.request.GET.get('volume_id')
return {
'backup_id': backup_id,
'backup_name': backup_name,
'volume_id': volume_id,
}
| apache-2.0 | -4,890,105,877,113,847,000 | 38.372727 | 75 | 0.670746 | false |
tylertian/Openstack | openstack F/python-glanceclient/glanceclient/v1/client.py | 5 | 1395 | # Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient.common import http
from glanceclient.v1 import images
from glanceclient.v1 import image_members
class Client(http.HTTPClient):
"""Client for the OpenStack Images v1 API.
:param string endpoint: A user-supplied endpoint URL for the glance
service.
:param string token: Token for authentication.
:param integer timeout: Allows customization of the timeout for client
http requests. (optional)
"""
def __init__(self, *args, **kwargs):
"""Initialize a new client for the Images v1 API."""
super(Client, self).__init__(*args, **kwargs)
self.images = images.ImageManager(self)
self.image_members = image_members.ImageMemberManager(self)
| apache-2.0 | 107,574,272,620,935,420 | 38.857143 | 78 | 0.688889 | false |
grilo/ansible-1 | lib/ansible/modules/cloud/openstack/os_image.py | 8 | 6880 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#TODO(mordred): we need to support "location"(v1) and "locations"(v2)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_image
short_description: Add/Delete images from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove images from the OpenStack Image Repository
options:
name:
description:
- Name that has to be given to the image
required: true
default: None
id:
version_added: "2.4"
description:
- The Id of the image
required: false
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space (in GB) required to boot this image
required: false
default: None
min_ram:
description:
- The minimum ram (in MB) required to boot this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
required: false
default: 'yes'
filename:
description:
- The path to the file which has to be uploaded
required: false
default: None
ramdisk:
description:
- The name of an existing ramdisk image that will be associated with this image
required: false
default: None
kernel:
description:
- The name of an existing kernel image that will be associated with this image
required: false
default: None
properties:
description:
- Additional properties to be associated with this image
required: false
default: {}
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
- os_image:
auth:
auth_url: http://localhost/auth/v2.0
username: admin
password: passme
project_name: admin
name: cirros
container_format: bare
disk_format: qcow2
state: present
filename: cirros-0.3.0-x86_64-disk.img
kernel: cirros-vmlinuz
ramdisk: cirros-initrd
properties:
cpu_arch: x86_64
distro: ubuntu
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
id = dict(default=None),
disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
owner = dict(default=None),
min_disk = dict(type='int', default=0),
min_ram = dict(type='int', default=0),
is_public = dict(type='bool', default=False),
filename = dict(default=None),
ramdisk = dict(default=None),
kernel = dict(default=None),
properties = dict(type='dict', default={}),
state = dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = False
image = cloud.get_image(name_or_id=module.params['name'])
if module.params['state'] == 'present':
if not image:
image = cloud.create_image(
name=module.params['name'],
id=module.params['id'],
filename=module.params['filename'],
disk_format=module.params['disk_format'],
container_format=module.params['container_format'],
wait=module.params['wait'],
timeout=module.params['timeout'],
is_public=module.params['is_public'],
min_disk=module.params['min_disk'],
min_ram=module.params['min_ram']
)
changed = True
if not module.params['wait']:
module.exit_json(changed=changed, image=image, id=image.id)
cloud.update_image_properties(
image=image,
kernel=module.params['kernel'],
ramdisk=module.params['ramdisk'],
**module.params['properties'])
image = cloud.get_image(name_or_id=image.id)
module.exit_json(changed=changed, image=image, id=image.id)
elif module.params['state'] == 'absent':
if not image:
changed = False
else:
cloud.delete_image(
name_or_id=module.params['name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
| gpl-3.0 | 3,854,045,003,820,647,000 | 31.761905 | 143 | 0.598983 | false |
Succeed-Together/bakfu | core/classes.py | 2 | 4230 | # -*- coding: utf-8 -*-
'''
Main classes
'''
from abc import abstractmethod
import inspect
from itertools import chain
def get_args_and_kwargs(var):
    '''Extracts args and kwargs variables from
the following forms :
((*args),{kwargs}),
(*args),
{kwargs},
Returns :
(args,kwargs)
The form :
(*args,{kwargs}) can lead to errors
and is not accepted
'''
if isinstance(var, dict):
return (), var
elif len(var) == 2 and isinstance(var[1], dict):
return var
return var, {}
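# A minimal sketch of the three accepted forms (illustrative values only, not from
# the original module):
#   get_args_and_kwargs({'a': 1})            -> ((), {'a': 1})
#   get_args_and_kwargs((1, 2))              -> ((1, 2), {})
#   get_args_and_kwargs(((1, 2), {'a': 1}))  -> ((1, 2), {'a': 1})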
class Processor(object):
'''
The base class for the processing chain.
    Each processor represents a step in the chain.
    The processor acts as a wrapper to other classes.
    It works in 2 steps:
#) init
#) run
'''
init_args = ()
init_kwargs = ()
run_args = ()
run_kwargs = ()
init_method = None
run_method = None
@staticmethod
def _update_kwargs(valid_list, source, target):
'''
        Find items matching keys in source from valid_list
and put them in target.
'''
for key in valid_list:
if key != 'self' and key in source:
target[key] = source.pop(key)
return target
@classmethod
def get_args_list(cls, method):
'''Get argument list from wrapped class.'''
return inspect.getargspec(method).args
@classmethod
def get_args_list_init(cls):
'''Get argument list from wrapped class when calling init.'''
if cls.init_method:
return cls.get_args_list(cls.init_method)
return ()
@classmethod
def get_args_list_run(cls):
'''Get argument list from wrapped class when calling run.'''
if cls.run_method:
return cls.get_args_list(cls.run_method)
return ()
def __init__(self, *args, **kwargs):
self._next = None
self._prev = None
self._data = {}
@abstractmethod
def run(self, caller, *args, **kwargs):
pass
@staticmethod
def init_run_static(cls, caller, predecessor, *args, **kwargs):
'''
This method will parse args, kwargs and
call functions __init__ and run with the
corresponding parameters.
'''
if '_init' in kwargs:
init_args, init_kwargs = get_args_and_kwargs(kwargs.pop('_init'))
else:
init_args, init_kwargs = (), {}
#useless block ?
#for kw in cls.init_kwargs:
#if kw in kwargs:
#init_kwargs[kw] = kwargs.pop(kw)
init_valid_keys = chain(cls.get_args_list_init(), cls.init_kwargs)
cls._update_kwargs(init_valid_keys, kwargs, init_kwargs)
if len(init_args) == 0:
init_args = args
obj = cls(*init_args, **init_kwargs)
obj._prev = predecessor
if '_run' in kwargs:
run_args, run_kwargs = get_args_and_kwargs(kwargs.pop('_run'))
else:
run_args, run_kwargs = args, kwargs
#useless block ?
#for kw in cls.run_kwargs:
#if kw in kwargs:
#run_kwargs[kw] = kwargs.pop(kw)
run_valid_keys = chain(cls.get_args_list_run(), cls.run_kwargs)
cls._update_kwargs(run_valid_keys, kwargs, run_kwargs)
if len(run_args) == 0:
run_args = args
obj.run(caller, *run_args, **run_kwargs)
return obj
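    # Illustrative sketch with a hypothetical subclass and caller (these names are not
    # part of this module): keyword routing via '_init' and '_run' means that
    #   MyProcessor.init_run(caller, prev, _init={'lang': 'en'}, _run=(['text'],))
    # behaves roughly like MyProcessor(lang='en').run(caller, ['text']).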
@classmethod
def init_run(cls, caller, *args, **kwargs):
return Processor.init_run_static(cls, caller, *args, **kwargs)
def next(self):
'''Returns the next Processor in the chain.'''
return self._next
def prev(self):
'''Returns the previous Processor in the chain.'''
return self._prev
def get(self, key):
        '''Look up key in this processor's data; if not found, look in the previous element.'''
if key in self._data:
return self._data.get(key)
return self.prev().get(key)
def update(self, **kwargs):
'''Update _data '''
self._data.update(kwargs)
return self
def __repr__(self):
try:
return self.registered_name
except:
return object.__repr__(self)
| bsd-3-clause | 3,905,572,750,561,465,000 | 25.111111 | 77 | 0.560757 | false |
rg3/youtube-dl | youtube_dl/extractor/carambatv.py | 20 | 3524 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
int_or_none,
try_get,
)
from .videomore import VideomoreIE
class CarambaTVIE(InfoExtractor):
_VALID_URL = r'(?:carambatv:|https?://video1\.carambatv\.ru/v/)(?P<id>\d+)'
_TESTS = [{
'url': 'http://video1.carambatv.ru/v/191910501',
'md5': '2f4a81b7cfd5ab866ee2d7270cb34a2a',
'info_dict': {
'id': '191910501',
'ext': 'mp4',
'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 2678.31,
},
}, {
'url': 'carambatv:191910501',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://video1.carambatv.ru/v/%s/videoinfo.js' % video_id,
video_id)
title = video['title']
base_url = video.get('video') or 'http://video1.carambatv.ru/v/%s/' % video_id
formats = [{
'url': base_url + f['fn'],
'height': int_or_none(f.get('height')),
'format_id': '%sp' % f['height'] if f.get('height') else None,
} for f in video['qualities'] if f.get('fn')]
self._sort_formats(formats)
thumbnail = video.get('splash')
duration = float_or_none(try_get(
video, lambda x: x['annotations'][0]['end_time'], compat_str))
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
class CarambaTVPageIE(InfoExtractor):
_VALID_URL = r'https?://carambatv\.ru/(?:[^/]+/)+(?P<id>[^/?#&]+)'
_TEST = {
'url': 'http://carambatv.ru/movie/bad-comedian/razborka-v-manile/',
'md5': 'a49fb0ec2ad66503eeb46aac237d3c86',
'info_dict': {
'id': '475222',
'ext': 'flv',
'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
'thumbnail': r're:^https?://.*\.jpg',
# duration reported by videomore is incorrect
'duration': int,
},
'add_ie': [VideomoreIE.ie_key()],
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
videomore_url = VideomoreIE._extract_url(webpage)
if not videomore_url:
videomore_id = self._search_regex(
r'getVMCode\s*\(\s*["\']?(\d+)', webpage, 'videomore id',
default=None)
if videomore_id:
videomore_url = 'videomore:%s' % videomore_id
if videomore_url:
title = self._og_search_title(webpage)
return {
'_type': 'url_transparent',
'url': videomore_url,
'ie_key': VideomoreIE.ie_key(),
'title': title,
}
video_url = self._og_search_property('video:iframe', webpage, default=None)
if not video_url:
video_id = self._search_regex(
r'(?:video_id|crmb_vuid)\s*[:=]\s*["\']?(\d+)',
webpage, 'video id')
video_url = 'carambatv:%s' % video_id
return self.url_result(video_url, CarambaTVIE.ie_key())
| unlicense | 5,214,221,152,321,013,000 | 31.074074 | 86 | 0.516744 | false |
russbishop/swift | test/Driver/Dependencies/Inputs/update-dependencies-bad.py | 10 | 1072 | #!/usr/bin/env python
# update-dependencies-bad.py - Fails on bad.swift -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Fails if the input file is named "bad.swift"; otherwise dispatches to
# update-dependencies.py.
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import sys
assert sys.argv[1] == '-frontend'
primaryFile = sys.argv[sys.argv.index('-primary-file') + 1]
if os.path.basename(primaryFile) == 'bad.swift':
print("Handled", os.path.basename(primaryFile))
sys.exit(1)
dir = os.path.dirname(os.path.abspath(__file__))
execfile(os.path.join(dir, "update-dependencies.py"))
| apache-2.0 | -7,649,270,629,936,343,000 | 31.484848 | 78 | 0.617537 | false |
GoogleCloudPlatform/datacatalog-connectors-hive | google-datacatalog-apache-atlas-connector/tests/google/datacatalog_connectors/apache_atlas/scrape/metadata_scraper_test.py | 1 | 6756 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest import mock
from google.datacatalog_connectors.commons_test import utils
from google.datacatalog_connectors.apache_atlas import scrape
class MetadataScraperTest(unittest.TestCase):
__MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
__SCRAPE_PACKAGE = 'google.datacatalog_connectors.apache_atlas.scrape'
@mock.patch(
'{}.apache_atlas_facade.ApacheAtlasFacade'.format(__SCRAPE_PACKAGE))
def setUp(self, apache_atlas_facade):
self.__scrape = scrape.MetadataScraper({
'host': 'my_host',
'port': 'my_port',
'user': 'my_user',
'pass': 'my_pass',
})
# Shortcut for the object assigned
# to self.__scrape.__apache_atlas_facade
self.__apache_atlas_facade = apache_atlas_facade.return_value
def test_constructor_should_set_instance_attributes(self):
attrs = self.__scrape.__dict__
self.assertIsNotNone(attrs['_apache_atlas_facade'])
@mock.patch(
'{}.metadata_enricher.MetadataEnricher.enrich_entity_relationships'.
format(__SCRAPE_PACKAGE))
@mock.patch(
'{}.metadata_enricher.MetadataEnricher.enrich_entity_classifications'.
format(__SCRAPE_PACKAGE))
def test_scrape_should_return_metadata(self, enrich_entity_classifications,
enrich_entity_relationships):
# Step 1 - create the lazy object returned by Apache Atlas facade
typedef = utils.MockedObject()
typedefs = [typedef]
self.__apache_atlas_facade.get_typedefs.return_value = typedefs
# Step 2 - create the return for Atlas classifications
classifications_defs = self.__make_classification_object()
typedef.classificationDefs = classifications_defs
        # Step 3 - create the return for Atlas Enum types
typedef.enumDefs = self.__make_enum_types_object()
# Step 4 - create the return for Atlas Entity types
entity_types = self.__make_entity_type_object()
typedef.entityDefs = entity_types
# Following steps are executed for each entity type.
# Step 5 - create the return for search results
search_results = self.__make_search_results_object()
self.__apache_atlas_facade.\
search_entities_from_entity_type.return_value = search_results
# Step 6 - create the return for fetched entities
fetched_entities_dict = \
utils.Utils.convert_json_to_object(self.__MODULE_PATH,
'fetched_entities_dict.json')
self.__apache_atlas_facade.fetch_entities.\
return_value = fetched_entities_dict
# Step 7 - create the return for entities classifications
entity_classifications = \
utils.Utils.convert_json_to_object(self.__MODULE_PATH,
'entity_classifications.json')
self.__apache_atlas_facade.\
fetch_entity_classifications.return_value = entity_classifications
metadata, _ = self.__scrape.get_metadata()
types_count = 51
self.assertEqual(3, len(metadata))
self.assertEqual(types_count, len(metadata['entity_types']))
self.assertEqual(8, len(metadata['entity_types']['Table']['entities']))
expected_table_metadata = utils.Utils.convert_json_to_object(
self.__MODULE_PATH, 'expected_table_metadata.json')
self.assertDictEqual(expected_table_metadata,
metadata['entity_types']['Table'])
enrich_entity_relationships.assert_called_once()
self.__apache_atlas_facade.get_typedefs.assert_called_once()
self.assertEqual(
types_count, self.__apache_atlas_facade.
search_entities_from_entity_type.call_count)
self.assertEqual(types_count,
self.__apache_atlas_facade.fetch_entities.call_count)
self.assertEqual(types_count, enrich_entity_classifications.call_count)
def __make_classification_object(self):
classifications = \
utils.Utils.convert_json_to_object(self.__MODULE_PATH,
'classifications.json')
classifications_obj = []
for classification in classifications:
classification_obj = utils.MockedObject()
classification_obj.name = classification['name']
classification_obj.guid = classification['guid']
classification_obj._data = classification['data']
classifications_obj.append(classification_obj)
return classifications_obj
def __make_entity_type_object(self):
entity_types = \
utils.Utils.convert_json_to_object(self.__MODULE_PATH,
'entity_types.json')
entity_types_obj = []
for entity_type in entity_types:
entity_type_obj = utils.MockedObject()
entity_type_obj.name = entity_type['name']
entity_type_obj.superTypes = entity_type['superTypes']
entity_type_obj._data = entity_type['data']
entity_types_obj.append(entity_type_obj)
return entity_types_obj
def __make_search_results_object(self):
search_results = \
utils.Utils.convert_json_to_object(self.__MODULE_PATH,
'search_results.json')
search_results_obj = []
for search_result in search_results:
search_result_obj = utils.MockedObject()
search_result_obj.guid = search_result['guid']
search_result_obj._data = search_result['data']
search_results_obj.append(search_result_obj)
return search_results_obj
@classmethod
def __make_enum_types_object(cls):
enum_types_obj = []
enum_type_obj = utils.MockedObject()
enum_type_obj.name = 'my_enum_type'
enum_type_obj._data = {}
enum_type_obj.guid = '123'
enum_types_obj.append(enum_type_obj)
return enum_types_obj
| apache-2.0 | 1,052,945,719,632,120,700 | 39.698795 | 79 | 0.626258 | false |
pythonitalia/assopy | assopy/stripe/tests/test_templatetags.py | 2 | 2522 | from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.template import Template, Context
from . import factories as f
class TestStripeTemplateTags(TestCase):
def setUp(self):
# clean fares
from conference.models import Fare
Fare.objects.all().delete()
def test_stripe_checkout_script_template_tag(self):
"""
Tests that the 'stripe_checkout_script' template tag works properly
"""
fare = f.FareFactory()
order = f.OrderFactory(items=[(fare, {"qty": 1})])
t = Template("{% load stripe_tags %}{% stripe_checkout_script order %}")
data = t.render(Context({"order": order}))
self.assertIn('"https://checkout.stripe.com/checkout.js" class="stripe-button"', data)
self.assertIn('data-key="pk_test_qRUg4tJTFJgUiLz0FxKnuOXO"', data)
self.assertIn('data-amount="1000"', data)
self.assertIn('data-name="Foo Bar"', data)
self.assertIn('data-description="%s"' % order.orderitem_set.all()[0].description, data)
self.assertIn('data-image="foo-bar-logo-url"', data)
self.assertIn('data-currency="EUR"', data)
self.assertIn('data-allow-remember-me="false"', data)
self.assertIn('data-email="%s"' % order.user.user.email, data)
def test_stripe_checkout_form_template_tag(self):
"""
Tests that the 'stripe_checkout_form' template tag works properly
"""
fare = f.FareFactory()
order = f.OrderFactory(items=[(fare, {"qty": 1})])
t = Template("{% load stripe_tags %}{% stripe_checkout_form order %}")
data = t.render(Context({"order": order}))
url = reverse("assopy-stripe-checkout", args=(order.pk,))
self.assertIn('<form action="%s" method="POST">' % url, data)
self.assertIn('"https://checkout.stripe.com/checkout.js" class="stripe-button"', data)
self.assertIn('data-key="pk_test_qRUg4tJTFJgUiLz0FxKnuOXO"', data)
self.assertIn('data-amount="1000"', data)
self.assertIn('data-name="Foo Bar"', data)
self.assertIn('data-description="%s"' % order.orderitem_set.all()[0].description, data)
self.assertIn('data-image="foo-bar-logo-url"', data)
self.assertIn('data-currency="EUR"', data)
self.assertIn('data-allow-remember-me="false"', data)
self.assertIn('data-email="%s"' % order.user.user.email, data)
| bsd-2-clause | 399,961,390,798,222,200 | 43.245614 | 95 | 0.635607 | false |
CiscoSystems/tempest | tempest/api/image/v2/test_images_member.py | 3 | 4250 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest import test
class ImagesMemberTest(base.BaseV2MemberImageTest):
_interface = 'json'
@test.attr(type='gate')
def test_image_share_accept(self):
image_id = self._create_image()
member = self.os_img_client.add_member(image_id,
self.alt_tenant_id)
self.assertEqual(member['member_id'], self.alt_tenant_id)
self.assertEqual(member['image_id'], image_id)
self.assertEqual(member['status'], 'pending')
self.assertNotIn(image_id, self._list_image_ids_as_alt())
self.alt_img_client.update_member_status(image_id,
self.alt_tenant_id,
'accepted')
self.assertIn(image_id, self._list_image_ids_as_alt())
body = self.os_img_client.get_image_membership(image_id)
members = body['members']
member = members[0]
self.assertEqual(len(members), 1, str(members))
self.assertEqual(member['member_id'], self.alt_tenant_id)
self.assertEqual(member['image_id'], image_id)
self.assertEqual(member['status'], 'accepted')
@test.attr(type='gate')
def test_image_share_reject(self):
image_id = self._create_image()
member = self.os_img_client.add_member(image_id,
self.alt_tenant_id)
self.assertEqual(member['member_id'], self.alt_tenant_id)
self.assertEqual(member['image_id'], image_id)
self.assertEqual(member['status'], 'pending')
self.assertNotIn(image_id, self._list_image_ids_as_alt())
self.alt_img_client.update_member_status(image_id,
self.alt_tenant_id,
'rejected')
self.assertNotIn(image_id, self._list_image_ids_as_alt())
@test.attr(type='gate')
def test_get_image_member(self):
image_id = self._create_image()
self.os_img_client.add_member(image_id,
self.alt_tenant_id)
self.alt_img_client.update_member_status(image_id,
self.alt_tenant_id,
'accepted')
self.assertIn(image_id, self._list_image_ids_as_alt())
member = self.os_img_client.get_member(image_id,
self.alt_tenant_id)
self.assertEqual(self.alt_tenant_id, member['member_id'])
self.assertEqual(image_id, member['image_id'])
self.assertEqual('accepted', member['status'])
@test.attr(type='gate')
def test_remove_image_member(self):
image_id = self._create_image()
self.os_img_client.add_member(image_id,
self.alt_tenant_id)
self.alt_img_client.update_member_status(image_id,
self.alt_tenant_id,
'accepted')
self.assertIn(image_id, self._list_image_ids_as_alt())
self.os_img_client.remove_member(image_id, self.alt_tenant_id)
self.assertNotIn(image_id, self._list_image_ids_as_alt())
@test.attr(type='gate')
def test_get_image_member_schema(self):
body = self.os_img_client.get_schema("member")
self.assertEqual("member", body['name'])
@test.attr(type='gate')
def test_get_image_members_schema(self):
body = self.os_img_client.get_schema("members")
self.assertEqual("members", body['name'])
| apache-2.0 | -77,989,964,925,644,900 | 45.195652 | 78 | 0.568941 | false |
cchanning/Impala | tests/common/impala_cluster_cm.py | 14 | 11556 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Basic object model for an Impala cluster. Basic model is an "Impala Service" which
# represents a collection of ImpalaD processes and a State Store service. The Impala
# service is associated with an ImpalaCluster which has information on all the hosts
# / machines in the cluster along with the different services available (currently
# only Impala).
# To authenticate remote operation over SSH set the IMPALA_SSH_PRIVATE_KEY,
# IMPALA_SSH_PRIVATE_KEY_PASSWORD (if applicable), and IMPALA_SSH_USER environment
# variables. If not set the current user and default keys will be used.
#
# Dependencies:
# paramiko - Used to perform remote SSH commands. To install run 'easy_install paramiko'
# cm_api - Used to interact with cluster environment - Visit cloudera.github.com/cm_api/
# for installation details.
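#
# Illustrative usage sketch (host name, cluster name and credentials below are
# placeholder values, assuming a reachable Cloudera Manager instance):
#   cluster = ImpalaCluster('cm-host.example.com', 'Cluster 1', 'admin', 'admin')
#   impala_service = cluster.get_impala_service()
#   impala_service.restart()
#   for impalad in impala_service.get_all_impalad_processes():
#     print impalad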
import cmd
import logging
import time
import os
import sys
import json
import paramiko
import urllib
from collections import defaultdict
from cm_api.api_client import ApiResource
from datetime import datetime
from optparse import OptionParser
from paramiko import PKey
logging.basicConfig(level=logging.ERROR, format='%(threadName)s: %(message)s')
LOG = logging.getLogger('impala_cluster')
LOG.setLevel(level=logging.DEBUG)
# Environment variables that control how to execute commands on remote machines
IMPALA_SSH_PRIVATE_KEY = os.environ.get('IMPALA_PRIVATE_KEY', None)
IMPALA_SSH_PRIVATE_KEY_PASSWORD = os.environ.get('IMPALA_PRIVATE_KEY_PASSWORD', str())
IMPALA_SSH_USER = os.environ.get('IMPALA_SSH_USER', 'impala')
# Represents a set of Impala services, processes, and machines they are running on
class ImpalaCluster(object):
def __init__(self, cm_host, cm_cluster_name, username, password):
self.cm_api = ApiResource(cm_host, username=username, password=password)
self.hosts = dict()
self.services = list()
self.cluster = self.cm_api.get_cluster(cm_cluster_name)
if self.cluster is None:
raise RuntimeError, 'Cluster name "%s" not found' % cm_cluster_name
self.__load_hosts()
self.__impala_service = ImpalaService(self)
def _get_all_services(self):
return self.cluster.get_all_services()
def get_impala_service(self):
return self.__impala_service
def __load_hosts(self):
self.hosts = dict()
# Search for all hosts that are in the target cluster.
    # There is no API that provides the list of hosts in a given cluster, so to find them
# we must loop through all the hosts and check the cluster name matches.
for host_info in self.cm_api.get_all_hosts():
# host_info doesn't include a link to the roleRef so need to do another lookup
# based on the hostId.
host = self.cm_api.get_host(host_info.hostId)
      for roleRef in host.roleRefs:
        if roleRef.get('clusterName') == self.cluster.name:
          self.hosts[host_info.hostId] = Host(host)
          break
# Base class for Cluster service objects
class ClusterService(object):
def __init__(self):
pass
def start(self):
raise NotImplementedError, 'This method is NYI'
def stop(self):
raise NotImplementedError, 'This method is NYI'
def restart(self):
raise NotImplementedError, 'This method is NYI'
# Represents an Impala service - a set of ImpalaD processes and a statestore.
class ImpalaService(ClusterService):
def __init__(self, cluster):
self.__parent_cluster = cluster
self.__state_store_process = None
self.__impalad_processes = list()
self.__impala_service = self.__get_impala_service_internal()
if self.__impala_service is None:
raise RuntimeError, 'No Impala service found on cluster'
# For each service, CM has a set of roles. A role is a lightweight object
# that provides a link between a physical host machine and a logical service.
# Here that information is used to determine where all the impala processes
# are actually located (what machines).
for role in self.__impala_service.get_all_roles():
if 'STATESTORE' in role.name:
self.__state_store_process = ImpalaStateStoreProcess(self,
self.__parent_cluster.hosts[role.hostRef.hostId], role)
elif 'IMPALAD' in role.name:
self.__impalad_processes.append(ImpaladProcess(
self.__parent_cluster.hosts[role.hostRef.hostId], role))
else:
raise RuntimeError, 'Unknown Impala role type'
def get_state_store_process(self):
""" Returns the state store process """
return self.__state_store_process
def get_impalad_process(self, hostname):
""" Returns the impalad process running on the specified hostname """
return first(self.__impalad_processes,
lambda impalad: impalad.hostname == hostname)
def get_all_impalad_processes(self):
return self.__impalad_processes
def __get_impala_service_internal(self):
return first(self.__parent_cluster._get_all_services(),
lambda service: 'impala' in service.name)
def set_process_auto_restart_config(self, value):
""" Sets the process_auto_restart configuration value.
If set, Impala processes will automatically restart if the process dies
"""
self.__update_configuration('process_auto_restart', str(value).lower())
def __update_configuration(self, name, value):
for role in self.__impala_service.get_all_roles():
role.update_config({name: value})
LOG.debug('Updated Config Value: %s/%s' % (role.name, role.get_config()))
def start(self):
""" Starts all roles/processes of the service """
LOG.debug("Starting ImpalaService")
self.__impala_service.start()
self.__wait_for_service_state('STARTED')
def restart(self):
""" Restarts all roles/processes of the service """
LOG.debug("Restarting ImpalaService")
self.__impala_service.restart()
self.__wait_for_service_state('STARTED')
def stop(self):
""" Stops all roles/processes of the service """
LOG.debug("Stopping ImpalaService")
self.__impala_service.stop()
self.__wait_for_service_state('STOPPED')
def get_health_summary(self):
return self.__get_impala_service_internal().healthSummary
def state(self):
"""
Gets the current state of the service (a string value).
Possible values are STOPPED, STOPPING, STARTED, STARTING, UNKNOWN
"""
return self.__get_impala_service_internal().serviceState
def __wait_for_service_state(self, desired_state, timeout=0):
""" Waits for the service to reach the specified state within the given time(secs) """
current_state = self.state()
start_time = datetime.now()
while current_state.upper() != desired_state.upper():
LOG.debug('Current Impala Service State: %s Waiting For: %s' % (current_state,
desired_state))
      # Sleep for a bit to give the service time to reach the target state.
time.sleep(1)
# Get the current service state.
current_state = self.state()
if timeout != 0 and (datetime.now() - start_time).seconds > timeout:
raise RuntimeError, 'Did not reach desired state within %d seconds.' % timeout
# Represents one host/machine in the cluster.
class Host(object):
def __init__(self, cm_host):
self.cm_host = cm_host
self.hostname = cm_host.hostname
self.ssh_client = paramiko.SSHClient()
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
def exec_cmd(self, cmd):
""" Executes a command on the machine using SSH """
self.ssh_client.connect(hostname=self.hostname, username=IMPALA_SSH_USER)
LOG.debug('Executing on host: %s Command: "%s"' % (self.hostname, cmd))
rsa_key = None
# TODO: Support other key types besides RSA
if IMPALA_SSH_PRIVATE_KEY is not None:
paramiko.RSAKey.from_private_key_file(filename=IMPALA_SSH_PRIVATE_KEY,
password=IMPALA_SSH_PRIVATE_KEY_PASSWORD)
stdin, stdout, stderr = self.ssh_client.exec_command(cmd, rsa_key)
stdout_str = stdout.read()
stderr_str = stderr.read()
if stdout_str: LOG.debug(stdout_str.strip())
if stderr_str: LOG.debug(stderr_str.strip())
stdout.close()
stderr.close()
self.ssh_client.close()
return stdout_str, stderr_str
# Represents a single process running on a machine
class Process(object):
def __init__(self, host, process_name):
self.name = process_name
self.host = host
self.hostname = host.hostname
self.__pid = None
def kill(self):
""" Kill the process if it is running, if not running this will be a no-op """
pid = self.get_pid()
if pid is not None and pid > 0:
self.host.exec_cmd('sudo kill -9 %d' % self.get_pid())
else:
LOG.debug('Skipping kill of pid: %s on host: %s' % (pid, self.hostname))
def get_pid(self):
""" Returns the process' current pid """
stdout, stderr = self.host.exec_cmd('sudo /sbin/pidof %s' % self.name)
pids = [pid.strip() for pid in stdout.split()]
# Note: This is initialized to -2 instead of -1 because 'kill -1' kills all processes.
self.__pid = -2
if len(pids) > 1:
raise RuntimeError, 'Error - %d PIDs detected. Expected 1' % len(pids)
elif len(pids) == 1:
self.__pid = int(pids[0])
return self.__pid
def is_running(self):
return self.get_pid() > 0
# Represents a single Impala statestore process
class ImpalaStateStoreProcess(Process):
def __init__(self, parent_service, host, cm_role, metrics_port=9190):
Process.__init__(self, host, 'impala-statestore');
self.metrics_port = metrics_port
self.role = cm_role
def get_impala_backend(self, hostname):
"""Returns the impala backend on the specified host."""
return first(self.get_live_impala_backends(),
lambda backend: backend.split(':')[0] == hostname)
def get_live_impala_backends(self):
"""Returns a list of host:be_port strings of live impalad instances."""
metrics_page = urllib.urlopen("http://%s:%d/jsonmetrics" %\
(self.host, int(self.metrics_port)))
return json.loads(metrics_page.read())['statestore.live.backends.list']
def __str__(self):
return 'Name: %s Host: %s' % (self.name, self.hostname)
# Represents a single Impalad process
class ImpaladProcess(Process):
def __init__(self, host, cm_role, be_port=22000, beeswax_port=21000):
Process.__init__(self, host, 'impalad');
self.role = cm_role
self.host = host
self.be_port = be_port
self.beeswax_port = beeswax_port
self.__pid = None
def get_pid(self):
try:
self.__pid = super(ImpaladProcess, self).get_pid()
except RuntimeError, e:
# There could be multiple ImpalaD instances running on the same
# machine (local testing case). Fall back to this method for getting the pid.
LOG.info('Multiple PIDs found for Impalad service. Attempting to get PID based on '\
        'the be_port: %s', e)
stdout, stderr = self.host.exec_cmd(
'lsof -i:%d | awk \'{print $2}\' | tail -n 1' % self.be_port)
self.__pid = int(stdout) if stdout else -1
return self.__pid
def __str__(self):
return 'Name: %s, Host: %s BE Port: %d Beeswax Port: %d PID: %s'\
% (self.name, self.hostname, self.be_port, self.beeswax_port, self.__pid)
def first(collection, match_function):
""" Returns the first item in the collection that satisfies the match function """
return next((item for item in collection if match_function(item)), None)
| apache-2.0 | 2,030,616,690,927,677,000 | 38.040541 | 90 | 0.679647 | false |
czgu/opendataexperience | env/lib/python2.7/site-packages/django/contrib/auth/tests/test_forms.py | 35 | 19779 | from __future__ import unicode_literals
import os
import re
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.forms import (UserCreationForm, AuthenticationForm,
PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core import mail
from django.forms.fields import Field, CharField
from django.test import TestCase, override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserCreationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(form.error_messages['duplicate_username'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(form.fields['username'].error_messages['invalid'])])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [force_text(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
def test_success(self):
# The success case.
data = {
'username': '[email protected]',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(repr(u), '<User: [email protected]>')
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AuthenticationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})])
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
UserModel = get_user_model()
username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SetPasswordFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors,
[force_text(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields),
['old_password', 'new_password1', 'new_password2'])
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
self.assertEqual(form['username'].errors,
[force_text(form.fields['username'].error_messages['invalid'])])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
    def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
@skipIfCustomUser
@override_settings(
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
USE_TZ=False,
)
class PasswordResetFormTest(TestCase):
fixtures = ['authtestdata.json']
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = '[email protected]'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', '[email protected]', 'test')
self.assertEqual(user.email, '[email protected]')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
        Test that an inactive user cannot receive a password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', '[email protected]', 'test')
data = {"email": "[email protected]"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
        Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$', message.get_payload(1).get_payload()))
class ReadOnlyPasswordHashTest(TestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field._has_changed('aaa', 'bbb'))
| apache-2.0 | 7,090,900,627,293,868,000 | 37.935039 | 143 | 0.624754 | false |
JesusAMR/ProgramasUNI | Romanos.py | 1 | 1218 | #! /usr/bin/python
import sys
inp = raw_input("Enter the number: ")
class Romano:
def __init__(self, entrada):
self.lstnRom = { 1: "I", 2: "X", 3: "C", 4: "M" }
self.lstcRom = { 1: "V", 2: "L", 3: "D"}
self.entrada = entrada
self.lon = len(entrada)
self.lim = self.lon
self.result = ""
def conversion(self):
for i in xrange(0,self.lon):
if ( int(self.entrada[i]) in range(1,4)):
self.result = self.result + self.lstnRom[self.lon] * int(self.entrada[i])
self.lon = self.lon - 1
elif( int(self.entrada[i]) == 0):
self.lon = self.lon - 1
elif( int(self.entrada[i]) == 4):
self.result = self.result + self.lstnRom[self.lon] + self.lstcRom[self.lon]
self.lon = self.lon - 1
elif( int(self.entrada[i]) == 5):
self.result = self.result + self.lstcRom[self.lon]
self.lon = self.lon - 1
elif( int(self.entrada[i]) in range(6,9)):
                self.result = self.result + self.lstcRom[self.lon] + self.lstnRom[self.lon] * (int(self.entrada[i])-5)
self.lon = self.lon - 1
elif( int(self.entrada[i]) == 9):
self.result = self.result + self.lstnRom[self.lon] + self.lstnRom[self.lon+1]
self.lon = self.lon -1
return self.result
x = Romano(inp)
print(x.conversion())
| gpl-3.0 | 1,153,205,829,685,655,600 | 33.8 | 96 | 0.609195 | false |
mathiasertl/django-ca | ca/django_ca/tests/tests_command_cache_crls.py | 1 | 2929 | # This file is part of django-ca (https://github.com/mathiasertl/django-ca).
#
# django-ca is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# django-ca is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-ca. If not,
# see <http://www.gnu.org/licenses/>
"""Test the cache_crls management command."""
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import Encoding
from django.core.cache import cache
from django.test import TestCase
from freezegun import freeze_time
from ..utils import get_crl_cache_key
from .base import override_tmpcadir
from .base import timestamps
from .base.mixins import TestCaseMixin
class CacheCRLsTestCase(TestCaseMixin, TestCase):
"""Main test class for this command."""
load_cas = "__usable__"
@override_tmpcadir()
@freeze_time(timestamps["everything_valid"])
def test_basic(self) -> None:
"""Test the basic command.
        Note: Without an explicit serial, expired CAs are excluded; that's why we need @freeze_time().
"""
stdout, stderr = self.cmd("cache_crls")
self.assertEqual(stdout, "")
self.assertEqual(stderr, "")
for ca in self.cas.values():
key = get_crl_cache_key(ca.serial, hashes.SHA512(), Encoding.DER, "ca")
crl = x509.load_der_x509_crl(cache.get(key), default_backend())
self.assertIsNotNone(crl)
self.assertIsInstance(crl.signature_hash_algorithm, hashes.SHA512)
key = get_crl_cache_key(ca.serial, hashes.SHA512(), Encoding.DER, "user")
crl = x509.load_der_x509_crl(cache.get(key), default_backend())
self.assertIsNotNone(crl)
@override_tmpcadir()
def test_serial(self) -> None:
"""Test passing an explicit serial."""
stdout, stderr = self.cmd("cache_crls", self.ca.serial)
self.assertEqual(stdout, "")
self.assertEqual(stderr, "")
key = get_crl_cache_key(self.ca.serial, hashes.SHA512(), Encoding.DER, "ca")
crl = x509.load_der_x509_crl(cache.get(key), default_backend())
self.assertIsNotNone(crl)
self.assertIsInstance(crl.signature_hash_algorithm, hashes.SHA512)
key = get_crl_cache_key(self.ca.serial, hashes.SHA512(), Encoding.DER, "user")
crl = x509.load_der_x509_crl(cache.get(key), default_backend())
self.assertIsNotNone(crl)
| gpl-3.0 | -329,267,803,858,479,300 | 38.581081 | 101 | 0.692386 | false |
ParticulateFlow/Palabos-PFM | scons/scons-local-2.1.0/SCons/Platform/irix.py | 21 | 1664 | """SCons.Platform.irix
Platform-specific initialization for SGI IRIX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/irix.py 5357 2011/09/09 21:31:03 bdeegan"
import posix
def generate(env):
posix.generate(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| agpl-3.0 | -4,935,553,149,364,765,000 | 36.818182 | 101 | 0.765024 | false |
openstack/cinder | cinder/tests/hacking/checks.py | 2 | 14111 | # Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
from hacking import core
"""
Guidelines for writing new hacking checks
- Use only for Cinder specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to
cinder/tests/unit/test_hacking.py
"""
# NOTE(thangp): Ignore N323 pep8 error caused by importing cinder objects
UNDERSCORE_IMPORT_FILES = ['cinder/objects/__init__.py',
'cinder/objects/manageableresources.py']
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
translated_log = re.compile(
r"(.)*LOG\.(audit|debug|error|info|warn|warning|critical|exception)"
r"\(\s*_\(\s*('|\")")
string_translation = re.compile(r"(.)*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*i18n\s+import(.)* _$")
underscore_import_check_multi = re.compile(r"(.)*i18n\s+import(.)* _, (.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
no_print_statements = re.compile(r"\s*print\s*\(.+\).*")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
logging_instance = re.compile(
r"(.)*LOG\.(warning|info|debug|error|exception)\(")
assert_True = re.compile(
r".*assertEqual\(True, .*\)")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
# Need to disable pylint check here as it doesn't catch CHECK_DESC
# being defined in the subclasses.
message = message or self.CHECK_DESC # pylint: disable=E1101
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
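# A minimal sketch of a BaseASTChecker subclass following the contract the
# docstring above describes (visit_* methods, add_error(), CHECK_DESC). The
# class and rule number C399 are hypothetical, shown only for illustration.
#
#     class CheckNoEval(BaseASTChecker):
#         name = 'check_no_eval'
#         version = '1.0'
#         CHECK_DESC = 'C399 eval() should not be used.'
#
#         def visit_Call(self, node):
#             if self._check_call_names(node, ['eval']):
#                 self.add_error(node)
#             return super(CheckNoEval, self).generic_visit(node)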
@core.flake8ext
def no_translate_logs(logical_line, filename):
"""Check for 'LOG.*(_('
Starting with the Pike series, OpenStack no longer supports log
translation. We shouldn't translate logs.
- This check assumes that 'LOG' is a logger.
- Use filename so we can start enforcing this in specific folders
instead of needing to do so all at once.
C312
"""
if translated_log.match(logical_line):
yield(0, "C312: Log messages should not be translated!")
@core.flake8ext
def no_mutable_default_args(logical_line):
msg = "N322: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
@core.flake8ext
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate messages are explicitly importing the _ function. We
    can't trust unit tests to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
for file in UNDERSCORE_IMPORT_FILES:
if file in filename:
return
if (underscore_import_check.match(logical_line) or
underscore_import_check_multi.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif string_translation.match(logical_line):
yield(0, "N323: Found use of _() without explicit import of _ !")
class CheckLoggingFormatArgs(BaseASTChecker):
"""Check for improper use of logging format arguments.
LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.",
('volume1', 500))
The format arguments should not be a tuple as it is easy to miss.
"""
name = 'check_logging_format_args'
version = '1.0'
CHECK_DESC = 'C310 Log method arguments should not be a tuple.'
LOG_METHODS = [
'debug', 'info',
'warn', 'warning',
'error', 'exception',
'critical', 'fatal',
'trace', 'log'
]
def _find_name(self, node):
"""Return the fully qualified name or a Name or Attribute."""
if isinstance(node, ast.Name):
return node.id
elif (isinstance(node, ast.Attribute)
and isinstance(node.value, (ast.Name, ast.Attribute))):
method_name = node.attr
obj_name = self._find_name(node.value)
if obj_name is None:
return None
return obj_name + '.' + method_name
elif isinstance(node, str):
return node
else: # could be Subscript, Call or many more
return None
def visit_Call(self, node):
"""Look for the 'LOG.*' calls."""
# extract the obj_name and method_name
if isinstance(node.func, ast.Attribute):
obj_name = self._find_name(node.func.value)
if isinstance(node.func.value, ast.Name):
method_name = node.func.attr
elif isinstance(node.func.value, ast.Attribute):
obj_name = self._find_name(node.func.value)
method_name = node.func.attr
else: # could be Subscript, Call or many more
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# obj must be a logger instance and method must be a log helper
if (obj_name != 'LOG'
or method_name not in self.LOG_METHODS):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# the call must have arguments
if not len(node.args):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# any argument should not be a tuple
for arg in node.args:
if isinstance(arg, ast.Tuple):
self.add_error(arg)
return super(CheckLoggingFormatArgs, self).generic_visit(node)
class CheckOptRegistrationArgs(BaseASTChecker):
"""Verifying the registration of options are well formed
This class creates a check for single opt or list/tuple of
opts when register_opt() or register_opts() are being called.
"""
name = 'check_opt_registrationg_args'
version = '1.0'
CHECK_DESC = ('C311: Arguments being passed to register_opt/register_opts '
'must be a single option or list/tuple of options '
'respectively. Options must also end with _opt or _opts '
'respectively.')
singular_method = 'register_opt'
plural_method = 'register_opts'
register_methods = [
singular_method,
plural_method,
]
def _find_name(self, node):
"""Return the fully qualified name or a Name or Attribute."""
if isinstance(node, ast.Name):
return node.id
elif (isinstance(node, ast.Attribute)
and isinstance(node.value, (ast.Name, ast.Attribute))):
method_name = node.attr
obj_name = self._find_name(node.value)
if obj_name is None:
return None
return obj_name + '.' + method_name
elif isinstance(node, str):
return node
else: # could be Subscript, Call or many more
return None
def _is_list_or_tuple(self, obj):
return isinstance(obj, (ast.List, ast.Tuple))
def visit_Call(self, node):
"""Look for the register_opt/register_opts calls."""
# extract the obj_name and method_name
if isinstance(node.func, ast.Attribute):
if not isinstance(node.func.value, ast.Name):
return (super(CheckOptRegistrationArgs,
self).generic_visit(node))
method_name = node.func.attr
# obj must be instance of register_opt() or register_opts()
if method_name not in self.register_methods:
return (super(CheckOptRegistrationArgs,
self).generic_visit(node))
if len(node.args) > 0:
argument_name = self._find_name(node.args[0])
if argument_name:
if (method_name == self.singular_method and
not argument_name.lower().endswith('opt')):
self.add_error(node.args[0])
elif (method_name == self.plural_method and
not argument_name.lower().endswith('opts')):
self.add_error(node.args[0])
else:
# This covers instances of register_opt()/register_opts()
# that are registering the objects directly and not
# passing in a variable referencing the options being
# registered.
if (method_name == self.singular_method and
self._is_list_or_tuple(node.args[0])):
self.add_error(node.args[0])
elif (method_name == self.plural_method and not
self._is_list_or_tuple(node.args[0])):
self.add_error(node.args[0])
return super(CheckOptRegistrationArgs, self).generic_visit(node)
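# For reference, the naming convention this check enforces (CONF and the
# option variables below are hypothetical examples):
#
#     CONF.register_opt(backup_opt)        # OK: singular method, *_opt name
#     CONF.register_opts(backup_opts)      # OK: plural method, *_opts name
#     CONF.register_opt(backup_opts)       # flagged: name does not end in _opt
#     CONF.register_opts([opt_a, opt_b])   # OK: literal list passed to plural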
@core.flake8ext
def check_datetime_now(logical_line, noqa):
if noqa:
return
msg = ("C301: Found datetime.now(). "
"Please use timeutils.utcnow() from oslo_utils.")
if 'datetime.now' in logical_line:
yield(0, msg)
@core.flake8ext
def check_no_print_statements(logical_line, filename, noqa):
# CLI and utils programs do need to use 'print()' so
# we shouldn't check those files.
if noqa:
return
if "cinder/cmd" in filename or "tools/" in filename:
return
if re.match(no_print_statements, logical_line):
msg = ("C303: print() should not be used. "
"Please use LOG.[info|error|warning|exception|debug]. "
"If print() must be used, use '# noqa' to skip this check.")
yield(0, msg)
@core.flake8ext
def check_timeutils_strtime(logical_line):
msg = ("C306: Found timeutils.strtime(). "
"Please use datetime.datetime.isoformat() or datetime.strftime()")
if 'timeutils.strtime' in logical_line:
yield(0, msg)
@core.flake8ext
def dict_constructor_with_list_copy(logical_line):
msg = ("N336: Must use a dict comprehension instead of a dict constructor "
"with a sequence of key-value pairs.")
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
@core.flake8ext
def check_timeutils_isotime(logical_line):
msg = ("C308: Found timeutils.isotime(). "
"Please use datetime.datetime.isoformat()")
if 'timeutils.isotime' in logical_line:
yield(0, msg)
@core.flake8ext
def no_test_log(logical_line, filename, noqa):
if ('cinder/tests' not in filename or noqa):
return
msg = "C309: Unit tests should not perform logging."
if logging_instance.match(logical_line):
yield (0, msg)
@core.flake8ext
def validate_assertTrue(logical_line, filename):
# Note: a comparable check cannot be implemented for
# assertFalse(), because assertFalse(None) passes.
# Therefore, assertEqual(False, value) is required to
# have the strongest test.
if 'cinder/tests/unit' not in filename:
return
if re.match(assert_True, logical_line):
msg = ("C313: Unit tests should use assertTrue(value) instead"
" of using assertEqual(True, value).")
yield(0, msg)
third_party_mock = re.compile("^import.mock")
from_third_party_mock = re.compile("^from.mock.import")
@core.flake8ext
def no_third_party_mock(logical_line):
# We should only use unittest.mock, not the third party mock library that
# was needed for py2 support.
if (re.match(third_party_mock, logical_line) or
re.match(from_third_party_mock, logical_line)):
msg = ('C337: Unit tests should use the standard library "mock" '
'module, not the third party mock lib.')
yield(0, msg)
| apache-2.0 | -5,474,935,649,176,515,000 | 35.368557 | 79 | 0.619588 | false |
mburst/gevent-socketio-starterkit | fantasy/core/sockets.py | 1 | 5277 | from core.models import *
from socketio.namespace import BaseNamespace
from socketio.mixins import RoomsMixin, BroadcastMixin
from socketio.sdjango import namespace
import logging
import redis
from collections import defaultdict
def nested_defaultdict():
return defaultdict(dict)
@namespace('/home')
class HomeNamespace(BaseNamespace, BroadcastMixin):
def initialize(self):
'''self.logger = logging.getLogger("socketio.chat")
self.log("Socketio session started")'''
def log(self, message):
self.logger.info("[{0}] {1}".format(self.socket.sessid, message))
def on_create_league(self, league_name):
league = League.objects.create(name=league_name)
self.broadcast_event('new_league', {'id': league.id, 'name': league.name})
#Try and join a league by making sure that it exists and that it is not full
def on_join_league(self, league_info):
try:
league = League.objects.get(id=league_info['league_id'])
except Exception, e:
self.error('Bad League ID', 'Unable to join the specified league. Please try again')
return
if not league.locked:
team = Team.objects.create(name=league_info['team_name'], league=league, owner=self.request.user)
self.broadcast_event_not_me('new_member', {'full': league.full(), 'league_id': league.id, 'team_name': team.name})
self.emit('redirect', league.get_draft_url())
else:
self.error('Locked', 'That league is currently full')
@namespace('/draft')
class DraftNamespace(BaseNamespace, RoomsMixin):
drafts = defaultdict(nested_defaultdict)
def initialize(self):
self.r = redis.StrictRedis(host='localhost', port=6379, db=0)
#Loads when our client connects. Lists all default methods client has access to
def get_initial_acl(self):
return ['on_join', 'recv_connect', 'recv_disconnect']
def recv_disconnect(self):
del self.drafts[self.league.id]['sockets'][self.team.id]
self.disconnect(silent=True)
#The rooms mixin loops through each socket and checks if it has access to the room
#This just emits to only the sockets in our league so it's more efficient
#I've left the commented out rooms code for comparison though
def emit_to_league(self, league_id, event, msg):
for socket in self.drafts[league_id]['sockets'].itervalues():
socket.emit(event, msg)
def on_join(self, league_id):
#self.join('draft_' + league_id)
self.league = League.objects.get(id=league_id)
self.team = Team.objects.get(league=league_id, owner=self.request.user)
league_id = int(league_id)
self.drafts[league_id]['sockets'][self.team.id] = self
self.emit_to_league(league_id, 'new_team', {'id': self.team.id, 'name': self.team.name})
#Check if we've reconnected and it's our pick
current_pick = self.drafts[self.league.id].get('current_pick')
if current_pick == self.team.id:
self.drafts[self.league.id]['sockets'][current_pick].add_acl_method('on_pick')
#Start the draft now that everyone is here
if len(self.drafts[league_id]['sockets']) == 4 and self.league.locked == False:
self.league.locked = True
self.league.save()
self.league.generate_draft_order()
self.drafts[league_id]['current_pick'] = ''
self.pick_logic()
def on_pick(self, player_id):
player = Player.objects.get(id=player_id)
self.team.players.add(player)
#self.emit_to_room('draft_' + str(self.league.id), 'player_drafted', {'player': player_id, 'team': self.team.id})
self.emit_to_league(self.league.id, 'player_drafted', {'player': player_id, 'team': self.team.id})
self.pick_logic()
def pick_logic(self):
current_pick = self.drafts[self.league.id].get('current_pick')
#Delete pick method access from person who just picked
if current_pick:
self.drafts[self.league.id]['sockets'][current_pick].del_acl_method('on_pick')
        #Grab next drafter. This list is sent to redis from models.py
drafter = self.r.lpop('draft_order_' + str(self.league.id))
#Set the current pick and give the socket access to the on_pick method
        #lpop will return None if there aren't any more drafters in the list
        #So when it returns None we emit the draft_over event
if drafter:
drafter = int(drafter)
self.drafts[self.league.id]['current_pick'] = drafter
self.drafts[self.league.id]['sockets'][drafter].add_acl_method('on_pick')
#self.emit_to_room('draft_' + str(self.league.id), 'new_drafter', self.drafts[self.league.id]['sockets'][drafter].team.name)
self.emit_to_league(self.league.id, 'new_drafter', self.drafts[self.league.id]['sockets'][drafter].team.name)
else:
self.emit_to_league(self.league.id, 'draft_over', "The Draft is now over!") | mit | -3,127,850,271,263,903,000 | 45.133929 | 136 | 0.620428 | false |
palichis/elmolino | elmolino/urls.py | 1 | 1143 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from web.views import *
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'elmolino.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', home),
url(r'^vivero', vivero),
url(r'^huerto', huerto),
url(r'^servicios', servicios),
url(r'^el_molino', el_molino),
url(r'^entrar', entrar),
url(r'^salir', salir),
url(r'^carrito', carr),
url(r'^compras/$', compras),
url(r'^foro/$', foros),
url(r'^noticias/$', noticias),
url(r'^compras/evento/$', evento_carrito),
(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^usuario/$', usuario),
)
| gpl-2.0 | 5,460,872,925,354,897,000 | 44.72 | 81 | 0.405074 | false |
wozz/electrum-myr | plugins/virtualkeyboard.py | 1 | 2092 | from PyQt4.QtGui import *
from electrum_myr import BasePlugin
from electrum_myr.i18n import _
from electrum_myr.plugins import BasePlugin, hook
from electrum_myr.i18n import _
class Plugin(BasePlugin):
def fullname(self):
return 'Virtual Keyboard'
def description(self):
return '%s\n%s' % (_("Add an optional virtual keyboard to the password dialog."), _("Warning: do not use this if it makes you pick a weaker password."))
@hook
def init_qt(self, gui):
self.gui = gui
self.vkb = None
self.vkb_index = 0
@hook
def password_dialog(self, pw, grid, pos):
vkb_button = QPushButton(_("+"))
vkb_button.setFixedWidth(20)
vkb_button.clicked.connect(lambda: self.toggle_vkb(grid, pw))
grid.addWidget(vkb_button, pos, 2)
self.kb_pos = 2
def toggle_vkb(self, grid, pw):
if self.vkb: grid.removeItem(self.vkb)
self.vkb = self.virtual_keyboard(self.vkb_index, pw)
grid.addLayout(self.vkb, self.kb_pos, 0, 1, 3)
self.vkb_index += 1
def virtual_keyboard(self, i, pw):
import random
i = i%3
if i == 0:
chars = 'abcdefghijklmnopqrstuvwxyz '
elif i == 1:
chars = 'ABCDEFGHIJKLMNOPQRTSUVWXYZ '
elif i == 2:
chars = '1234567890!?.,;:/%&()[]{}+-'
n = len(chars)
s = []
for i in xrange(n):
while True:
k = random.randint(0,n-1)
if k not in s:
s.append(k)
break
def add_target(t):
return lambda: pw.setText(str( pw.text() ) + t)
vbox = QVBoxLayout()
grid = QGridLayout()
grid.setSpacing(2)
for i in range(n):
l_button = QPushButton(chars[s[i]])
l_button.setFixedWidth(25)
l_button.setFixedHeight(25)
l_button.clicked.connect(add_target(chars[s[i]]) )
grid.addWidget(l_button, i/6, i%6)
vbox.addLayout(grid)
return vbox
| gpl-3.0 | -7,775,485,789,755,474,000 | 27.657534 | 160 | 0.544933 | false |
andymckay/addons-server | src/olympia/api/authentication.py | 3 | 5288 | from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
import commonware
import jwt
from rest_framework import exceptions
from rest_framework.authentication import get_authorization_header
from rest_framework_jwt.authentication import (
JSONWebTokenAuthentication as UpstreamJSONWebTokenAuthentication)
from olympia import amo
from olympia.api import jwt_auth
from olympia.api.models import APIKey
log = commonware.log.getLogger('z.api.authentication')
class JSONWebTokenAuthentication(UpstreamJSONWebTokenAuthentication):
"""
DRF authentication class for JWT header auth.
"""
def authenticate_credentials(self, request):
"""
Mimic what our ACLMiddleware does after a successful authentication,
because otherwise that behaviour would be missing in the API since API
auth happens after the middleware process request phase.
"""
result = super(
JSONWebTokenAuthentication, self).authenticate_credentials(request)
amo.set_user(result)
return result
class JWTKeyAuthentication(UpstreamJSONWebTokenAuthentication):
"""
DRF authentication class for JWT header auth with API keys.
This extends the django-rest-framework-jwt auth class to get the
shared JWT secret from our APIKey database model. Each user (an add-on
developer) can have one or more API keys. The JWT is issued with their
public ID and is signed with their secret.
**IMPORTANT**
Please note that unlike typical JWT usage, this authenticator only
signs and verifies that the user is who they say they are. It does
not sign and verify the *entire request*. In other words, when you use
this authentication method you cannot prove that the request was made
by the authenticated user.
"""
def authenticate(self, request):
"""
Returns a two-tuple of `User` and token if a valid signature has been
supplied using JWT-based authentication. Otherwise returns `None`.
Copied from rest_framework_jwt BaseJSONWebTokenAuthentication, with
the decode_handler changed to our own - because we don't want that
decoder to be the default one in settings - and logging added.
"""
jwt_value = self.get_jwt_value(request)
if jwt_value is None:
return None
try:
payload = jwt_auth.jwt_decode_handler(jwt_value)
except Exception, exc:
try:
# Log all exceptions
log.info('JWTKeyAuthentication failed; '
'it raised %s (%s)', exc.__class__.__name__, exc)
# Re-raise to deal with them properly.
raise exc
except jwt.ExpiredSignature:
msg = _('Signature has expired.')
raise exceptions.AuthenticationFailed(msg)
except jwt.DecodeError:
msg = _('Error decoding signature.')
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError:
msg = _('Invalid JWT Token.')
raise exceptions.AuthenticationFailed(msg)
# Note: AuthenticationFailed can also be raised directly from our
# jwt_decode_handler.
user = self.authenticate_credentials(payload)
return (user, jwt_value)
def authenticate_credentials(self, payload):
"""
Returns a verified AMO user who is active and allowed to make API
requests.
"""
if 'orig_iat' in payload:
msg = ("API key based tokens are not refreshable, don't include "
"`orig_iat` in their payload.")
raise exceptions.AuthenticationFailed(msg)
try:
api_key = APIKey.get_jwt_key(key=payload['iss'])
except APIKey.DoesNotExist:
msg = 'Invalid API Key.'
raise exceptions.AuthenticationFailed(msg)
if api_key.user.deleted:
msg = 'User account is disabled.'
raise exceptions.AuthenticationFailed(msg)
if not api_key.user.read_dev_agreement:
msg = 'User has not read developer agreement.'
raise exceptions.AuthenticationFailed(msg)
amo.set_user(api_key.user)
return api_key.user
def get_jwt_value(self, request):
"""
Get the JWT token from the authorization header.
Copied from upstream's implementation but uses a hardcoded 'JWT'
prefix in order to be isolated from JWT_AUTH_HEADER_PREFIX setting
which is used for the non-api key auth above.
"""
auth = get_authorization_header(request).split()
auth_header_prefix = 'jwt' # JWT_AUTH_HEADER_PREFIX.lower()
if not auth or smart_text(auth[0].lower()) != auth_header_prefix:
return None
if len(auth) == 1:
msg = _('Invalid Authorization header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid Authorization header. Credentials string '
'should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
return auth[1]
| bsd-3-clause | -6,535,593,066,688,116,000 | 37.59854 | 79 | 0.645991 | false |
elba7r/frameworking | frappe/www/rss.py | 8 | 1164 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import urllib
from frappe.utils import escape_html, get_request_site_address, now, cstr
no_cache = 1
base_template_path = "templates/www/rss.xml"
def get_context(context):
"""generate rss feed"""
host = get_request_site_address()
blog_list = frappe.db.sql("""\
select route as name, published_on, modified, title, content from `tabBlog Post`
where ifnull(published,0)=1
order by published_on desc limit 20""", as_dict=1)
for blog in blog_list:
blog_page = cstr(urllib.quote(blog.route.encode("utf-8")))
blog.link = urllib.basejoin(host, blog_page)
blog.content = escape_html(blog.content or "")
if blog_list:
modified = max((blog['modified'] for blog in blog_list))
else:
modified = now()
blog_settings = frappe.get_doc('Blog Settings', 'Blog Settings')
context = {
'title': blog_settings.blog_title or "Blog",
'description': blog_settings.blog_introduction or "",
'modified': modified,
'items': blog_list,
'link': host + '/blog'
}
# print context
return context
| mit | 7,842,868,845,911,200,000 | 26.069767 | 82 | 0.702749 | false |
eecs445-f16/umich-eecs445-f16 | lecture07_naive-bayes/Lec07.py | 2 | 5343 | # plotting
from matplotlib import pyplot as plt;
from matplotlib import colors
import matplotlib as mpl;
from mpl_toolkits.mplot3d import Axes3D
if "bmh" in plt.style.available: plt.style.use("bmh");
# matplotlib objects
from matplotlib import mlab;
from matplotlib import gridspec;
# scientific
import numpy as np;
import scipy as scp;
from scipy import linalg
import scipy.stats;
# table display
import pandas as pd
from IPython.display import display
# python
import random;
# warnings
import warnings
warnings.filterwarnings("ignore")
# rise config
from notebook.services.config import ConfigManager
cm = ConfigManager()
cm.update('livereveal', {
'theme': 'simple',
'start_slideshow_at': 'selected',
'transition':'fade',
'scroll': False
});
def lin_reg_classifier(means, covs, n, outliers):
"""
Least Squares for Classification.
:Parameters:
- `means`: means of multivariate normal distributions used to generate data.
- `covs`: terms of variance-covariance matrix used to determine spread of simulated data.
- `n`: number of samples.
- `outliers`: user-specified outliers to be added to the second simulated dataset.
"""
# generate data
x1, y1 = np.random.multivariate_normal(means[0], covs[0], n[0]).T
x2, y2 = np.random.multivariate_normal(means[1], covs[1], n[1]).T
# add targets
class_1 = [1]*n[0] + [0]*n[1]
class_2 = [0]*n[0] + [1]*n[1]
T = np.mat([class_1, class_2]).T
# add intercept and merge data
ones = np.ones(n[0]+n[1])
a = np.hstack((x1,x2))
b = np.hstack((y1,y2))
X = np.mat([ones, a, b]).T
# obtain weights
w_t = np.dot(T.T, np.linalg.pinv(X).T)
# obtain decision line
decision_line_int = -(w_t.item((0,0)) - w_t.item((1,0)))/(w_t.item((0,2)) - w_t.item((1,2)))
decision_line_slope = - (w_t.item((0,1)) - w_t.item((1,1)))/(w_t.item((0,2)) - w_t.item((1,2)))
# add outliers to the second set of simulated data
extract_x = []
extract_y = []
for i in outliers:
extract_x.append(i[0])
extract_y.append(i[1])
x2_out = np.hstack((x2, extract_x))
y2_out = np.hstack((y2, extract_y))
class_1_out = [1]*n[0] + [0]*n[1] + [0]*len(outliers)
class_2_out = [0]*n[0] + [1]*n[1] + [1]*len(outliers)
T_out = np.array([class_1_out, class_2_out]).T
ones_out = np.ones(n[0]+n[1]+len(outliers))
a_out = np.hstack((x1,x2_out))
b_out = np.hstack((y1,y2_out))
X_out = np.array([ones_out, a_out, b_out]).T
# obtain revised weights and decision line
w_t_out = np.dot(T_out.T, np.linalg.pinv(X_out).T)
decision_line_int_out = -(w_t_out[0][0] - w_t_out[1][0])/(w_t_out[0][2] - w_t_out[1][2])
decision_line_slope_out = - (w_t_out[0][1] - w_t_out[1][1])/(w_t_out[0][2] - w_t_out[1][2])
# plot results
x = np.linspace(np.min(a_out)-3 , np.max(a_out)+3, 100)
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=False, sharey=True)
plt.suptitle('Least Squares for Classification')
ax1.plot(x, decision_line_int+decision_line_slope*x, 'k', linewidth=2)
ax1.plot(x1, y1, 'go', x2, y2, 'bs', alpha=0.4)
ax2.plot(x, decision_line_int_out+decision_line_slope_out*x, 'k', linewidth=2)
ax2.plot(x1, y1, 'go', x2, y2, 'bs', alpha=0.4)
for i in range(len(outliers)):
ax2.plot(outliers[i][0], outliers[i][1], 'bs', alpha=0.4)
fig.set_size_inches(15, 5, forward=True)
ax1.set_xlim([np.min(a_out)-1, np.max(a_out)+1,])
ax2.set_xlim([np.min(a_out)-1, np.max(a_out)+1])
ax1.set_ylim([np.min(b_out)-1, np.max(b_out)+1,])
ax2.set_ylim([np.min(b_out)-1, np.max(b_out)+1])
ax1.set_xlabel('X1')
ax2.set_xlabel('X1')
ax1.set_ylabel('X2')
plt.show()
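# Usage sketch (all numbers are illustrative): two Gaussian classes plus a few
# hand-picked outliers, showing how the outliers drag the least-squares
# decision boundary in the right-hand panel.
#
#     lin_reg_classifier(means=[[0, 0], [4, 4]],
#                        covs=[[[1, 0], [0, 1]], [[1, 0], [0, 1]]],
#                        n=[100, 100],
#                        outliers=[(12, -6), (13, -7)])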
def generate_gda(means, covs, num_samples):
num_classes = len(means);
num_samples //= num_classes;
# cheat and draw equal number of samples from each gaussian
samples = [
np.random.multivariate_normal(means[c],covs[c],num_samples).T
for c in range(num_classes)
];
return np.concatenate(samples, axis=1);
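# Usage sketch (illustrative parameters): draw an equal number of samples from
# each of two Gaussians; the 2 x num_samples array unpacks into x/y rows.
#
#     means = [[-2, 0], [2, 0]]
#     covs = [np.eye(2), np.eye(2)]
#     xs, ys = generate_gda(means, covs, num_samples=1000)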
def plot_decision_contours(means, covs):
# plt
fig = plt.figure(figsize=(10,6));
ax = fig.gca();
# generate samples
data_x,data_y = generate_gda(means, covs, 1000);
ax.plot(data_x, data_y, 'x');
# dimensions
min_x, max_x = -10,10;
min_y, max_y = -10,10;
# grid
delta = 0.025
x = np.arange(min_x, max_x, delta);
y = np.arange(min_y, max_y, delta);
X, Y = np.meshgrid(x, y);
# bivariate difference of gaussians
mu1,mu2 = means;
sigma1, sigma2 = covs;
Z1 = mlab.bivariate_normal(X, Y, sigmax=sigma1[0][0], sigmay=sigma1[1][1], mux=mu1[0], muy=mu1[1], sigmaxy=sigma1[0][1]);
Z2 = mlab.bivariate_normal(X, Y, sigmax=sigma2[0][0], sigmay=sigma2[1][1], mux=mu2[0], muy=mu2[1], sigmaxy=sigma2[0][1]);
Z = Z2 - Z1;
# contour plot
ax.contour(X, Y, Z, levels=np.linspace(np.min(Z),np.max(Z),10));
cs = ax.contour(X, Y, Z, levels=[0], c="k", linewidths=5);
plt.clabel(cs, fontsize=10, inline=1, fmt='%1.3f')
# plot settings
ax.set_xlim((min_x,max_x));
ax.set_ylim((min_y,max_y));
# ax.set_title("Gaussian Discriminant Analysis: $P(y=1 | x) - P(y=0 | x)$", fontsize=20)
ax.set_title("Countours: $P(y=1 | x) - P(y=0 | x)$", fontsize=20) | mit | 2,807,670,406,307,736,600 | 33.477419 | 125 | 0.603781 | false |
jbeezley/girder | girder/api/describe.py | 1 | 30739 | # -*- coding: utf-8 -*-
import bson.json_util
import dateutil.parser
import inspect
import jsonschema
import os
import six
import cherrypy
from collections import OrderedDict
from girder import constants, logprint
from girder.api.rest import getCurrentUser, getBodyJson
from girder.constants import SortDir, VERSION
from girder.exceptions import RestException
from girder.models.setting import Setting
from girder.settings import SettingKey
from girder.utility import config, toBool
from girder.utility.model_importer import ModelImporter
from girder.utility.webroot import WebrootBase
from girder.utility.resource import _apiRouteMap
from . import docs, access
from .rest import Resource, getApiUrl, getUrlParts
if six.PY3:
from inspect import signature, Parameter
else:
from funcsigs import signature, Parameter
SWAGGER_VERSION = '2.0'
class Description(object):
"""
This class provides convenient chainable semantics to allow api route
handlers to describe themselves to the documentation. A route handler
function can apply the :py:class:`girder.api.describe.describeRoute`
decorator to itself (called with an instance of this class) in order to
describe itself.
"""
# Data Type map from common name or type to (type, format)
# See Data Type spec:
# https://github.com/OAI/OpenAPI-Specification/blob/
# 0122c22e7fb93b571740dd3c6e141c65563a18be/versions/2.0.md#data-types
_dataTypeMap = {
# Primitives
'integer': ('integer', 'int32'),
'long': ('integer', 'int64'),
'number': ('number', None),
'float': ('number', 'float'),
'double': ('number', 'double'),
'string': ('string', None),
'byte': ('string', 'byte'),
'binary': ('string', 'binary'),
'boolean': ('boolean', None),
'date': ('string', 'date'),
'dateTime': ('string', 'date-time'),
'password': ('string', 'password'),
'file': ('file', None)
}
def __init__(self, summary):
self._summary = summary
self._params = []
self._responses = {}
self._consumes = []
self._produces = []
self._responseClass = None
self._responseClassArray = False
self._notes = None
self._deprecated = False
self.hasPagingParams = False
self.modelParams = {}
self.jsonParams = {}
def asDict(self):
"""
Returns this description object as an appropriately formatted dict
"""
# Responses Object spec:
# The Responses Object MUST contain at least one response code, and it
# SHOULD be the response for a successful operation call.
if '200' not in self._responses:
self._responses['200'] = {
'description': 'Success'
}
if self._responseClass is not None:
schema = {
'$ref': '#/definitions/%s' % self._responseClass
}
if self._responseClassArray:
schema = {
'type': 'array',
'items': schema
}
self._responses['200']['schema'] = schema
resp = {
'summary': self._summary,
'responses': self._responses
}
if self._params:
resp['parameters'] = self._params
if self._notes is not None:
resp['description'] = self._notes
if self._consumes:
resp['consumes'] = self._consumes
if self._produces:
# swagger has a bug where not all appropriate mime types are
# considered to be binary (see
# https://github.com/swagger-api/swagger-ui/issues/1605). If we
# have specified zip format, replace it with
# application/octet-stream
# Reduce the list of produces values to unique values,
# maintaining the order.
produces = list(OrderedDict.fromkeys([
'application/octet-stream' if item in ('application/zip', )
else item for item in self._produces]))
resp['produces'] = produces
if self._deprecated:
resp['deprecated'] = True
return resp
def responseClass(self, obj, array=False):
self._responseClass = obj
self._responseClassArray = array
return self
def _validateParamInfo(self, dataType, paramType, name):
"""
Helper to convert and validate the dataType and paramType.
Prints warnings if invalid values were passed.
"""
# Legacy data type conversions
if dataType == 'int':
dataType = 'integer'
# Parameter Object spec:
# If type is "file", then the swagger "consumes" field MUST be either
# "multipart/form-data", "application/x-www-form-urlencoded" or both
# and the parameter MUST be in "formData".
if dataType == 'file':
paramType = 'formData'
# Get type and format from common name
dataTypeFormat = None
if dataType in self._dataTypeMap:
dataType, dataTypeFormat = self._dataTypeMap[dataType]
# If we are dealing with the body then the dataType might be defined
# by a schema added using addModel(...), we don't know for sure as we
# don't know the resource name here to look it up.
elif paramType != 'body':
logprint.warning(
                'WARNING: Invalid dataType "%s" specified for parameter name "%s"' %
(dataType, name))
# Parameter Object spec:
# Since the parameter is not located at the request body, it is limited
# to simple types (that is, not an object).
if paramType != 'body' and dataType not in (
'string', 'number', 'integer', 'long', 'boolean', 'array', 'file', 'float',
'double', 'date', 'dateTime'):
logprint.warning(
'WARNING: Invalid dataType "%s" specified for parameter "%s"' % (dataType, name))
if paramType == 'form':
paramType = 'formData'
return dataType, dataTypeFormat, paramType
def param(self, name, description, paramType='query', dataType='string',
required=True, enum=None, default=None, strip=False, lower=False, upper=False):
"""
This helper will build a parameter declaration for you. It has the most
common options as defaults, so you won't have to repeat yourself as much
when declaring the APIs.
Note that we could expose more parameters from the Parameter Object
spec, for example: format, allowEmptyValue, minimum, maximum, pattern,
uniqueItems.
:param name: name of the parameter used in the REST query.
:param description: explanation of the parameter.
:param paramType: how is the parameter sent. One of 'query', 'path',
'body', 'header', or 'formData'.
:param dataType: the data type expected in the parameter. This is one
of 'integer', 'long', 'float', 'double', 'string',
'byte', 'binary', 'boolean', 'date', 'dateTime',
'password', or 'file'.
:param required: True if the request will fail if this parameter is not
present, False if the parameter is optional.
:param enum: a fixed list of possible values for the field.
:type enum: `list`
:param strip: For string types, set this to True if the string should be
stripped of white space.
:type strip: bool
:param lower: For string types, set this to True if the string should be
converted to lowercase.
:type lower: bool
:param upper: For string types, set this to True if the string should be
converted to uppercase.
:type upper: bool
"""
dataType, format, paramType = self._validateParamInfo(dataType, paramType, name)
param = {
'name': name,
'description': description,
'in': paramType,
'required': required
}
if dataType == 'string':
param['_strip'] = strip
param['_lower'] = lower
param['_upper'] = upper
if paramType == 'body':
param['schema'] = {
'$ref': '#/definitions/%s' % dataType
}
else:
param['type'] = dataType
if format is not None:
param['format'] = format
if enum:
param['enum'] = enum
if default is not None:
param['default'] = default
self._params.append(param)
return self
def modelParam(self, name, description=None, model=None, destName=None, paramType='path',
plugin='_core', level=None, required=True, force=False, exc=True,
requiredFlags=None, **kwargs):
"""
This should be used in lieu of ``param`` if the parameter is a model ID
and the model should be loaded and passed into the route handler. For example,
if you have a route like ``GET /item/:id``, you could do:
>>> from girder.models.item import Item
>>> modelParam('id', model=Item, level=AccessType.READ)
Which would cause the ``id`` parameter in the path to be mapped to an
item model parameter named ``item``, and ensure that the calling user
has at least ``READ`` access on that item. For parameters passed in
the query string or form data, for example a request like
``POST /item?folderId=...``, you must specify the ``paramType``.
>>> modelParam('folderId', 'The ID of the parent folder.', model=Folder,
... level=AccessType.WRITE, paramType='query')
Note that in the above example, ``model`` is omitted; in this case, the
model is inferred to be ``'folder'`` from the parameter name ``'folderId'``.
:param name: The name passed in via the request, e.g. 'id'.
:type name: str
:param description: The description of the parameter. If not passed, defaults
to "The ID of the <model>."
:type description: str
:param destName: The kwarg name after model loading, e.g. 'folder'. Defaults
to the value of the model parameter.
:type destName: str
:param paramType: how is the parameter sent. One of 'query', 'path',
'body', 'header', or 'formData'.
:param model: The model class to use for loading, or a name, e.g. 'folder'. If not passed,
defaults to stripping the last two characters from the name, such that e.g. 'folderId'
would make the model become 'folder'.
:type model: class or str
:param plugin: Plugin name, if loading a plugin model. Only used when the ``model``
param is a string rather than a class.
:type plugin: str
:param level: Access level, if this is an access controlled model.
:type level: AccessType
:param required: Whether this parameter is required.
:type required: bool
:param force: Force loading of the model (skip access check).
:type force: bool
:param exc: Whether an exception should be raised for a nonexistent resource.
:type exc: bool
:param requiredFlags: Access flags that are required on the object being loaded.
:type requiredFlags: str or list/set/tuple of str or None
"""
if model is None:
model = name[:-2] # strip off "Id"
isModelClass = inspect.isclass(model)
if description is None:
description = 'The ID of the document.'
self.param(name=name, description=description, paramType=paramType, required=required)
self.modelParams[name] = {
'destName': destName,
'level': level,
'force': force,
'model': model,
'plugin': plugin,
'isModelClass': isModelClass,
'exc': exc,
'required': required,
'requiredFlags': requiredFlags,
'kwargs': kwargs
}
return self
def jsonParam(self, name, description, paramType='query', dataType='string', required=True,
default=None, requireObject=False, requireArray=False, schema=None):
"""
Specifies a parameter that should be processed as JSON.
:param requireObject: Whether the value must be a JSON object / Python dict.
:type requireObject: bool
:param requireArray: Whether the value must be a JSON array / Python list.
:type requireArray: bool
:param schema: A JSON schema that will be used to validate the parameter value. If
this is passed, it overrides any ``requireObject`` or ``requireArray`` values
that were passed.
:type schema: dict
"""
if default:
default = bson.json_util.dumps(default)
self.param(
name=name, description=description, paramType=paramType, dataType=dataType,
required=required, default=default)
self.jsonParams[name] = {
'requireObject': requireObject,
'requireArray': requireArray,
'schema': schema
}
return self
def pagingParams(self, defaultSort, defaultSortDir=SortDir.ASCENDING, defaultLimit=50):
"""
Adds the limit, offset, sort, and sortdir parameter documentation to
this route handler.
:param defaultSort: The default field used to sort the result set.
:type defaultSort: str
:param defaultSortDir: Sort order: -1 or 1 (desc or asc)
:type defaultSortDir: int
:param defaultLimit: The default page size.
:type defaultLimit: int
"""
self.param(
'limit', 'Result set size limit.', default=defaultLimit, required=False, dataType='int')
self.param('offset', 'Offset into result set.', default=0, required=False, dataType='int')
if defaultSort is not None:
self.param(
'sort', 'Field to sort the result set by.', default=defaultSort, required=False,
strip=True)
self.param(
'sortdir', 'Sort order: 1 for ascending, -1 for descending.',
required=False, dataType='integer', enum=[SortDir.ASCENDING, SortDir.DESCENDING],
default=defaultSortDir)
self.hasPagingParams = True
return self
def consumes(self, value):
self._consumes.append(value)
return self
def produces(self, value):
if isinstance(value, (list, tuple)):
self._produces.extend(value)
else:
self._produces.append(value)
return self
def notes(self, notes):
self._notes = notes
return self
def errorResponse(self, reason='A parameter was invalid.', code=400):
"""
This helper will build an errorResponse declaration for you. Many
endpoints will be able to use the default parameter values for one of
their responses.
:param reason: The reason or list of reasons why the error occurred.
:type reason: `str, list, or tuple`
:param code: HTTP status code.
:type code: int
"""
code = str(code)
# Combine list of reasons into a single string.
# swagger-ui renders the description using Markdown.
if not isinstance(reason, six.string_types):
reason = '\n\n'.join(reason)
if code in self._responses:
self._responses[code]['description'] += '\n\n' + reason
else:
self._responses[code] = {
'description': reason
}
return self
def deprecated(self):
"""
Mark the route as deprecated.
"""
self._deprecated = True
return self
@property
def params(self):
return self._params
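# A typical route description built with the chainable API above, attached to
# a handler via autoDescribeRoute (the Folder model, access level, and handler
# shown here are hypothetical):
#
#     @access.public
#     @autoDescribeRoute(
#         Description('List widgets under a folder.')
#         .modelParam('folderId', model=Folder, level=AccessType.READ,
#                     paramType='query')
#         .pagingParams(defaultSort='name')
#         .errorResponse()
#     )
#     def listWidgets(self, folder, limit, offset, sort):
#         ...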
class ApiDocs(WebrootBase):
"""
This serves up the Swagger page.
"""
def __init__(self, templatePath=None):
if not templatePath:
templatePath = os.path.join(constants.PACKAGE_DIR,
'api', 'api_docs.mako')
super(ApiDocs, self).__init__(templatePath)
curConfig = config.getConfig()
self.vars['mode'] = curConfig['server'].get('mode', '')
def _renderHTML(self):
from girder.utility import server
self.vars['apiRoot'] = server.getApiRoot()
self.vars['staticPublicPath'] = server.getStaticPublicPath()
self.vars['brandName'] = Setting().get(SettingKey.BRAND_NAME)
return super(ApiDocs, self)._renderHTML()
class Describe(Resource):
def __init__(self):
super(Describe, self).__init__()
self.route('GET', (), self.listResources, nodoc=True)
@access.public
def listResources(self, params):
# Paths Object
paths = {}
# Definitions Object
definitions = dict(**docs.models[None])
# List of Tag Objects
tags = []
routeMap = _apiRouteMap()
for resource in sorted(six.viewkeys(docs.routes), key=str):
# Update Definitions Object
if resource in docs.models:
for name, model in six.viewitems(docs.models[resource]):
definitions[name] = model
prefixPath = None
tag = resource
if isinstance(resource, Resource):
if resource not in routeMap:
raise RestException('Resource not mounted: %s' % resource)
prefixPath = routeMap[resource]
tag = prefixPath[0]
# Tag Object
tags.append({
'name': tag
})
for route, methods in six.viewitems(docs.routes[resource]):
# Path Item Object
pathItem = {}
for method, operation in six.viewitems(methods):
# Operation Object
pathItem[method.lower()] = operation
if prefixPath:
operation['tags'] = prefixPath[:1]
if prefixPath:
route = '/'.join([''] + prefixPath + [route[1:]])
paths[route] = pathItem
apiUrl = getApiUrl(preferReferer=True)
urlParts = getUrlParts(apiUrl)
host = urlParts.netloc
basePath = urlParts.path
return {
'swagger': SWAGGER_VERSION,
'info': {
'title': 'Girder REST API',
'version': VERSION['release']
},
'host': host,
'basePath': basePath,
'tags': tags,
'paths': paths,
'definitions': definitions
}
class describeRoute(object): # noqa: class name
def __init__(self, description):
"""
This returns a decorator to set the API documentation on a route
handler. Pass the Description object (or None) that you want to use to
describe this route. It should be used like the following example:
.. code-block:: python
@describeRoute(
Description('Do something')
.param('foo', 'Some parameter', ...)
)
def routeHandler(...)
:param description: The description for the route.
:type description: :py:class:`girder.api.describe.Description` or None
"""
self.description = description
def __call__(self, fun):
fun.description = self.description
return fun
class autoDescribeRoute(describeRoute): # noqa: class name
def __init__(self, description, hide=False):
"""
Like describeRoute, but this decorator also controls behavior of the
underlying method. It handles parameter validation and transformation
based on the Description object passed.
:param description: The description object.
:type description: Description
:param hide: Set to True if this route should not appear in the swagger listing.
:type hide: bool
"""
super(autoDescribeRoute, self).__init__(description=description)
self.hide = hide
def _passArg(self, fun, kwargs, name, val):
"""
This helper passes the arguments to the underlying function if the function
has an argument with the given name. Otherwise, it adds it into the "params"
argument, which is a dictionary containing other parameters.
:param fun: The wrapped route handler function
:type fun: callable
:param name: The name of the argument to set
:type name: str
:param kwargs: The arguments to be passed down to the function.
:type kwargs: dict
:param val: The value of the argument to set
"""
if name in self._funNamedArgs or self._funHasKwargs:
kwargs[name] = val
kwargs['params'].pop(name, None)
else:
kwargs['params'][name] = val
def _mungeKwargs(self, kwargs, fun):
"""
Performs final modifications to the kwargs passed into the wrapped function.
Combines the sort/sortdir params appropriately for consumption by the model
layer, and only passes the "params" catch-all dict if there is a corresponding
kwarg for it in the wrapped function.
"""
if self.description.hasPagingParams and 'sort' in kwargs:
sortdir = kwargs.pop('sortdir', None) or kwargs['params'].pop('sortdir', None)
kwargs['sort'] = [(kwargs['sort'], sortdir)]
if 'params' not in self._funNamedArgs and not self._funHasKwargs:
kwargs.pop('params', None)
def _inspectFunSignature(self, fun):
self._funNamedArgs = set()
self._funHasKwargs = False
for funParam in six.viewvalues(signature(fun).parameters):
if funParam.kind in {Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY}:
# POSITIONAL_OR_KEYWORD are basic positional parameters
# KEYWORD_ONLY are named parameters that appear after a * in Python 3
self._funNamedArgs.add(funParam.name)
elif funParam.kind == Parameter.VAR_KEYWORD:
# VAR_KEYWORD is the **kwargs parameter
self._funHasKwargs = True
@staticmethod
def _destName(info, model):
destName = info['destName']
if destName is None:
if info['isModelClass']:
destName = model.name
else:
destName = info['model']
return destName
def __call__(self, fun):
self._inspectFunSignature(fun)
@six.wraps(fun)
def wrapped(*args, **kwargs):
"""
Transform any passed params according to the spec, or
fill in default values for any params not passed.
"""
# Combine path params with form/query params into a single lookup table
params = {k: v for k, v in six.viewitems(kwargs) if k != 'params'}
params.update(kwargs.get('params', {}))
kwargs['params'] = kwargs.get('params', {})
for descParam in self.description.params:
# We need either a type or a schema ( for message body )
if 'type' not in descParam and 'schema' not in descParam:
continue
name = descParam['name']
model = self._getModel(name, self.description.modelParams)
if name in params:
if name in self.description.jsonParams:
info = self.description.jsonParams[name]
val = self._loadJson(name, info, params[name])
self._passArg(fun, kwargs, name, val)
elif name in self.description.modelParams:
info = self.description.modelParams[name]
kwargs.pop(name, None) # Remove from path params
val = self._loadModel(name, info, params[name], model)
self._passArg(fun, kwargs, self._destName(info, model), val)
else:
val = self._validateParam(name, descParam, params[name])
self._passArg(fun, kwargs, name, val)
elif descParam['in'] == 'body':
if name in self.description.jsonParams:
info = self.description.jsonParams[name].copy()
info['required'] = descParam['required']
val = self._loadJsonBody(name, info)
self._passArg(fun, kwargs, name, val)
else:
self._passArg(fun, kwargs, name, cherrypy.request.body)
elif descParam['in'] == 'header':
continue # For now, do nothing with header params
elif 'default' in descParam:
self._passArg(fun, kwargs, name, descParam['default'])
elif descParam['required']:
raise RestException('Parameter "%s" is required.' % name)
else:
# If required=False but no default is specified, use None
if name in self.description.modelParams:
info = self.description.modelParams[name]
kwargs.pop(name, None) # Remove from path params
self._passArg(fun, kwargs, info['destName'] or model.name, None)
else:
self._passArg(fun, kwargs, name, None)
self._mungeKwargs(kwargs, fun)
return fun(*args, **kwargs)
if self.hide:
wrapped.description = None
else:
wrapped.description = self.description
return wrapped
def _validateJsonType(self, name, info, val):
if info.get('schema') is not None:
try:
jsonschema.validate(val, info['schema'])
except jsonschema.ValidationError as e:
raise RestException('Invalid JSON object for parameter %s: %s' % (
name, str(e)))
elif info['requireObject'] and not isinstance(val, dict):
raise RestException('Parameter %s must be a JSON object.' % name)
elif info['requireArray'] and not isinstance(val, list):
raise RestException('Parameter %s must be a JSON array.' % name)
def _loadJsonBody(self, name, info):
val = None
if cherrypy.request.body.length == 0 and info['required']:
raise RestException('JSON parameter %s must be passed in request body.' % name)
elif cherrypy.request.body.length > 0:
val = getBodyJson()
self._validateJsonType(name, info, val)
return val
def _loadJson(self, name, info, value):
try:
val = bson.json_util.loads(value)
except ValueError:
raise RestException('Parameter %s must be valid JSON.' % name)
self._validateJsonType(name, info, val)
return val
def _getModel(self, name, modelParams):
if name not in self.description.modelParams:
return
info = self.description.modelParams[name]
if info['isModelClass']:
return info['model']()
else:
return ModelImporter.model(info['model'], info['plugin'])
def _loadModel(self, name, info, id, model):
if info['force']:
doc = model.load(id, force=True, **info['kwargs'])
elif info['level'] is not None:
doc = model.load(id=id, level=info['level'], user=getCurrentUser(), **info['kwargs'])
else:
doc = model.load(id, **info['kwargs'])
if doc is None and info['exc']:
raise RestException('Invalid %s id (%s).' % (model.name, str(id)))
if info['requiredFlags']:
model.requireAccessFlags(doc, user=getCurrentUser(), flags=info['requiredFlags'])
return doc
def _handleString(self, name, descParam, value):
if descParam['_strip']:
value = value.strip()
if descParam['_lower']:
value = value.lower()
if descParam['_upper']:
value = value.upper()
format = descParam.get('format')
if format in ('date', 'date-time'):
try:
value = dateutil.parser.parse(value)
except ValueError:
raise RestException('Invalid date format for parameter %s: %s.' % (name, value))
if format == 'date':
value = value.date()
return value
def _handleInt(self, name, descParam, value):
try:
return int(value)
except ValueError:
raise RestException('Invalid value for integer parameter %s: %s.' % (name, value))
def _handleNumber(self, name, descParam, value):
try:
return float(value)
except ValueError:
raise RestException('Invalid value for numeric parameter %s: %s.' % (name, value))
def _validateParam(self, name, descParam, value):
"""
Validates and transforms a single parameter that was passed. Raises
RestException if the passed value is invalid.
:param name: The name of the param.
:type name: str
:param descParam: The formal parameter in the Description.
:type descParam: dict
:param value: The value passed in for this param for the current request.
:returns: The value transformed
"""
type = descParam.get('type')
# Coerce to the correct data type
if type == 'string':
value = self._handleString(name, descParam, value)
elif type == 'boolean':
value = toBool(value)
elif type == 'integer':
value = self._handleInt(name, descParam, value)
elif type == 'number':
value = self._handleNumber(name, descParam, value)
# Enum validation (should be after type coercion)
if 'enum' in descParam and value not in descParam['enum']:
raise RestException('Invalid value for %s: "%s". Allowed values: %s.' % (
name, value, ', '.join(str(v) for v in descParam['enum'])))
return value
| apache-2.0 | -6,370,051,301,363,379,000 | 37.137717 | 100 | 0.577182 | false |
balint256/gnuradio | docs/doxygen/doxyxml/doxyindex.py | 11 | 8909 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
docs than the generated classes provide.
"""
import os
from generated import index
from base import Base
from text import description
class DoxyIndex(Base):
"""
Parses a doxygen xml directory.
"""
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyIndex, self)._parse()
self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
for mem in self._root.compound:
converted = self.convert_mem(mem)
# For files and namespaces we want the contents to be
# accessible directly from the parent rather than having
# to go through the file object.
if self.get_cls(mem) == DoxyFile:
if mem.name.endswith('.h'):
self._members += converted.members()
self._members.append(converted)
elif self.get_cls(mem) == DoxyNamespace:
self._members += converted.members()
self._members.append(converted)
else:
self._members.append(converted)
def generate_swig_doc_i(self):
"""
%feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
"""
pass
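# Usage sketch (the path is illustrative): point DoxyIndex at a directory of
# doxygen-generated XML and inspect the extracted members. Member access below
# relies on helpers inherited from base.Base plus the properties defined in
# this module, so treat it as a sketch rather than a verified API.
#
#     di = DoxyIndex('build/doxygen/xml')
#     for member in di.members():
#         if isinstance(member, DoxyFunction):
#             print(member.brief_description)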
class DoxyCompMem(Base):
kind = None
def __init__(self, *args, **kwargs):
super(DoxyCompMem, self).__init__(*args, **kwargs)
@classmethod
def can_parse(cls, obj):
return obj.kind == cls.kind
def set_descriptions(self, parse_data):
bd = description(getattr(parse_data, 'briefdescription', None))
dd = description(getattr(parse_data, 'detaileddescription', None))
self._data['brief_description'] = bd
self._data['detailed_description'] = dd
def set_parameters(self, data):
vs = [ddc.value for ddc in data.detaileddescription.content_]
pls = []
for v in vs:
if hasattr(v, 'parameterlist'):
pls += v.parameterlist
pis = []
for pl in pls:
pis += pl.parameteritem
dpis = []
for pi in pis:
dpi = DoxyParameterItem(pi)
dpi._parse()
dpis.append(dpi)
self._data['params'] = dpis
class DoxyCompound(DoxyCompMem):
pass
class DoxyMember(DoxyCompMem):
pass
class DoxyFunction(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'function'
def _parse(self):
if self._parsed:
return
super(DoxyFunction, self)._parse()
self.set_descriptions(self._parse_data)
self.set_parameters(self._parse_data)
if not self._data['params']:
# If the params weren't set by a comment then just grab the names.
self._data['params'] = []
prms = self._parse_data.param
for prm in prms:
self._data['params'].append(DoxyParam(prm))
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
Base.mem_classes.append(DoxyFunction)
class DoxyParam(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyParam, self)._parse()
self.set_descriptions(self._parse_data)
self._data['declname'] = self._parse_data.declname
@property
def description(self):
descriptions = []
if self.brief_description:
descriptions.append(self.brief_description)
if self.detailed_description:
descriptions.append(self.detailed_description)
return '\n\n'.join(descriptions)
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
name = property(lambda self: self.data()['declname'])
class DoxyParameterItem(DoxyMember):
"""A different representation of a parameter in Doxygen."""
def _parse(self):
if self._parsed:
return
super(DoxyParameterItem, self)._parse()
names = []
for nl in self._parse_data.parameternamelist:
for pn in nl.parametername:
names.append(description(pn))
# Just take first name
self._data['name'] = names[0]
# Get description
pd = description(self._parse_data.get_parameterdescription())
self._data['description'] = pd
description = property(lambda self: self.data()['description'])
name = property(lambda self: self.data()['name'])
class DoxyClass(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'class'
def _parse(self):
if self._parsed:
return
super(DoxyClass, self)._parse()
self.retrieve_data()
if self._error:
return
self.set_descriptions(self._retrieved_data.compounddef)
self.set_parameters(self._retrieved_data.compounddef)
# Sectiondef.kind tells about whether private or public.
# We just ignore this for now.
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
Base.mem_classes.append(DoxyClass)
class DoxyFile(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'file'
def _parse(self):
if self._parsed:
return
super(DoxyFile, self)._parse()
self.retrieve_data()
self.set_descriptions(self._retrieved_data.compounddef)
if self._error:
return
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
Base.mem_classes.append(DoxyFile)
class DoxyNamespace(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'namespace'
def _parse(self):
if self._parsed:
return
super(DoxyNamespace, self)._parse()
self.retrieve_data()
self.set_descriptions(self._retrieved_data.compounddef)
if self._error:
return
self.process_memberdefs()
Base.mem_classes.append(DoxyNamespace)
class DoxyGroup(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'group'
def _parse(self):
if self._parsed:
return
super(DoxyGroup, self)._parse()
self.retrieve_data()
if self._error:
return
cdef = self._retrieved_data.compounddef
self._data['title'] = description(cdef.title)
# Process inner groups
grps = cdef.innergroup
for grp in grps:
converted = DoxyGroup.from_refid(grp.refid, top=self.top)
self._members.append(converted)
# Process inner classes
klasses = cdef.innerclass
for kls in klasses:
converted = DoxyClass.from_refid(kls.refid, top=self.top)
self._members.append(converted)
# Process normal members
self.process_memberdefs()
title = property(lambda self: self.data()['title'])
Base.mem_classes.append(DoxyGroup)
class DoxyFriend(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'friend'
Base.mem_classes.append(DoxyFriend)
class DoxyOther(Base):
__module__ = "gnuradio.utils.doxyxml"
kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum',
'dir', 'page', 'signal', 'slot', 'property'])
@classmethod
def can_parse(cls, obj):
return obj.kind in cls.kinds
Base.mem_classes.append(DoxyOther)
| gpl-3.0 | -9,188,378,029,588,908,000 | 28.5 | 85 | 0.615894 | false |
VirusTotal/msticpy | msticpy/nbtools/foliummap.py | 1 | 11242 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Folium map class."""
import math
import statistics as stats
import warnings
from typing import Iterable, List, Tuple
import folium
from .._version import VERSION
from .entityschema import Entity, GeoLocation, IpAddress
# pylint: enable=locally-disabled, unused-import
from ..common.utility import export
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=too-many-arguments, too-few-public-methods
@export
class FoliumMap:
"""Wrapper class for Folium/Leaflet mapping."""
def __init__(
self,
title: str = "layer1",
zoom_start: float = 2.5,
tiles=None,
width: str = "100%",
height: str = "100%",
location: list = None,
):
"""
Create an instance of the folium map.
Parameters
----------
title : str, optional
Name of the layer (the default is 'layer1')
        zoom_start : float, optional
            The zoom level of the map (the default is 2.5)
tiles : [type], optional
Custom set of tiles or tile URL (the default is None)
width : str, optional
Map display width (the default is '100%')
height : str, optional
Map display height (the default is '100%')
location : list, optional
Location to center map on
Attributes
----------
folium_map : folium.Map
The map object.
"""
if not location:
location = [47.67, -122.13]
self.folium_map = folium.Map(
zoom_start=zoom_start,
tiles=tiles,
width=width,
height=height,
location=location,
)
folium.TileLayer(name=title).add_to(self.folium_map)
self.locations: List[Tuple[float, float]] = []
def _repr_html_(self):
"""Return folium map as HTML."""
# pylint: disable=protected-access
return self.folium_map._repr_html_()
# pylint: enable=protected-access
def center_map(self):
"""Calculate and set map center based on current coordinates."""
self.folium_map.location = _get_center_coords(self.locations)
def add_ip_cluster(self, ip_entities: Iterable[IpAddress], **kwargs):
"""
Add a collection of IP Entities to the map.
Parameters
----------
ip_entities : Iterable[IpAddress]
a iterable of IpAddress Entities
Other Parameters
----------------
kwargs: icon properties to use for displaying this cluster
"""
geo_entity = GeoLocation() # type: ignore
geo_entity.CountryCode = "Unknown" # type: ignore
geo_entity.CountryName = "Unknown" # type: ignore
geo_entity.State = "Unknown" # type: ignore
geo_entity.City = "Unknown" # type: ignore
geo_entity.Longitude = 0.0 # type: ignore
geo_entity.Latitude = 0.0 # type: ignore
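        # assign the placeholder location to any IP entity that has no geolocation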
for ip_entity in ip_entities:
if ip_entity.Location is None:
ip_entity.Location = geo_entity # type: ignore
for ip_entity in ip_entities:
if (
not (
isinstance(ip_entity.Location.Latitude, (int, float))
and isinstance(ip_entity.Location.Longitude, (int, float))
)
or math.isnan(ip_entity.Location.Latitude)
or math.isnan(ip_entity.Location.Longitude)
):
warnings.warn(
"Invalid location information for IP: " + ip_entity.Address,
RuntimeWarning,
)
continue
loc_props = ", ".join(
[
f"{key}={val}"
for key, val in ip_entity.Location.properties.items()
if val
]
)
popup_text = "{loc_props}<br>IP: {IP}".format(
IP=ip_entity.Address, loc_props=loc_props
)
if (
"City" in ip_entity.Location.properties
or "CountryName" in ip_entity.Location.properties
):
tooltip_text = "{City}, {CountryName}".format(
**ip_entity.Location.properties
)
else:
tooltip_text = "{Latitude}, {Longitude}".format(
**ip_entity.Location.properties
)
if ip_entity.AdditionalData:
addl_props = ", ".join(
[
f"{key}={val}"
for key, val in ip_entity.AdditionalData.items()
if val
]
)
popup_text = f"{popup_text}<br>{addl_props}"
tooltip_text = f"{tooltip_text}, {addl_props}"
marker = folium.Marker(
location=[ip_entity.Location.Latitude, ip_entity.Location.Longitude],
popup=popup_text,
tooltip=tooltip_text,
icon=folium.Icon(**kwargs),
)
marker.add_to(self.folium_map)
self.locations.append(
(ip_entity.Location.Latitude, ip_entity.Location.Longitude)
)
def add_geoloc_cluster(self, geo_locations: Iterable[GeoLocation], **kwargs):
"""
Add a collection of GeoLocation objects to the map.
Parameters
----------
geo_locations : Iterable[GeoLocation]
Iterable of GeoLocation entities.
"""
ip_entities = []
for geo in geo_locations:
ip_entities.append(IpAddress(Address="na", Location=geo))
self.add_ip_cluster(ip_entities=ip_entities, **kwargs)
def add_locations(self, locations: Iterable[Tuple[float, float]], **kwargs):
"""
Add a collection of lat/long tuples to the map.
Parameters
----------
locations : Iterable[Tuple[float, float]]
Iterable of location tuples.
"""
geo_entities = [
GeoLocation(Latitude=lat, Longitude=long) for lat, long in locations
]
self.add_geoloc_cluster(geo_locations=geo_entities, **kwargs)
def get_map_center(entities: Iterable[Entity], mode: str = "median"):
"""
Calculate median point between Entity IP locations.
Parameters
----------
entities : Iterable[Entity]
An iterable of entities containing IpAddress geolocation information.
The entities can be IpAddress entities or other entities that
have IpAddress properties.
The entities must all be of the same type.
mode : str, optional
The averaging method to use, by default "median".
"median" and "mean" are the supported values.
Returns
-------
Tuple
        The Latitude and Longitude calculated
Notes
-----
The function uses the first entity in the `entities` to determine
how to process the collection. E.g. if the first entity has properties
src_ip and dest_ip of type `IpAddress`, these are the only properties
that will be processed for the remainder of the entities.
"""
ip_entities: List[IpAddress] = []
loc_entities: List[GeoLocation] = []
if not entities:
return (0, 0)
entities = list(entities)
if isinstance(entities[0], IpAddress):
return get_center_ip_entities(entities) # type: ignore
loc_props = [
p_name
for p_name, p_val in entities[0].properties.items()
if isinstance(p_val, (IpAddress, GeoLocation))
]
for entity in entities:
for prop in loc_props:
if prop not in entity:
continue
loc_entity = entity[prop]
if isinstance(loc_entity, IpAddress):
ip_entities.append(loc_entity)
elif isinstance(loc_entity, GeoLocation):
loc_entities.append(loc_entity)
locs_ips = _extract_locs_ip_entities(ip_entities)
return get_center_geo_locs(locs_ips + loc_entities, mode=mode)
def _extract_locs_ip_entities(ip_entities: Iterable[IpAddress]):
"""Return the list of IP entities that have a Location property."""
if isinstance(ip_entities[0], list): # type: ignore
ip_locs = [
ip[0]["Location"] # type: ignore
for ip in ip_entities
if bool(ip[0].Location) # type: ignore
]
else:
ip_locs = [ip["Location"] for ip in ip_entities if bool(ip.Location)]
return ip_locs
def get_center_ip_entities(
ip_entities: Iterable[IpAddress], mode: str = "median"
) -> Tuple[float, float]:
"""
Return the geographical center of the IP address locations.
Parameters
----------
ip_entities : Iterable[IpAddress]
IpAddress entities with location information
mode : str, optional
        The averaging method to use, by default "median".
"median" and "mean" are the supported values.
Returns
-------
Tuple[Union[int, float], Union[int, float]]
Tuple of latitude, longitude
"""
ip_locs_longs = _extract_locs_ip_entities(ip_entities)
return get_center_geo_locs(ip_locs_longs, mode=mode)
def _extract_coords_loc_entities(loc_entities: Iterable[GeoLocation]):
"""Return list of coordinate tuples from GeoLocation entities."""
return [
(loc["Latitude"], loc["Longitude"])
for loc in loc_entities
if "Latitude" in loc and "Longitude" in loc
]
def get_center_geo_locs(
loc_entities: Iterable[GeoLocation], mode: str = "median"
) -> Tuple[float, float]:
"""
Return the geographical center of the geo locations.
Parameters
----------
loc_entities : Iterable[GeoLocation]
GeoLocation entities with location information
mode : str, optional
The averaging method to use, by default "median".
"median" and "mean" are the supported values.
Returns
-------
Tuple[Union[int, float], Union[int, float]]
Tuple of latitude, longitude
"""
lat_longs = _extract_coords_loc_entities(loc_entities)
return _get_center_coords(lat_longs, mode=mode)
def _get_center_coords(
locations: Iterable[Tuple[float, float]], mode: str = "median"
) -> Tuple[float, float]:
"""Return the center (median) of the coordinates."""
if not locations:
return 0, 0
locs = list(locations)
if mode == "median":
try:
return (
stats.median([loc[0] for loc in locs if not math.isnan(loc[0])]),
stats.median([loc[1] for loc in locs if not math.isnan(loc[1])]),
)
except stats.StatisticsError:
pass
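            # median() failed (e.g. every coordinate was NaN, leaving an empty
            # list), so fall through to the mean-based calculation below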
return (
stats.mean([loc[0] for loc in locs if not math.isnan(loc[0])]),
stats.mean([loc[1] for loc in locs if not math.isnan(loc[1])]),
)
| mit | -5,948,565,719,242,640,000 | 32.064706 | 85 | 0.562444 | false |
SUNET/eduid-webapp | src/eduid_webapp/letter_proofing/views.py | 1 | 8577 | # -*- coding: utf-8 -*-
from flask import Blueprint, abort
from eduid_common.api.decorators import MarshalWith, UnmarshalWith, can_verify_identity, require_user
from eduid_common.api.exceptions import AmTaskFailed, MsgTaskFailed
from eduid_common.api.helpers import add_nin_to_user, check_magic_cookie, verify_nin_for_user
from eduid_common.api.messages import CommonMsg, FluxData, error_response, success_response
from eduid_common.misc.timeutil import utc_now
from eduid_userdb import User
from eduid_userdb.logs import LetterProofing
from eduid_userdb.proofing import ProofingUser
from eduid_webapp.letter_proofing import pdf, schemas
from eduid_webapp.letter_proofing.app import current_letterp_app as current_app
from eduid_webapp.letter_proofing.ekopost import EkopostException
from eduid_webapp.letter_proofing.helpers import LetterMsg, check_state, create_proofing_state, get_address, send_letter
__author__ = 'lundberg'
letter_proofing_views = Blueprint('letter_proofing', __name__, url_prefix='', template_folder='templates')
@letter_proofing_views.route('/proofing', methods=['GET'])
@MarshalWith(schemas.LetterProofingResponseSchema)
@require_user
def get_state(user) -> FluxData:
current_app.logger.info('Getting proofing state for user {}'.format(user))
proofing_state = current_app.proofing_statedb.get_state_by_eppn(user.eppn, raise_on_missing=False)
if proofing_state:
current_app.logger.info('Found proofing state for user {}'.format(user))
result = check_state(proofing_state)
if result.is_expired and current_app.conf.backwards_compat_remove_expired_state:
current_app.logger.info(f'Backwards-compat removing expired state for user {user}')
current_app.proofing_statedb.remove_state(proofing_state)
current_app.stats.count('letter_expired')
return success_response(message=LetterMsg.no_state)
return result.to_response()
return success_response(message=LetterMsg.no_state)
@letter_proofing_views.route('/proofing', methods=['POST'])
@UnmarshalWith(schemas.LetterProofingRequestSchema)
@MarshalWith(schemas.LetterProofingResponseSchema)
@can_verify_identity
@require_user
def proofing(user: User, nin: str) -> FluxData:
current_app.logger.info('Send letter for user {} initiated'.format(user))
proofing_state = current_app.proofing_statedb.get_state_by_eppn(user.eppn, raise_on_missing=False)
# No existing proofing state was found, create a new one
if not proofing_state:
# Create a LetterNinProofingUser in proofingdb
proofing_state = create_proofing_state(user.eppn, nin)
current_app.logger.info('Created proofing state for user {}'.format(user))
# Add the nin used to initiate the proofing state to the user
# NOOP if the user already have the nin
add_nin_to_user(user, proofing_state)
if proofing_state.proofing_letter.is_sent:
current_app.logger.info('A letter has already been sent to the user.')
current_app.logger.debug('Proofing state: {}'.format(proofing_state.to_dict()))
result = check_state(proofing_state)
if result.error:
# error message
return result.to_response()
if not result.is_expired:
return result.to_response()
current_app.logger.info('The letter has expired. Sending a new one...')
current_app.proofing_statedb.remove_state(proofing_state)
current_app.logger.info(f'Removed {proofing_state}')
current_app.stats.count('letter_expired')
proofing_state = create_proofing_state(user.eppn, nin)
current_app.logger.info(f'Created new {proofing_state}')
try:
address = get_address(user, proofing_state)
if not address:
current_app.logger.error('No address found for user {}'.format(user))
return error_response(message=LetterMsg.address_not_found)
except MsgTaskFailed:
current_app.logger.exception(f'Navet lookup failed for user {user}')
current_app.stats.count('navet_error')
return error_response(message=CommonMsg.navet_error)
# Set and save official address
proofing_state.proofing_letter.address = address
current_app.proofing_statedb.save(proofing_state)
try:
campaign_id = send_letter(user, proofing_state)
current_app.stats.count('letter_sent')
except pdf.AddressFormatException:
current_app.logger.exception('Failed formatting address')
current_app.stats.count('address_format_error')
return error_response(message=LetterMsg.bad_address)
except EkopostException:
current_app.logger.exception('Ekopost returned an error')
current_app.stats.count('ekopost_error')
return error_response(message=CommonMsg.temp_problem)
# Save the users proofing state
proofing_state.proofing_letter.transaction_id = campaign_id
proofing_state.proofing_letter.is_sent = True
proofing_state.proofing_letter.sent_ts = utc_now()
current_app.proofing_statedb.save(proofing_state)
result = check_state(proofing_state)
result.message = LetterMsg.letter_sent
return result.to_response()
@letter_proofing_views.route('/verify-code', methods=['POST'])
@UnmarshalWith(schemas.VerifyCodeRequestSchema)
@MarshalWith(schemas.VerifyCodeResponseSchema)
@require_user
def verify_code(user: User, code: str) -> FluxData:
current_app.logger.info('Verifying code for user {}'.format(user))
proofing_state = current_app.proofing_statedb.get_state_by_eppn(user.eppn, raise_on_missing=False)
if not proofing_state:
return error_response(message=LetterMsg.no_state)
# Check if provided code matches the one in the letter
if not code == proofing_state.nin.verification_code:
current_app.logger.error('Verification code for user {} does not match'.format(user))
        # TODO: Throttling to discourage an adversary from trying brute force
return error_response(message=LetterMsg.wrong_code)
state_info = check_state(proofing_state)
if state_info.error:
return state_info.to_response()
if state_info.is_expired:
# This is not an error in the get_state view, but here it is an error so 'upgrade' it.
state_info.error = True
current_app.logger.warning(f'Tried to validate expired state: {proofing_state}')
return state_info.to_response()
try:
# Fetch registered address again, to save the address of record at time of verification.
official_address = get_address(user, proofing_state)
except MsgTaskFailed:
current_app.logger.exception(f'Navet lookup failed for user {user}')
current_app.stats.count('navet_error')
return error_response(message=CommonMsg.navet_error)
proofing_log_entry = LetterProofing(
eppn=user.eppn,
created_by='eduid_letter_proofing',
nin=proofing_state.nin.number,
letter_sent_to=proofing_state.proofing_letter.address,
transaction_id=proofing_state.proofing_letter.transaction_id,
user_postal_address=official_address,
proofing_version='2016v1',
)
try:
# Verify nin for user
proofing_user = ProofingUser.from_user(user, current_app.private_userdb)
if not verify_nin_for_user(proofing_user, proofing_state, proofing_log_entry):
current_app.logger.error(f'Failed verifying NIN for user {user}')
return error_response(message=CommonMsg.temp_problem)
current_app.logger.info(f'Verified code for user {user}')
# Remove proofing state
current_app.proofing_statedb.remove_state(proofing_state)
current_app.stats.count(name='nin_verified')
return success_response(
payload=dict(nins=proofing_user.nins.to_list_of_dicts()), message=LetterMsg.verify_success
)
except AmTaskFailed:
current_app.logger.exception(f'Verifying nin for user {user} failed')
return error_response(message=CommonMsg.temp_problem)
@letter_proofing_views.route('/get-code', methods=['GET'])
@require_user
def get_code(user):
"""
Backdoor to get the verification code in the staging or dev environments
"""
try:
if check_magic_cookie(current_app.conf):
state = current_app.proofing_statedb.get_state_by_eppn(user.eppn)
return state.nin.verification_code
except Exception:
current_app.logger.exception(f"{user} tried to use the backdoor to get the letter verification code for a NIN")
abort(400)
| bsd-3-clause | 6,693,204,529,694,503,000 | 44.142105 | 120 | 0.710505 | false |
shagi/cmsplugin_gallery_filer | cmsplugin_gallery/migrations/0004_auto__add_field_image_src_new.py | 2 | 11497 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Image.src_new'
db.add_column('cmsplugin_gallery_image', 'src_new',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Image.src_new'
db.delete_column('cmsplugin_gallery_image', 'src_new_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_gallery.galleryplugin': {
'Meta': {'object_name': 'GalleryPlugin', 'db_table': "'cmsplugin_galleryplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'cmsplugin_gallery/gallery.html'", 'max_length': '255'})
},
'cmsplugin_gallery.image': {
'Meta': {'ordering': "('inline_ordering_position',)", 'object_name': 'Image'},
'alt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_gallery.GalleryPlugin']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inline_ordering_position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'src_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'src_new': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'src_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_gallery'] | bsd-2-clause | 234,057,685,695,669,000 | 79.971831 | 182 | 0.553971 | false |
ivmech/iviny-scope | lib/xlsxwriter/chart_stock.py | 1 | 3290 | ###############################################################################
#
# ChartStock - A class for writing the Excel XLSX Stock charts.
#
# Copyright 2013, John McNamara, [email protected]
#
from . import chart
class ChartStock(chart.Chart):
"""
A class for writing the Excel XLSX Stock charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options=None):
"""
Constructor.
"""
super(ChartStock, self).__init__()
if options is None:
options = {}
self.show_crosses = 0
self.hi_low_lines = {}
self.date_category = True
# Override and reset the default axis values.
self.x_axis['defaults']['num_format'] = 'dd/mm/yyyy'
self.x2_axis['defaults']['num_format'] = 'dd/mm/yyyy'
self.set_x_axis({})
self.set_x2_axis({})
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args):
# Override the virtual superclass method with a chart specific method.
# Write the c:stockChart element.
self._write_stock_chart(args)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_stock_chart(self, args):
# Write the <c:stockChart> element.
# Overridden to add hi_low_lines().
if args['primary_axes']:
series = self._get_primary_axes_series()
else:
series = self._get_secondary_axes_series()
if not len(series):
return
# Add default formatting to the series data.
self._modify_series_formatting()
self._xml_start_tag('c:stockChart')
# Write the series elements.
for data in series:
self._write_ser(data)
# Write the c:dropLines element.
self._write_drop_lines()
# Write the c:hiLowLines element.
if args.get('primary_axes'):
self._write_hi_low_lines()
# Write the c:upDownBars element.
self._write_up_down_bars()
# Write the c:marker element.
self._write_marker_value()
# Write the c:axId elements
self._write_axis_ids(args)
self._xml_end_tag('c:stockChart')
def _modify_series_formatting(self):
# Add default formatting to the series data.
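        # Series are treated in groups of four: the first three series of each
        # group get a hidden line (width 2.25, 'none': 1), the third one also
        # gets a small dot marker, and every fourth series is left untouched.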
index = 0
for series in self.series:
if index % 4 != 3:
if not series['line']['defined']:
series['line'] = {'width': 2.25,
'none': 1,
'defined': 1}
if series['marker'] is None:
if index % 4 == 2:
series['marker'] = {'type': 'dot', 'size': 3}
else:
series['marker'] = {'type': 'none'}
index += 1
| gpl-3.0 | 6,407,259,483,271,581,000 | 26.881356 | 79 | 0.426748 | false |
mac389/brainpy | lib/Analysis.py | 1 | 1379 | from brian import *
from itertools import product
import neuroTools as postdoc
from scipy.signal import fftconvolve
from time import time
import sortUtils as tech
import ISIpy as ISIpy
filenames = ['/Volumes/My Book/Rat/010113_real/continuous/ch045/ch045.spiketimes',
'/Volumes/My Book/Rat/010113_real/continuous/ch045/ch045.spiketimes']
ld = time()
isCCF=False
isLZC = False
if isLZC or isCCF:
print 'Loading Data -> ',
data = [loadtxt(filename,delimiter='\t')*.1*ms for filename in filenames]
	#Recording must be a list of, or a generator expression for, the lists of spike times
print 'Loaded'
w = 20
if isCCF:
print 'Calculating CCFs -> ',
ccfs = [CCVF(one,two,width=w*ms) for one,two in product(data,data)]
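	# normalize each cross-covariance function by its own maximum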
ccfs = map(lambda ccf: ccf/ccf.max(),ccfs)
print 'Calculated'
rowL=len(ccfs)/2
colL=rowL
acf_panel,ax=subplots(rowL,colL, sharex=True, sharey=True)
	#Should use absolute, not relative, normalization
	#Currently uses relative normalization (each CCF is divided by its own maximum)
for i in range(rowL):
for j in range(colL):
print i+j-1
ax[i,j].plot(arange(-w,w),ccfs[i+j+1])
ax[i,j].set_ylabel('Covariance')
ax[i,j].set_xlabel('Time (ms)')
postdoc.adjust_spines(ax[i,j],['bottom','left'])
show()
if isLZC:
print '------'
print 'Computing LZ:', [postdoc.LZC(datum) for datum in data]
print '------'
ISID = ISIpy.ISIpy(data_location=filenames)
#Get time series for LZ
| gpl-3.0 | -8,916,014,207,065,413,000 | 23.642857 | 82 | 0.708484 | false |
Grassboy/plugin.video.plurkTrend | youtube_dl/extractor/videofyme.py | 19 | 1697 | import re
from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
determine_ext,
)
class VideofyMeIE(InfoExtractor):
_VALID_URL = r'https?://(www\.videofy\.me/.+?|p\.videofy\.me/v)/(?P<id>\d+)(&|#|$)'
IE_NAME = u'videofy.me'
_TEST = {
u'url': u'http://www.videofy.me/thisisvideofyme/1100701',
u'file': u'1100701.mp4',
u'md5': u'c77d700bdc16ae2e9f3c26019bd96143',
u'info_dict': {
u'title': u'This is VideofyMe',
u'description': None,
u'uploader': u'VideofyMe',
u'uploader_id': u'thisisvideofyme',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id,
video_id)
video = config.find('video')
sources = video.find('sources')
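        # pick the first available HQ source, trying the ids 'HQ on', 'HQ av' and 'HQ off' in that order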
url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key)
for key in ['on', 'av', 'off']] if node is not None)
video_url = url_node.find('url').text
return {'id': video_id,
'title': video.find('title').text,
'url': video_url,
'ext': determine_ext(video_url),
'thumbnail': video.find('thumb').text,
'description': video.find('description').text,
'uploader': config.find('blog/name').text,
'uploader_id': video.find('identifier').text,
'view_count': re.search(r'\d+', video.find('views').text).group(),
}
| mit | 782,440,485,634,443,300 | 35.891304 | 98 | 0.519151 | false |
hendrikwout/pynacolada | trash/pynacolada-20131101-2-old.py | 1 | 25756 | import pickle
import pylab as pl
from operator import itemgetter
import scipy.io as io
import numpy as np
import sys
from operator import mul
class SomeError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def rwicecube(filestream,shp,refiter,dimiter,dimpos,refnoiter,dimnoiter,icecube,vtype,vsize,voffset,rwchsize,mode):
"""
read or write data icecube from binary data and put it in an array
    filestream: binary file reference
    shp: shape of the variable in the filestream
    refiter: references to the dimensions that are iterated over one index at a time
    dimpos: current index position along those iterated dimensions
    refnoiter/dimnoiter: references to, and lengths of, the dimensions read or written as one block
    mode: 'read' or 'write'
"""
# e.g. shp = (200,100,50,50,20)
# refiter = (1,3,4)
# dimpos = (5,10,9)
# extend so that structured arrays are read at once
lennoiter = long(1)
for irefnoiter,erefnoiter in enumerate(refnoiter):
lennoiter = lennoiter*dimnoiter[irefnoiter]
fpos = 0
# e.g. fpos = (9)+ 20*(10) + 50*50*20*(5)
for idimpos,edimpos in enumerate(dimpos):
curadd = edimpos
#e.g. if edimpos == (5): curadd = 50*50*20*(5)
# exclude trivial special case of only 1 iteration step
# --> in that case fpos is just zero.
if refiter != [-1]:
if ((refiter[idimpos] + 1) < len(shp)):
for i in range(refiter[idimpos] + 1,len(shp)) :
curadd = curadd * shp[i]
fpos = fpos + curadd
# Initialize (for reading) or prepare (for writing) icecube array
if mode == 'read':
icecube = np.zeros((lennoiter,),dtype=vtype)*np.nan
elif mode == 'write':
icecube = np.reshape(icecube,(lennoiter,))
dimnoiterpos = [0]*len(dimnoiter)
# print icecube,dimnoiterpos
j = 0
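    # walk over the flattened non-iterated dimensions in strips of rwchsize
    # elements, seeking to the flat file position of every strip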
while j < lennoiter:
fposicecube = fpos
for idimpos,edimpos in enumerate(dimnoiterpos):
curadd = edimpos
# e.g. fposicecube = (1)*52
# e.g. fposicecube = (9)+ 20*(10) + 50*50*20*(5)
if ((refnoiter[idimpos] + 1) < len(shp)):
for i in range(refnoiter[idimpos] + 1,len(shp)) :
curadd = curadd * shp[i]
fposicecube = fposicecube + curadd
filestream.seek(voffset+vsize*fposicecube)
if mode == 'read':
icecube[j:(j+rwchsize)] = np.fromfile(filestream,dtype=vtype,count=rwchsize)
elif mode == 'write':
filestream.seek(vsize*fposicecube)
filestream.write(icecube[j:(j+rwchsize)])
# go to next data strip
if dimnoiterpos != []:
# rwchsize: allow reading of chunks for the inner dimensions
dimnoiterpos[-1] = dimnoiterpos[-1] + rwchsize
for idimidx,edimidx in enumerate(reversed(dimnoiterpos)):
if idimidx > 0:
while dimnoiterpos[idimidx] >= dimnoiter[idimidx]:
dimnoiterpos[idimidx-1] = dimnoiterpos[idimidx-1] + 1
dimnoiterpos[idimidx] -= dimnoiter[idimidx]
j = j+rwchsize
icecube.shape = dimnoiter
if mode == 'read':
return icecube
def readicecubeps(fstream,shp,refiter,dimiter,dimiterpos,refnoiter,dimnoiter,vtype,vsize,voffset,rwchsize):
"""
read an icecube by sorting the indices (highest at the back).
perform an in-memory Post Swap of dimensions (very fast) to compensate for the sorting.
we allow reading in chunks according to the inner dimensions. They will be mostly there because we allow an max-icecubesize
"""
refnoitersort,trns,dimnoitersort = zip(*sorted(zip(refnoiter,range(len(refnoiter)),dimnoiter),key=itemgetter(0,1)))
icecube =rwicecube(fstream,shp,refiter,dimiter,dimiterpos,refnoitersort,dimnoitersort,None,vtype,vsize,voffset,rwchsize,'read')
# build the 'inverse permutation' operator for tranposition before writeout
inv = range(len(trns))
for itrns, etrns in enumerate(trns):
inv[etrns] = itrns
return np.transpose(icecube,inv)
def writeicecubeps(fstream,shp,refiter,dimiter,dimiterpos,refnoiter,dimnoiter,data,vtype,vsize,voffset,rwchsize):
"""
write an icecube and perform an in-memory Post Swap of dimensions before (very fast)
hereby, we acquire the order of the icecube dimensions
"""
refnoitersort,trns,dimnoitersort = zip(*sorted(zip(refnoiter,range(len(refnoiter)),dimnoiter),key=itemgetter(0,1)))
rwicecube(fstream,shp,refiter,dimiter,dimiterpos,refnoitersort,dimnoitersort,np.transpose(data,trns),vtype,vsize,voffset,rwchsize,'write')
def ncvartypeoffset(ncfile,var):
""" purpose: get binary data type and offset of a variable in netcdf file
unfortunately, getting these properties are not explicitely implemented in scipy, but most of this code is stolen from scipy: /usr/lib/python2.7/dist-packages/scipy/io/netcdf.py
ncfile is a scipy.io.netcdf.netcdf_file
var variable we want to calculate the offset from
"""
oripos=ncfile.fp.tell()
ncfile.fp.seek(0)
magic = ncfile.fp.read(3)
ncfile.__dict__['version_byte'] = np.fromstring(ncfile.fp.read(1), '>b')[0]
# Read file headers and set data.
ncfile._read_numrecs()
ncfile._read_dim_array()
ncfile._read_gatt_array()
header = ncfile.fp.read(4)
count = ncfile._unpack_int()
vars = []
for ic in range(count):
vars.append(list(ncfile._read_var()))
ivar = np.where(np.array(vars) == var)[0][0]
ncfile.fp.seek(oripos)
return vars[ivar][6] , vars[ivar][7]
var = 'QV'
ncfile = io.netcdf.netcdf_file('/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf.nc','r')
shp = ncfile.variables[var].shape
vsize = ncfile.variables[var].itemsize()
vtype, voffset = ncvartypeoffset(ncfile,var)
fin = ncfile.fp
refapplyout= (1,)
fout = io.netcdf.netcdf_file('/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf2.nc','w')
func = lambda x,y,z,u: np.array([[[np.mean(x)]],[[np.mean(x)]]]) # *(1.+np.zeros(x.shape))
#sizeout(shp,func,dimapplyref)
# extract shpout from shp[:],dimref[:],dimapplyref[:],func
adimsin = ((365,40,100,300),\
(200,40),\
(365,40,100),\
(1,200,100),\
)
avtypeoutspec = [None]
adnamsin = (('time','z','lat','lon'),\
('lon','z'),\
('time','z','lat'),\
('time','lon','lat'))
dnamsel = ('lon','time','t')
maxicecubesize=100000000
# construction of the output dimensions
dimsout = [] # maximum length of an output dimension
dnamout = []
idimsout = 0
for idnamsin,ednamsin in enumerate(adnamsin):
for idnam,ednam in reversed(list(enumerate(ednamsin))):
if ednam not in dnamout:
dnamout.insert(0,ednam)
print ednam
if ednam not in dnamsel:
dimsout.insert(0,adimsin[idnamsin][idnam])
print ednam
else:
# In this case, wait for assigning the output dimensions. This actually depends on the specified function
dimsout.insert(0,None)
else:
if ((adimsin[idnamsin][idnam] != 1) & (dimsout[dnamout.index(ednam)] != 1) & \
# we allow non-equal dimension lengths, as long as the dimension is covered/captured by the function
# maybe still allow non-equal dimension length not covered by the function????
(dimsout[dnamout.index(ednam)] != None) & \
(adimsin[idnamsin][idnam] != dimsout[dnamout.index(ednam)])):
                    raise SomeError("The corresponding output dimensions (index: "+str(dnamout.index(ednam))+") of the input variable "+str(idnamsin)+ " "+ str(idnam)+ " "+" have different lengths and are not equal to 1.")
else:
if (dimsout[dnamout.index(ednam)] != None):
dimsout[dnamout.index(ednam)] = max(dimsout[dnamout.index(ednam)],adimsin[idnamsin][idnam])
print 'Output dimensions: ', zip(dnamout,dimsout)
idnam = 0
# ad the missing dimensions selected for the function
for idnamsel,ednamsel in enumerate(dnamsel):
if ednamsel not in dnamout:
dnamout.insert(idnam,ednamsel)
dimsout.insert(idnam,None) # to be defined from the function
        idnam = idnam+1 # shouldn't this also be implemented above?
else:
idnam = dnamout.index(ednam)+1
# copy adnams
adnams = list([])
for idnamsin,ednamsin in enumerate(adnamsin):
adnams.append(list(ednamsin))
for idnams,ednams in enumerate(adnams):
idnam = 0
for idnamout,ednamout in enumerate(dnamout):
if ednamout not in ednams:
ednams.insert(idnam,ednamout)
idnam = idnam + 1
else:
idnam = ednams.index(ednamout) + 1
adims = []
arefs = []
for idims,edims in enumerate(adimsin):
arefs.append(list([]))
adims.append(list([]))
# dnamout.index()
for idims,edims in enumerate(adimsin):
for idim,edim in enumerate(dimsout):
arefs[idims].append(dnamout.index(adnams[idims][idim]))
if dnamout[arefs[idims][-1]] in adnamsin[idims]:
adims[idims].append(adimsin[idims][adnamsin[idims].index(dnamout[arefs[idims][-1]])])
else:
adims[idims].append(1)
adims = np.array(adims,dtype=np.int32)
arefs = np.array(arefs,dtype=np.int32)
# adimssw: the input dimensions (like adims), but ordered according to the output dimensions
adimssw = np.zeros_like(arefs)
arefssw = np.zeros_like(arefs)
for irefs,erefs in enumerate(arefs):
for iref,eref in enumerate(arefs[irefs]):
adimssw[irefs,arefs[irefs][iref]] = adims[irefs,iref]
arefssw[irefs,arefs[irefs][iref]] = iref
refapplyout = []
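# map every selected dimension name (dnamsel) to its index in the output dimension list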
for idnamsel,ednamsel in enumerate(dnamsel):
refapplyout.append(dnamout.index(ednamsel))
# adimapplyin: the input dimensions of the function based on the refapplyout
# arefapply = [list([])]*len(arefs)
# adimapplyin = np.array([list([None]*len(refapplyout))]*len(arefs))
adimapplyin = np.zeros((len(arefs),len(refapplyout)),dtype='int32')
for irefapplyout,erefapplyout in enumerate(refapplyout):
for idims,edims in enumerate(adims):
adimapplyin[idims,irefapplyout] = adims[idims][np.where(arefs[idims] == erefapplyout)[0][0]]
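# probe the function with zero-filled dummy arrays shaped like the apply-dimensions
# to discover the shape and dtype of each of its outputs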
dummydat = []
avtypeout = []
for idimapply,edimapply in enumerate(adimapplyin):
dummydat.append(np.zeros(edimapply))
ddout = func(*dummydat)
if (type(ddout).__name__ != 'list'):
ddout = list([ddout])
for iddout in range(len(ddout)):
ddout[iddout] = np.array(ddout[iddout])
if (len(np.array(ddout[iddout]).shape) != len(adimapplyin[iddout])):
raise SomeError('The amount of input ('+str(len(adimapplyin[iddout]))+') and output dimensions ('+str(len(ddout[iddout].shape))+') of function is not the same')
# determine datatype (dtype) output
if avtypeoutspec[iddout] != None:
# adopt the one from the output file
avtypeout.append(avtypeout[iddout])
else:
# otherwise adopt the one from the function output
avtypeout.append(ddout[iddout].dtype)
adimsout = []
adimapplyout = []
for iddout,eddout in enumerate(ddout):
adimsout.append(list(dimsout))
adimapplyout.append([None]*len(refapplyout))
print iddout
# arefout = [0,1,2,3,4]
adimapplyout = []
alenapplyout = []
for idimsout,edimsout in enumerate(adimsout):
adimapplyout.append([])
for irefapplyout,erefapplyout in enumerate(refapplyout):
adimsout[idimsout][erefapplyout] = ddout[idimsout].shape[irefapplyout]
adimapplyout[idimsout].append(ddout[idimsout].shape[irefapplyout]) # adimsout[idimsout][arefs[idims].index(erefapplyout)]
alenapplyout.append(reduce(mul,adimapplyout[idimsout]))
# we want to read the data in chunks (icecubes) as big as possible. In the first place, the data chunks contain of course the dimensions on which the functions are applied. Afterwards, the chunk dimensions is extended (in the outer(!) direction) to make the icecubes bigger.
# refnoiter: reference to dimensions that are swapped to the back. In any case, this needs to include all refapplyouts. Data in these dimensions are read in icecubes. The order of those indices are taken into account. We also add in front those dimensions that can be read at once (still needs to be tested!).
# the total length of the numpy array as IO memory buffer ('icecubes'). The programm will try to read this in chunks (cfr. rwchunksize- as large as possible. An in-memory transposition may be applied after read or before writing.
# the dimension of the IO buffer array
# the total length of data passed to function
alenapply = []
alennoiter = []
# important remark!!! we consider that the number of input dimensions of the function is equal to the number of output dimensions!!!!!
# (a)refapply = (a)refapplyout
# adimapply: the dimensions of data to be read in chunks, ordered
adimnoiter = []
for idimsswapply,edimsswapply in enumerate(adimapplyin):
alenapply.append( reduce(mul,edimsswapply))
adimnoiter.append([])
alennoiter = np.array(alenapply)
refnoiterout = []
for irefapplyout,erefapplyout in enumerate(refapplyout):
refnoiterout.append(int(erefapplyout))
for irefapplyout,erefapplyout in enumerate(refapplyout):
for idimsswapply,edimsswapply in enumerate(adimapplyin):
adimnoiter[idimsswapply].append(int(adimapplyin[idimsswapply,irefapplyout]))
# for now:
# adimnoiter = adimapplyin, but will be appended below
# refnoiterout = refapplyout
# we now will try to read the data in even larger icecubes!
if (max(alennoiter) > maxicecubesize):
print 'Warning, one of the function data input lengths "',alennoiter,'" (dimensions: ',adimapplyin,') exceeds the maximum icecubesize of '+str(maxicecubesize)+'.'
else:
    # we will try to read the data in even larger icecubes!
idim = arefs.shape[1]-1
# emaxdim = max(adimssw[:,idim])
while ((idim >= 0) & ((max(alennoiter)*max(adimssw[:,idim])) < maxicecubesize)):
print idim
if (idim not in refnoiterout):
print 'idim',idim
refnoiterout.insert(0,int(idim))
for idimsapply,emaxdimsapply in enumerate(adimapplyin):
adimnoiter[idimsapply].insert(0,adimssw[idimsapply,idim])
alennoiter[idimsapply] = alennoiter[idimsapply] * adimssw[idimsapply,idim]
# print 'yeeps',idim,emaxdim,refnoiterout,dimnoiter,lennoiter, maxicecubesize
idim = idim - 1
print 'Icecubesizes are: ',alennoiter #,dimnoiter,refnoiterout
for idimsout,edimsout in enumerate(dimsout):
dimsout[idimsout] = max(np.array(adimsout)[:,idimsout])
dimnoiterout = []
for irefnoiterout,erefnoiterout in enumerate(refnoiterout):
dimnoiterout.append(dimsout[erefnoiterout])
dimiter = []
# guess from residual dimensions that are not in refnoiterout
refiter = None
if refiter == None:
refiter = []
for idimsout,edimsout in enumerate(dimsout):
if idimsout not in refnoiterout:
refiter.append(idimsout)
adimiter = []
adimiterpos = []
aleniter = []
for idimssw,edimssw in enumerate(adimssw):
adimiter.append([])
adimiterpos.append([])
aleniter.append(1)
for erefiter in refiter:
adimiter[idimssw].append(int(adimssw[idimssw][erefiter]))
aleniter[idimssw] = aleniter[idimssw]*adimiter[idimssw][-1]
# the trivial case of only one iteration
if adimiter[idimssw] == []:
adimiter[idimssw].append(1)
adimiterpos[idimssw].append(0)
else:
adimiterpos[idimssw].append([0]*len(refiter))
adimiterout = []
# adimiterposout = []
aleniterout = []
for idimsout,edimsout in enumerate(adimsout):
adimiterout.append([])
# adimiterposout.append([])
aleniterout.append(1)
for erefiter in refiter:
adimiterout[idimsout].append(int(adimsout[idimsout][erefiter]))
aleniterout[idimsout] = aleniterout[idimsout]*adimiterout[idimsout][-1]
# the trivial case of only one iteration
if adimiterout[idimsout] == []:
adimiterout[idimsout].append(1)
# adimiterposout[idimsout].append(0)
#else:
# adimiterposout[idimsout].append([0]*len(refiter))
for idims,edims in enumerate(adims):
if refiter == []:
refiter = [-1]
arefsiter = []
for irefs,erefs in enumerate(arefs):
arefsiter.append([])
for iref,eref in enumerate(refiter):
if eref != -1:
arefsiter[irefs].append(arefssw[irefs][eref])
arefsnoiter = []
for irefs,erefs in enumerate(arefs):
arefsnoiter.append([])
for iref,eref in enumerate(refnoiterout):
if eref != -1:
arefsnoiter[irefs].append(arefssw[irefs][eref])
arefsapply = []
for irefs,erefs in enumerate(arefs):
arefsapply.append([])
for iref,eref in enumerate(refapplyout):
if eref != -1:
arefsapply[irefs].append(arefssw[irefs][eref])
lennoiterout = reduce(mul,dimnoiterout)
# maximum of both input and output dimensions for the iteration
dimitermax =np.zeros(np.array(adimiter).shape[1],dtype=np.int32)
for idimitermax,edimitermax in enumerate(dimitermax):
for idimiter,edimiter in enumerate(adimiter):
dimitermax[idimitermax] = max(dimitermax[idimitermax],adimiter[idimiter][idimitermax])
# maximum of both input and output dimensions for the iteration
for idimiterout,edimiterout in enumerate(adimiterout):
dimitermax[idimitermax] = max(dimitermax[idimitermax],adimiterout[idimiterout][idimitermax])
lenitermax = reduce(mul,dimitermax)
dimiterpos = [0]*len(dimitermax)
# short overview:
# # arefssw: the references of the output dimensions (adimssw) to the data dimensions
# # arefs: the references of the data dimensions (adims) to the output dimensions
# # arefsiter: references of the looping dimensions to the data dimensions
# # arefsnoiter: references of the non-looping dimensions to the data dimensions
# get the maximum size of continuous data chunks for more efficient IO
rwchunksize = [1]*len(arefsnoiter)
for idims in range(len(arefsnoiter)):
idim = len(adimnoiter[idims])
while ((idim in arefsnoiter[idims]) & (idim >= 0)):
# The inner dimensions just have to be referenced so not in correct order. We know that they will be read in the correct order in the end
rwchunksize[idims] = rwchunksize[idims]*adims[idims][idim]
idim = idim - 1
rwchunksizeout = [1]*len(adimsout)
idim = len(dimnoiterout)
while ((idim in refnoiterout) & (idim >= 0)):
# The inner dimensions just have to be referenced and not in correct order. We know that they will be read in the correct order in the end
for idimsout,edimsout in enumerate(adimsout):
rwchunksizeout[idimsout] = rwchunksizeout[idimsout]*adimsout[idimsout][idim]
idim = idim - 1
adimnoiterout = []
alennoiterout = []
for idimsout,edimsout in enumerate(adimsout):
adimnoiterout.append([])
for iref,eref in enumerate(refnoiterout):
adimnoiterout[idimsout].append(adimsout[idimsout][eref])
alennoiterout.append(reduce(mul,adimnoiterout[idimsout]))
# get the dimensions of the buffer over which we iterate
# we know that the function are applied along dimensions that are at the inner data
adimnoapply = []
alennoapply = []
for irefs,erefs in enumerate(arefs):
adimnoapply.append([])
alennoapply.append(1)
for irefnoiterout in range(len(arefsnoiter[irefs])-len(refapplyout)):
adimnoapply[irefs].append(adimnoiter[irefs][irefnoiterout])
alennoapply[irefs] =alennoapply[irefs]*adimnoapply[irefs][-1]
if adimnoapply[irefs] == []:
adimnoapply[irefs] = [1]
adimnoapplyout = []
alennoapplyout = []
for idimsout in range(len(adimsout)):
adimnoapplyout.append([])
alennoapplyout.append(1)
for irefnoiterout in range(len(refnoiterout)-len(refapplyout)):
adimnoapplyout[idimsout].append(adimnoiterout[idimsout][irefnoiterout])
alennoapplyout[idimsout] = alennoapplyout[idimsout]*adimnoapplyout[idimsout][-1]
if adimnoapplyout[idimsout] == []:
adimnoapplyout[idimsout] = [1]
dimnoapply = [1]*len(adimnoapply[1])
for idimnoapply in range(len(dimnoapply)):
for idims,edims in enumerate(adimnoapply):
dimnoapply[idimnoapply] = max(dimnoapply[idimnoapply],adimnoapply[idims][idimnoapply])
for idims,edims in enumerate(adimnoapplyout):
dimnoapply[idimnoapply] = max(dimnoapply[idimnoapply],adimnoapplyout[idims][idimnoapply])
lennoapply = reduce(mul,dimnoapply)
dimnoapplypos = [0]*len(dimnoapply)
print str(0)+'/'+str(lenitermax),
for j in range(lenitermax):
# reading icecube, rearranged in the order of dimensions specified by refnoiterout
dataicecube = []
for ilennoiter,elennoiter in enumerate(alennoiter):
#dataicecube.append(np.array(readicecubeps(fin,adims[irefs],arefsiter[irefs],adimiter[irefs],adimiterpos[irefs],arefsnoiter[irefs],adimnoiter[irefs],vtype,vsize[irefs],voffset[irefs],rwchunksize[irefs]),dtype=vtype).ravel()
dataicecube.append(np.zeros((elennoiter,)))
#np.array(readicecubeps(fin,adims[0],arefsiter[0],adimiter[0],adimiterpos[0],arefsnoiter[0],adimnoiter[0],vtype,vsize,voffset,rwchunksize),dtype=vtype).ravel()
# temporary store output in a array-buffer
dataicecubeout = []
for ilennoiterout,elennoiterout in enumerate(alennoiterout):
dataicecubeout.append(np.zeros((elennoiterout,),dtype=avtypeout[ilennoiterout]))
# crush the ice
# refnoiterout = (6 ,7 ,8 ,4 ,5)
# dimiter = (30,20,15,20,15)
# refapplyout = (8 ,4 ,5)
for k in range(lennoapply):
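        # for every position in the remaining (non-apply) buffer dimensions, cut
        # one hunk per input, apply func, and scatter its outputs into the output buffers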
# actually, this is just the end of the file output already written
ahunk = []
for irefs, erefs in enumerate(arefs):
pos = 0
# e.g. pos = (9)+ 20*(10) + 50*50*20*(5)
for idimpos,edimpos in enumerate(dimnoapplypos):
curadd = np.mod(edimpos,adimnoapply[irefs][idimpos])
#e.g. if edimpos == (5): curadd = 50*50*20*(5)
if ((idimpos + 1) < len(arefsnoiter[irefs])):
for i in range(idimpos + 1,len(arefsnoiter[irefs])) :
                        # here, we assume that the dimensions of the chunk are already in the order considered by adimsnoiter(out) etc. (cf. the preceding transposition in readicecubeps)
curadd = curadd * adimnoiter[irefs][i]
# curaddout = curaddout * dimnoiteroutref[i]
pos = pos + curadd
ahunk.append(dataicecube[irefs][pos:(pos+alenapply[irefs])])
ahunk[irefs].shape = adimapplyin[irefs]
# apply the function
ahunkout = np.array(func(*ahunk)) #np.array((np.zeros(hunk.shape) + 1)*np.mean(hunk),dtype=vtype)
            if (type(ahunkout).__name__ != 'list'): # tbi: still need to check whether this is the most convenient condition!
ahunkout = list([ahunkout])
for ihunkout in range(len(ahunkout)):
ahunkout[ihunkout] = np.array(ahunkout[ihunkout])
# e.g. posout = (9)+ 20*(10) + 50*50*20*(5)
posout = 0
for idimpos,edimpos in enumerate(dimnoapplypos):
curadd = np.mod(edimpos,adimnoapplyout[ihunkout][idimpos])
#e.g. if edimpos == (5): curadd = 50*50*20*(5)
if ((idimpos + 1) < len(refnoiterout)):
for i in range(idimpos + 1,len(refnoiterout)) :
# here, we assume that the idims are in the intended order (cfr. subsequent transposition in writeicecubeps)
curadd = curadd * dimnoiterout[i]
# curaddout = curaddout * dimnoiteroutref[i]
posout = posout + curadd
dataicecubeout[ihunkout][posout:(posout+alenapplyout[ihunkout])] = np.array(ahunkout[ihunkout].ravel(),dtype=avtypeout[ihunkout])
# go to next data slice
dimnoapplypos[-1] = dimnoapplypos[-1] + 1
for idimidx,edimidx in enumerate(reversed(dimnoapplypos)):
# # alternative (makes 'dimiter' redundant)
# if dimiterpos[idimidx] == shp[refiter[idimidx]]:
if idimidx > 0:
if dimnoapplypos[idimidx] == dimnoapply[idimidx]:
dimnoapplypos[idimidx-1] = dimnoapplypos[idimidx-1] + 1
dimnoapplypos[idimidx] = 0
for idimsout in range(len(dataicecubeout)):
dataicecubeout[idimsout].shape = dimnoiterout
#print dataicecubeout[idimsout].shape
# for idimsout in range(len(adimsout)):
# writeicecubeps(fout[idimsout],\
# adimsout[idimsout],\
# arefsnoiter[idimsout],\
# adimiterout[idimsout],\
# dimiterposout[idimsout],\
# arefnoiterout[idimsout],\
# adimnoiterout[idimsout],\
# dataicecubeout[idimsout],\
# vtype[idimsout],\
# vsize[idimsout],\
# voffset[idimsout],\
# rwchunksizeout[idimsout])
# go to next data slice
dimiterpos[-1] = dimiterpos[-1] + 1
for idimidx,edimidx in enumerate(reversed(dimiterpos)):
# # alternative (makes 'dimiter' redundant)
# if dimiterpos[idimidx] == shp[refiter[idimidx]]:
if dimiterpos[idimidx] == dimitermax[idimidx]:
if idimidx > 0:
dimiterpos[idimidx-1] = dimiterpos[idimidx-1] + 1
dimiterpos[idimidx] = 0
sys.stdout.write ('\b'*(len(str(j)+'/'+str(lenitermax))+1))
sys.stdout.write (str(j+1)+'/'+str(lenitermax))
| gpl-3.0 | -2,215,253,140,054,306,300 | 40.743922 | 314 | 0.656119 | false |
blaa/WaveSync | libwavesync/audio_output.py | 1 | 2409 | class AudioOutput:
"""
Output abstraction - wraps all methods of sound card required to work.
"""
def __init__(self, config, device_index, buffer_size):
# Import pyaudio only if really needed.
# pylint: disable=import-outside-toplevel
import pyaudio
self.stream = None
self.pyaudio = None
self.config = config
# Generate silence frames (zeroed) of appropriate sizes for chunks
self.silence_cache = None
self.chunk_frames = config.chunk_size / config.frame_size
if device_index == -1:
# We are under test. Don't open a stream (stop after calculating chunk_frames).
return
assert self.stream is None
self.pyaudio = pyaudio.PyAudio()
if device_index is None:
host_info = self.pyaudio.get_host_api_info_by_index(0)
device_index = host_info['defaultOutputDevice']
print("Using default output device index", device_index)
audio_format = (
pyaudio.paInt24
if config.sample == 24
else pyaudio.paInt16
)
self.stream = self.pyaudio.open(output=True,
channels=config.channels,
rate=config.rate,
format=audio_format,
frames_per_buffer=buffer_size,
output_device_index=device_index)
self.max_buffer = self.get_write_available()
print("BUFS", buffer_size, self.max_buffer) # max_buffer seems twice the size; mono/stereo?
print("CONFIG", config, config.chunk_time)
def __del__(self):
if self.stream is not None:
self.stream.stop_stream()
self.stream.close()
self.stream = None
if self.pyaudio:
self.pyaudio.terminate()
self.pyaudio = None
def get_write_available(self):
return self.stream.get_write_available()
def write(self, data):
return self.stream.write(data)
def get_silent_chunk(self):
"Generate and cache silent chunks"
if self.silence_cache is not None:
return self.silence_cache
silent_chunk = b'\x00' * self.config.chunk_size
self.silence_cache = silent_chunk
return silent_chunk
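# Illustrative usage sketch (not part of the original module). The config fields
# below are assumptions inferred from the attributes accessed above (rate,
# channels, sample, frame_size, chunk_size, chunk_time):
#
#   from types import SimpleNamespace
#   config = SimpleNamespace(rate=44100, channels=2, sample=16,
#                            frame_size=4, chunk_size=4096, chunk_time=0.023)
#   out = AudioOutput(config, device_index=None, buffer_size=2048)
#   if out.get_write_available() >= config.chunk_size // config.frame_size:
#       out.write(out.get_silent_chunk())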
| mit | 2,106,156,185,354,112,000 | 32.929577 | 99 | 0.562059 | false |
hickey/amforth | core/devices/atmega165/device.py | 5 | 2197 | # Partname: ATmega165
# Built using part description XML file version 126
# generated automatically, do not edit
MCUREGS = {
'ADCH': '$79',
'ADCL': '$78',
'ADCSRA': '$7A',
'ADCSRB': '$7B',
'ADMUX': '$7C',
'DIDR0': '$7E',
'ACSR': '$50',
'DIDR1': '$7F',
'SPMCSR': '$57',
'CLKPR': '$61',
'GPIOR0': '$3E',
'GPIOR1': '$4A',
'GPIOR2': '$4B',
'MCUCR': '$55',
'MCUSR': '$54',
'OSCCAL': '$66',
'PRR': '$64',
'SMCR': '$53',
'SPH': '$5E',
'SPL': '$5D',
'SREG': '$5F',
'EEARH': '$42',
'EEARL': '$41',
'EECR': '$3F',
'EEDR': '$40',
'EICRA': '$69',
'EIFR': '$3C',
'EIMSK': '$3D',
'PCMSK0': '$6B',
'PCMSK1': '$6C',
'OCDR': '$51',
'DDRA': '$21',
'PINA': '$20',
'PORTA': '$22',
'DDRB': '$24',
'PINB': '$23',
'PORTB': '$25',
'DDRC': '$27',
'PINC': '$26',
'PORTC': '$28',
'DDRD': '$2A',
'PIND': '$29',
'PORTD': '$2B',
'DDRE': '$2D',
'PINE': '$2C',
'PORTE': '$2E',
'DDRF': '$30',
'PINF': '$2F',
'PORTF': '$31',
'DDRG': '$33',
'PING': '$32',
'PORTG': '$34',
'SPCR': '$4C',
'SPDR': '$4E',
'SPSR': '$4D',
'GTCCR': '$43',
'OCR0A': '$47',
'TCCR0A': '$44',
'TCNT0': '$46',
'TIFR0': '$35',
'TIMSK0': '$6E',
'ICR1H': '$87',
'ICR1L': '$86',
'OCR1AH': '$89',
'OCR1AL': '$88',
'OCR1BH': '$8B',
'OCR1BL': '$8A',
'TCCR1A': '$80',
'TCCR1B': '$81',
'TCCR1C': '$82',
'TCNT1H': '$85',
'TCNT1L': '$84',
'TIFR1': '$36',
'TIMSK1': '$6F',
'ASSR': '$B6',
'OCR2A': '$B3',
'TCCR2A': '$B0',
'TCNT2': '$B2',
'TIFR2': '$37',
'TIMSK2': '$70',
'UBRR0H': '$C5',
'UBRR0L': '$C4',
'UCSR0A': '$C0',
'UCSR0B': '$C1',
'UCSR0C': '$C2',
'UDR0': '$C6',
'USICR': '$B8',
'USIDR': '$BA',
'USISR': '$B9',
'WDTCR': '$60',
'INT0Addr': '$002',
'PCINT0Addr': '$004',
'PCINT1Addr': '$006',
'TIMER2_COMPAddr': '$008',
'TIMER2_OVFAddr': '$00A',
'TIMER1_CAPTAddr': '$00C',
'TIMER1_COMPAAddr': '$00E',
'TIMER1_COMPBAddr': '$010',
'TIMER1_OVFAddr': '$012',
'TIMER0_COMPAddr': '$014',
'TIMER0_OVFAddr': '$016',
'SPI_STCAddr': '$018',
'USART0_RXAddr': '$01A',
'USART0_UDREAddr': '$01C',
'USART0_TXAddr': '$01E',
'USI_STARTAddr': '$020',
'USI_OVERFLOWAddr': '$022',
'ANALOG_COMPAddr': '$024',
'ADCAddr': '$026',
'EE_READYAddr': '$028',
'SPM_READYAddr': '$02A'
} | gpl-2.0 | 5,240,910,691,929,256,000 | 17.948276 | 51 | 0.474283 | false |
NauIceLab/SpectralAnalysis | GTanalysis.py | 4 | 17361 | ##############################################################################
# Created by Garrett Thompson
# Graphical User Interface for Data Analysis
# Created at Northern Arizona University
# for use in the Astrophysical Ice Laboratory
# Advisors: Jennifer Hanley, Will Grundy, Henry Roe
# [email protected]
##############################################################################
import os
import csv
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit as cf
from scipy.fftpack import fft, fftfreq, ifft
from scipy.signal import savgol_filter as sgf
from scipy.integrate import trapz
def main():
folder_to_save = choose_dir()
#choose files for analysis
raw_x,raw_y, raw_xbg,raw_ybg = choose_files(folder_to_save)
print("Plotting imported data...")
plotting_data_for_inspection(raw_x,raw_y,'Raw Data','Wavenumber (cm-1)','% Transmittance','rawspectrum.pdf',folder_to_save, False)
plotting_data_for_inspection(raw_xbg,raw_ybg,'Raw Background','Wavenumber (cm-1)','% Transmittance','rawbackground.pdf',folder_to_save, False)
#user chooses method after inspecting plots
user_method = str(input('Press "s" for savitsky-golay filter, or "f" for fft filter\n:'))
choosing = True
while choosing:
if user_method.lower() == 's':
# savitsky-golay option was chosen
choosing = False
args_list = [folder_to_save, raw_y, raw_ybg, raw_x]
raw_x, norm_smooth = sgf_calc(args_list)
plot_data(raw_x,norm_smooth,folder_to_save)
elif user_method.lower() == 'f':
# fft option was chosen
choosing = False
frq_x,frq_xbg,fft_y,fft_ybg = fft_calculation(raw_x,raw_y,raw_xbg,raw_ybg,folder_to_save)
plot_figure, plot_axis = plotting_data_for_inspection(frq_x,np.log(abs(fft_ybg)),'FFT of raw bg','Cycles/Wavenumber (cm)','Log(Power/Frequency)','fft_background.pdf',folder_to_save, False)
filt_y = fft_y.copy()
filt_ybg = fft_ybg.copy()
input('Zoom to liking, then press enter to start')
print('Left to add, middle to remove nearest, and right to finish')
# global frq_cid
vert_lines=[]
frq_cid = plot_figure.canvas.mpl_connect('button_press_event',lambda event: freq_click(event, [frq_x,fft_ybg,plot_figure,plot_axis,vert_lines,filt_y,filt_ybg,folder_to_save,raw_x]))
plt.show()
plot_figure.canvas.mpl_disconnect(frq_cid)
# vert_lines, frq_x, filt_y, filt_ybg = args_dict["vert_lines"],args_dict["frq_x"],args_dict["filt_y"],args_dict["filt_ybg"]
def save_as_csv(folder_to_save,title, column1_title,column2_title,column1_data,column2_data):
os.chdir(folder_to_save)
with open(title,"w") as f:
writer = csv.writer(f)
writer.writerow([column1_title,column2_title])
writer.writerows(list(zip(column1_data,column2_data)))
os.chdir('..')
def fft_calculation(raw_x,raw_y,raw_xbg,raw_ybg,folder_to_save):
""" calculates FFT of data for use in nipping unwanted frequencies"""
# finds FFT of ydata
fft_y = fft(raw_y)
fft_ybg = fft(raw_ybg)
# gets frequencies for FFT of data from array, and sample spacing
frq_x = fftfreq(len(fft_y),((max(raw_x)-min(raw_x))/len(fft_y)))
frq_xbg = fftfreq(len(fft_ybg),((max(raw_xbg)-min(raw_xbg))/len(fft_ybg)))
save_as_csv(folder_to_save,"FFT_Raw_bg_data.csv","frq_x","log(abs(fft_bg))",frq_x,np.log(abs(fft_ybg)))
return frq_x, frq_xbg, fft_y, fft_ybg
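# Worked example (illustrative, not in the original script): for a spectrum of
# 1000 points spanning 2000 cm-1 the spacing handed to fftfreq is 2 cm-1 per
# point, so frq_x runs from about -0.25 to +0.25 cycles/wavenumber (the Nyquist
# limit 1/(2*2)); a fringe repeating every 50 cm-1 would appear near +/-0.02.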
def choose_dir():
"""
User chooses where all work will be saved and
time stamp is created for future reference
"""
# Where all work to follow will be saved
folder_to_save = input('Type name of directory to save all data being created\n:')
# make and change to directory named by user
os.mkdir(folder_to_save)
os.chdir(folder_to_save)
# recording date and time that program is run, saving it to folder
with open("time_created.txt", "w") as text_file:
text_file.write("Time this program was run: {} \n".format(time.strftime("%Y-%m-%d %H:%M")))
os.chdir('..')
return folder_to_save
def plotting_data_for_inspection(xdata,ydata,plot_title,plot_xlabel,plot_ylabel,filename_for_saving,folder_to_save, block_boolean):
"""
Plots data for user to look at within program
parameters
----------
xdata,ydata: x and y data to be plotted
plot_xlabel,plot_ylabel: label x and y axes in plot
file_name_for_saving: string given for saving file for later referece
block_boolean: True or False, tells if program waits for figure to close
"""
plot_figure, plot_axis = plt.subplots()
plt.plot(xdata,ydata,color='blue')
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
plt.suptitle(plot_title)
plt.show(block=block_boolean)
os.chdir(folder_to_save)
plt.savefig(filename_for_saving)
os.chdir('..')
return plot_figure, plot_axis
def choose_files(folder_to_save):
"""
Lets user determine which files will be imported for analysis
and saves preferences for reference later on
"""
raw_import = str(input('Enter a raw dataset for analysis\n:'))
print("\nGot it! Importing now... \n")
raw_x,raw_y = import_data(raw_import)
bg_import = str(input('Enter a raw background for analysis\n:'))
print("\nGot it! Importing now... \n")
raw_xbg,raw_ybg = import_data(bg_import)
os.chdir(folder_to_save)
with open("data_files_used.txt", "w") as text_file:
text_file.write("Raw data file used: {} \n".format(raw_import))
text_file.write("Raw background data file used: {}".format(bg_import))
concentration = str(input('Enter concentration of mixture\n:'))
# saving text file of concentration for later use in plotting
with open("concentration.txt","w") as f:
f.write(concentration)
temperature = str(input('Enter temperature of mixture\n:'))
# saving text file of temperature for later use in plotting
with open("temperature.txt","w") as f:
f.write(temperature)
os.chdir('..')
return raw_x, raw_y,raw_xbg,raw_ybg
# assumes a csv file, as all data stored from ice lab is in CSV format
def import_data(filename):
raw_data = np.loadtxt(open(filename,"rb"),delimiter=",")
xdat = raw_data[:,0]
ydat = raw_data[:,1]
return xdat,ydat
def freq_click(event, args_list):
# if button_click = left: add left line
# if button_click = middle: removes closest line
# if button_lick = right: finish
# add clicked data points to list
frq_x,fft_ybg,plot_figure,plot_axis,vert_lines, filt_y, filt_ybg,folder_to_save, raw_x = args_list
plt.xlim(plt.gca().get_xlim())
plt.ylim(plt.gca().get_ylim())
if event.button==1:
vert_lines.append(event.xdata)
plot_axis.plot(frq_x,np.log(np.abs(fft_ybg)),color='blue')
#plt.axvline(x=vert_lines[-1],color='black')
for val in vert_lines:
plt.axvline(x=val,color='black')
plt.xlabel('Cycles/Wavenumber')
plt.ylabel('Relative Intensity')
# draws points as they are added
plt.draw()
if event.button==2:
# middle click, remove closest vertical line
print ('pop!')
# gets x,y limits of graph,saves them before destroying figure
xlims = plt.gca().get_xlim()
ylims = plt.gca().get_ylim()
# clears axes, to get rid of old scatter points
plot_axis.cla()
# re-plots spectrum
plot_axis.plot(frq_x,np.log(np.abs(fft_ybg)),color='blue')
# sets axes limits to original values
plt.xlim(xlims)
plt.ylim(ylims)
plt.xlabel('Cycles/Wavenumber')
plt.ylabel('Relative Intensity')
# deletes point closest to mouse click
xindx = np.abs(vert_lines-event.xdata).argmin()
del vert_lines[xindx]
for line in vert_lines:
plt.axvline(x=line,color='black')
# draws the new set of vertical lines
plt.draw()
if event.button==3:
# right click, ends clicking awareness
# plot_figure.canvas.mpl_disconnect(frq_cid)
os.chdir(folder_to_save)
plt.savefig('FFT_filter.pdf')
with open("freq_window.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(["Xposition of vert. line"])
writer.writerows(list(zip(vert_lines)))
os.chdir('..')
# first window
args_dict ={"vert_lines":vert_lines,"frq_x":frq_x,"filt_y":filt_y,"filt_ybg":filt_ybg}
plt.close("all")
argslist = [vert_lines,frq_x,filt_y,filt_ybg]
filt_y,filt_ybg = window_filter(argslist)
fft_calc(filt_y, filt_ybg, raw_x,folder_to_save)
def fft_calc(filt_y, filt_ybg, raw_x,folder_to_save):
# dividing filtered y data from filtered bg data
norm_fft = ifft(filt_y)/ifft(filt_ybg)
save_as_csv(folder_to_save,"fft_data.csv","raw_x","fft_filt",raw_x,norm_fft.real)
plot_data(raw_x,norm_fft.real,folder_to_save)
def sgf_calc(args_list):
folder_to_save, raw_y, raw_ybg, raw_x = args_list
# warning when using sgf option
warnings.filterwarnings(action="ignore", module="scipy",message="^internal gelsd")
window_param = int(input('Input window box size (must be odd number)\n:'))
poly_param = int(input('Input polynomial order for smoothing\n:'))
# saving parameters chosen for future inspection
os.chdir(folder_to_save)
with open("sgf_params.txt", "w") as sgf_file:
sgf_file.write("Window parameter used: {} \n".format(window_param))
sgf_file.write("Polynomial paramter used: {}".format(poly_param))
#global norm_smooth
smoothed_y = sgf(raw_y,window_param,poly_param,delta=(abs(raw_y)[1]-raw_y)[0])
smoothed_ybg =sgf(raw_ybg,window_param,poly_param,delta=(abs(raw_ybg)[1]-raw_ybg)[0])
# dividing filtered y data from filtered bg data
norm_smooth = smoothed_y / smoothed_ybg
rows = list(zip(raw_x,norm_smooth))
with open("sgf_data.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(["window","polynomail order"])
writer.writerow([window_param,poly_param])
writer.writerow(["raw_x","sgf_filt"])
writer.writerows(rows)
os.chdir('..')
return raw_x,norm_smooth
# range of frequenices to cut out
def window_filter(args_list):
vert_lines, frq_x, filt_y, filt_ybg = args_list
window_min, window_max= vert_lines[-2], vert_lines[-1]
for i in range(len(frq_x)):
if (frq_x[i] >= window_min and frq_x[i] <=window_max) or (frq_x[i]>-1*window_max and frq_x[i]<-1*window_min):
filt_y[i] = 0
filt_ybg[i] = 0
return filt_y,filt_ybg
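# Worked example (illustrative, not in the original script): if the last two
# clicked lines sit at 0.018 and 0.022 cycles/wavenumber, every FFT bin whose
# frequency falls in [0.018, 0.022] or (-0.022, -0.018) is zeroed in both the
# sample and background transforms, removing that fringe after the inverse FFT.
# Note vert_lines[-2] is assumed to be the smaller value; clicking the two lines
# in the opposite order would select an empty window.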
def plot_data(x,y,folder_to_save):
plot_figure,plot_axis = plotting_data_for_inspection(x,y,"Divide and Filtered Spectrum","Wavenumber cm-1","Relative Intensity","dv_filt_spectrum.pdf",folder_to_save, False)
order = int(input('Zoom to liking and then enter what order polynomial for continuum fit\n:'))
xcoords,ycoords = [],[]
# tells python to turn on awareness for button presses
global cid
cid = plot_figure.canvas.mpl_connect('button_press_event', lambda event: onclick(event, [xcoords,ycoords,plot_figure,plot_axis,order,folder_to_save,x,y]))
print('Left to add, middle to remove nearest, and right to finish')
plt.show()
# for creating continuum fit to divide out
def onclick(event,argslist):
xcoords,ycoords,plot_figure,plot_axis,order,folder_to_save,x,y = argslist
global pvals
if event.button==1:
# left click
plt.xlim(plt.gca().get_xlim())
plt.ylim(plt.gca().get_ylim())
#plt.cla()
try:
# only delete if curve_fit line already drawn
if len(plot_axis.lines) !=1: plot_axis.lines.remove(plot_axis.lines[-1])
except UnboundLocalError: pass
# add clicked data points to list
xcoords.append(event.xdata)
ycoords.append(event.ydata)
plot_axis.scatter(xcoords,ycoords,color='black')
plt.xlabel('Wavenumber cm-1')
plt.ylabel('Relative Intensity')
plt.draw()
xvals = np.array(xcoords)
yvals = np.array(ycoords)
# fits values to polynomial, rankwarning is irrelevant
warnings.simplefilter('ignore', np.RankWarning)
p_fit = np.polyfit(xvals,yvals,order)
pvals = np.poly1d(p_fit)
plot_axis.plot(x,pvals(x),color='black')
plt.draw()
# plt.show(block=False)
if event.button==2:
# middle click, remove closest point to click
print ('pop!')
# gets x,y limits of graph,saves them before destroying figure
xlims = plt.gca().get_xlim()
ylims = plt.gca().get_ylim()
# clears axes, to get rid of old scatter points
plot_axis.cla()
# re-plots spectrum
plot_axis.plot(x,y)
# sets axes limits to original values
plt.xlim(xlims)
plt.ylim(ylims)
plt.xlabel('Wavenumber cm-1')
plt.ylabel('Relative Intensity')
# deletes point closest to mouse click
xindx = np.abs(xcoords-event.xdata).argmin()
del xcoords[xindx]
yindx = np.abs(ycoords-event.ydata).argmin()
del ycoords[yindx]
# draws the new set of scatter points, and colors them
plot_axis.scatter(xcoords,ycoords,color='black')
plt.draw()
xvals = np.array(xcoords)
yvals = np.array(ycoords)
# fits values to polynomial, rankwarning is ignored
warnings.simplefilter('ignore', np.RankWarning)
p_fit = np.polyfit(xvals,yvals,order)
pvals = np.poly1d(p_fit)
plot_axis.plot(x,pvals(x),color='black')
plt.draw()
if event.button==3:
# right click,ends clicking awareness
plot_figure.canvas.mpl_disconnect(cid)
os.chdir(folder_to_save)
plt.savefig('continuum_chosen.pdf')
# Saving polynomial eqn used in continuum divide for reference
with open("continuum_polynomial.txt", "w") as save_file:
save_file.write("%s *x^ %d " %(pvals[0],0))
for i in (range(len(pvals))):
save_file.write("+ %s *x^ %d " %(pvals[i+1],i+1))
os.chdir('..')
calc_coeffs(pvals,x,y,folder_to_save)
def calc_coeffs(pvals,x,y,folder_to_save):
fit_y = pvals(x)
# flattens the continuum
new_continuum = y / fit_y
thickness = int(input('\nEnter thickness of cell in cm\n:'))
# 2 cm thickness for our work in 2016
# remove runtime errors when taking negative log and dividing
err_settings = np.seterr(invalid='ignore')
alpha_coeffs = -np.log(new_continuum) / thickness
plotting_data_for_inspection(x,alpha_coeffs,"Alpha Coefficients","Wavenumber cm-1","Absorption cm-1","alpha_coeffs.pdf",folder_to_save,False)
save_as_csv(folder_to_save,"alpha_coeffs.csv","x","alpha",x,alpha_coeffs)
# creating masks around each peak
x_mask1 = x[(x>10000) & (x<10500)]
x_mask2 = x[(x>11200) & (x<12000)]
y_mask1 = alpha_coeffs[(x>10000) & (x<10500)]
y_mask2 = alpha_coeffs[(x>11200) & (x<12000)]
# writing data for plotting later
save_as_csv(folder_to_save,"10000_peak.csv","x","y",x_mask1,y_mask1)
save_as_csv(folder_to_save,"11200_peak.csv","x","y",x_mask2,y_mask2)
# integrated area calcs
area10000=trapz(y_mask1,x_mask1)
area11200=trapz(y_mask2,x_mask2)
os.chdir(folder_to_save)
with open("10000area.txt","w") as f:
f.write(str(area10000))
with open("11200area.txt","w") as f:
f.write(str(area11200))
os.chdir('..')
finish_prog = input("Press 'y' when finished\n:")
check = True
while check:
if (finish_prog =="y"): check = False
plt.close('all')
print("Finished!")
quit() # end of program
if __name__ == '__main__':
main()
| mit | 6,258,144,319,098,294,000 | 44.210938 | 200 | 0.580842 | false |
SeedScientific/luigi | luigi/__init__.py | 13 | 1841 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Package containing core luigi functionality.
"""
from luigi import task
from luigi import file # wtf @ naming
from luigi import rpc
from luigi import parameter
from luigi import configuration
from luigi import interface
from luigi import target
from luigi import event
Event = event.Event
Task = task.Task
Config = task.Config
ExternalTask = task.ExternalTask
WrapperTask = task.WrapperTask
Target = target.Target
File = file.File # TODO: remove, should be LocalTarget
LocalTarget = file.LocalTarget
Parameter = parameter.Parameter
RemoteScheduler = rpc.RemoteScheduler
RPCError = rpc.RPCError
run = interface.run
build = interface.build
# TODO: how can we get rid of these?
DateHourParameter = parameter.DateHourParameter
DateMinuteParameter = parameter.DateMinuteParameter
DateParameter = parameter.DateParameter
IntParameter = parameter.IntParameter
FloatParameter = parameter.FloatParameter
BooleanParameter = parameter.BooleanParameter # backward compatibility
BoolParameter = parameter.BoolParameter
DateIntervalParameter = parameter.DateIntervalParameter
TimeDeltaParameter = parameter.TimeDeltaParameter
namespace = task.namespace
from .tools import range # just makes the tool classes available from command line
| apache-2.0 | -1,592,065,354,899,458,300 | 29.683333 | 83 | 0.798479 | false |
gmalmquist/pants | contrib/go/tests/python/pants_test/contrib/go/tasks/test_go_fetch.py | 2 | 8585 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from collections import defaultdict
from pants.build_graph.address import Address
from pants.util.contextutil import temporary_dir
from pants_test.tasks.task_test_base import TaskTestBase
from pants.contrib.go.subsystems.fetcher import ArchiveFetcher
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_fetch import GoFetch
class GoFetchTest(TaskTestBase):
address = Address.parse
@classmethod
def task_type(cls):
return GoFetch
def test_get_remote_import_paths(self):
go_fetch = self.create_task(self.context())
self.create_file('src/github.com/u/a/a.go', contents="""
package a
import (
"fmt"
"math"
"sync"
"bitbucket.org/u/b"
"github.com/u/c"
)
""")
remote_import_ids = go_fetch._get_remote_import_paths('github.com/u/a',
gopath=self.build_root)
self.assertItemsEqual(remote_import_ids, ['bitbucket.org/u/b', 'github.com/u/c'])
def test_resolve_and_inject_explicit(self):
r1 = self.make_target(spec='3rdparty/go/r1', target_type=GoRemoteLibrary)
r2 = self.make_target(spec='3rdparty/go/r2', target_type=GoRemoteLibrary)
go_fetch = self.create_task(self.context())
resolved = go_fetch._resolve(r1, self.address('3rdparty/go/r2'), 'r2', implicit_ok=False)
self.assertEqual(r2, resolved)
def test_resolve_and_inject_explicit_failure(self):
r1 = self.make_target(spec='3rdparty/go/r1', target_type=GoRemoteLibrary)
go_fetch = self.create_task(self.context())
with self.assertRaises(go_fetch.UndeclaredRemoteLibError) as cm:
go_fetch._resolve(r1, self.address('3rdparty/go/r2'), 'r2', implicit_ok=False)
self.assertEqual(cm.exception.address, self.address('3rdparty/go/r2'))
def test_resolve_and_inject_implicit(self):
r1 = self.make_target(spec='3rdparty/go/r1', target_type=GoRemoteLibrary)
go_fetch = self.create_task(self.context())
r2 = go_fetch._resolve(r1, self.address('3rdparty/go/r2'), 'r2', implicit_ok=True)
self.assertEqual(self.address('3rdparty/go/r2'), r2.address)
self.assertIsInstance(r2, GoRemoteLibrary)
def test_resolve_and_inject_implicit_already_exists(self):
r1 = self.make_target(spec='3rdparty/go/r1', target_type=GoRemoteLibrary)
self.make_target(spec='3rdparty/go/r2', target_type=GoRemoteLibrary)
go_fetch = self.create_task(self.context())
r2_resolved = go_fetch._resolve(r1, self.address('3rdparty/go/r2'), 'r2', implicit_ok=True)
self.assertEqual(self.address('3rdparty/go/r2'), r2_resolved.address)
self.assertIsInstance(r2_resolved, GoRemoteLibrary)
def _create_package(self, dirpath, name, deps):
"""Creates a Go package inside dirpath named 'name' importing deps."""
imports = ['import "localzip/{}"'.format(d) for d in deps]
f = os.path.join(dirpath, '{name}/{name}.go'.format(name=name))
self.create_file(f, contents=
"""package {name}
{imports}
""".format(name=name, imports='\n'.join(imports)))
def _create_zip(self, src, dest, name):
"""Zips the Go package in src named 'name' into dest."""
shutil.make_archive(os.path.join(dest, name), 'zip', root_dir=src)
def _create_remote_lib(self, name):
self.make_target(spec='3rdparty/go/localzip/{name}'.format(name=name),
target_type=GoRemoteLibrary,
pkg=name)
def _init_dep_graph_files(self, src, zipdir, dep_graph):
"""Given a dependency graph, initializes the corresponding BUILD/packages/zip files.
Packages are placed in src, and their zipped contents are placed in zipdir.
"""
for t, deps in dep_graph.items():
self._create_package(src, t, deps)
self._create_zip(src, zipdir, t)
self._create_remote_lib(t)
def _create_fetch_context(self, zipdir):
"""Given a directory of zipfiles, creates a context for GoFetch."""
matcher = ArchiveFetcher.UrlInfo(url_format=os.path.join(zipdir, '\g<zip>.zip'),
default_rev='HEAD',
strip_level=0)
self.set_options_for_scope('go-fetchers', matchers={r'localzip/(?P<zip>[^/]+)': matcher})
context = self.context()
context.products.safe_create_data('go_remote_lib_src', lambda: defaultdict(str))
return context
def _assert_dependency_graph(self, root_target, dep_map):
"""Recursively assert that the dependency graph starting at root_target matches dep_map."""
if root_target.name not in dep_map:
return
expected_spec_paths = set('3rdparty/go/localzip/{}'.format(name)
for name in dep_map[root_target.name])
actual_spec_paths = set(dep.address.spec_path for dep in root_target.dependencies)
self.assertEqual(actual_spec_paths, expected_spec_paths)
dep_map = dep_map.copy()
del dep_map[root_target.name]
for dep in root_target.dependencies:
self._assert_dependency_graph(dep, dep_map)
def test_transitive_download_remote_libs_simple(self):
with temporary_dir() as src:
with temporary_dir() as zipdir:
dep_graph = {
'r1': ['r2'],
'r2': ['r3'],
'r3': []
}
self._init_dep_graph_files(src, zipdir, dep_graph)
self.set_options_for_scope('source', source_roots={'3rdparty/go': ['go_remote']})
r1 = self.target('3rdparty/go/localzip/r1')
context = self._create_fetch_context(zipdir)
go_fetch = self.create_task(context)
undeclared_deps = go_fetch._transitive_download_remote_libs({r1})
self.assertEqual(undeclared_deps, {})
self._assert_dependency_graph(r1, dep_graph)
def test_transitive_download_remote_libs_complex(self):
with temporary_dir() as src:
with temporary_dir() as zipdir:
dep_graph = {
'r1': ['r3', 'r4'],
'r2': ['r3'],
'r3': ['r4'],
'r4': []
}
self._init_dep_graph_files(src, zipdir, dep_graph)
self.set_options_for_scope('source', source_roots={'3rdparty/go': ['go_remote']})
r1 = self.target('3rdparty/go/localzip/r1')
r2 = self.target('3rdparty/go/localzip/r2')
context = self._create_fetch_context(zipdir)
go_fetch = self.create_task(context)
undeclared_deps = go_fetch._transitive_download_remote_libs({r1, r2})
self.assertEqual(undeclared_deps, {})
self._assert_dependency_graph(r1, dep_graph)
self._assert_dependency_graph(r2, dep_graph)
def test_transitive_download_remote_libs_undeclared_deps(self):
with temporary_dir() as src:
with temporary_dir() as zipdir:
dep_graph = {
'r1': ['r2', 'r3'],
'r2': ['r4']
}
self._init_dep_graph_files(src, zipdir, dep_graph)
self.set_options_for_scope('source', source_roots={'3rdparty/go': ['go_remote']})
r1 = self.target('3rdparty/go/localzip/r1')
r2 = self.target('3rdparty/go/localzip/r2')
context = self._create_fetch_context(zipdir)
go_fetch = self.create_task(context)
undeclared_deps = go_fetch._transitive_download_remote_libs({r1})
expected = defaultdict(set)
expected[r1] = {('localzip/r3', self.address('3rdparty/go/localzip/r3'))}
expected[r2] = {('localzip/r4', self.address('3rdparty/go/localzip/r4'))}
self.assertEqual(undeclared_deps, expected)
def test_issues_2616(self):
go_fetch = self.create_task(self.context())
self.create_file('src/github.com/u/a/a.go', contents="""
package a
import (
"fmt"
"math"
"sync"
"bitbucket.org/u/b"
)
""")
self.create_file('src/github.com/u/a/b.go', contents="""
package a
/*
#include <stdlib.h>
*/
import "C" // C was erroneously categorized as a remote lib in issue 2616.
import (
"fmt"
"github.com/u/c"
)
""")
remote_import_ids = go_fetch._get_remote_import_paths('github.com/u/a',
gopath=self.build_root)
self.assertItemsEqual(remote_import_ids, ['bitbucket.org/u/b', 'github.com/u/c'])
| apache-2.0 | -8,099,423,934,128,981,000 | 36.986726 | 95 | 0.635294 | false |
mitmedialab/MediaCloud-Web-Tools | server/auth.py | 1 | 5016 | import datetime
import logging
import flask_login
import mediacloud.api
from flask import session
from server import user_db, login_manager
logger = logging.getLogger(__name__)
ROLE_ADMIN = 'admin' # Do everything, including editing users
ROLE_ADMIN_READ_ONLY = 'admin-readonly' # Read access to admin interface
ROLE_MEDIA_EDIT = 'media-edit' # Add / edit media; includes feeds
ROLE_STORY_EDIT = 'story-edit' # Add / edit stories
ROLE_TM = 'tm' # Topic mapper; includes media and story editing
ROLE_STORIES_API = 'stories-api' # Access to the stories api
ROLE_SEARCH = 'search' # Access to the /search pages
ROLE_TM_READ_ONLY = 'tm-readonly' # Topic mapper; excludes media and story editing
# User class
class User(flask_login.UserMixin):
def __init__(self, profile):
self.profile = profile
self.name = profile['email']
self.id = profile['api_key']
self.active = profile['active']
self.created = datetime.datetime.now()
@property
def is_active(self):
return self.active
@property
def is_anonymous(self):
return False
@property
def is_authenticated(self):
return True
def has_auth_role(self, role):
my_roles = self.profile['auth_roles']
return (ROLE_ADMIN in my_roles) or (role in my_roles)
def create_in_db_if_needed(self):
if self.exists_in_db():
# if they are in the front-end db, then be sure to update their profile at each login
logger.debug("user %s already in db", self.name)
user_db.update_user(self.name, {'api_key': self.id, 'profile': self.profile})
return
logger.debug("user %s created in db", self.name)
user_db.add_user(self.name, self.id, self.profile)
def exists_in_db(self):
# is this user in the front-end database?
return user_db.includes_user_named(self.name)
def get_properties(self):
return {
'email': self.name,
'key': self.id,
'profile': self.profile
}
@classmethod
def get(cls, userid):
"""
:param userid: This is the user's API key
:return: the User object, or null if they aren't authorized
"""
try:
# check if the session still exists and is valid (in our shared redis cache)
# _id seems to only be set if the sessions exists in Redis
if ('_id' in session) and (session['_user_id'] == userid):
# so we don't have to refetch their profile on every request
user_in_db = user_db.find_by_api_key(userid)
return User(user_in_db['profile'])
else:
# the session isn't valid (perhaps we flushed the redis cache?)
return None
except Exception:
# be safer here... if anything goes wrong make them login again
return None
@login_manager.user_loader
def load_user(userid):
# Flask-login uses this method to lookup users to see if they are logged in already
logger.debug("trying to load_user %s", userid)
return User.get(userid)
def is_user_logged_in():
if flask_login.current_user is None:
return False
return flask_login.current_user.is_authenticated
def login_user(user):
flask_login.login_user(user, remember=True)
user.create_in_db_if_needed()
logger.debug(" login succeeded")
def user_has_auth_role(role):
return flask_login.current_user.has_auth_role(role)
def user_is_admin():
return user_has_auth_role('admin')
def create_user(profile):
user = User(profile)
user.create_in_db_if_needed()
logger.debug(" added to user cache %s", user.id)
return user
def load_from_db_by_username(username):
return user_db.find_by_username(username)
def user_name():
return flask_login.current_user.name
def user_mediacloud_key():
# Return the IP-restricted API token for this user from the cookie (note: this is the server IP)
return flask_login.current_user.profile['api_key']
def user_admin_mediacloud_client(user_mc_key=None):
# Return a mediacloud admin client for the logged-in user. Passing in a key lets you override reading it out
# of the request object (which you might not have access to)
mc_key_to_use = user_mc_key
if mc_key_to_use is None:
mc_key_to_use = user_mediacloud_key()
user_mc = mediacloud.api.AdminMediaCloud(mc_key_to_use)
return user_mc
def user_mediacloud_client(user_mc_key=None):
# Return a mediacloud client for the logged-in user. Passing in a key lets you override reading it out
# of the request object (which you might not have access to)
mc_key_to_use = user_mc_key
if mc_key_to_use is None:
mc_key_to_use = user_mediacloud_key()
user_mc = mediacloud.api.MediaCloud(mc_key_to_use)
return user_mc
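# Illustrative usage sketch (not part of the original module); the route below is
# invented for the example, only the helper functions above are real:
#
#   @app.route('/api/example')
#   @flask_login.login_required
#   def api_example():
#       user_mc = user_mediacloud_client()  # client bound to the caller's API key
#       ...                                 # query Media Cloud on the user's behalf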
| apache-2.0 | 5,876,654,610,688,028,000 | 32 | 113 | 0.632376 | false |
zstackorg/zstack-woodpecker | integrationtest/vm/multihosts/volumes/paths/path131.py | 2 | 2185 | import zstackwoodpecker.test_state as ts_header
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template2",
path_list=[[TestAction.create_volume, "volume1"], \
[TestAction.attach_volume, "vm1", "volume1"], \
[TestAction.create_volume, "volume2"], \
[TestAction.attach_volume, "vm1", "volume2"], \
[TestAction.create_volume, "volume3"], \
[TestAction.attach_volume, "vm1", "volume3"], \
[TestAction.create_volume, "volume4"], \
[TestAction.attach_volume, "vm1", "volume4"], \
[TestAction.create_volume, "volume5"], \
[TestAction.attach_volume, "vm1", "volume5"], \
[TestAction.create_volume, "volume6"], \
[TestAction.attach_volume, "vm1", "volume6"], \
[TestAction.create_volume, "volume7"], \
[TestAction.attach_volume, "vm1", "volume7"], \
[TestAction.create_volume, "volume8"], \
[TestAction.attach_volume, "vm1", "volume8"], \
[TestAction.detach_volume, "volume1"], \
[TestAction.detach_volume, "volume2"], \
[TestAction.detach_volume, "volume3"], \
[TestAction.detach_volume, "volume4"], \
[TestAction.detach_volume, "volume5"], \
[TestAction.detach_volume, "volume6"], \
[TestAction.detach_volume, "volume7"], \
[TestAction.detach_volume, "volume8"], \
[TestAction.create_volume_snapshot, "vm1-root", "snapshot1"], \
[TestAction.create_data_vol_template_from_volume, "volume1", "image1"], \
[TestAction.delete_volume, "volume1"], \
[TestAction.delete_volume, "volume2"], \
[TestAction.delete_volume, "volume3"], \
[TestAction.delete_volume, "volume4"], \
[TestAction.delete_volume, "volume5"], \
[TestAction.delete_volume, "volume6"], \
[TestAction.delete_volume, "volume7"], \
[TestAction.delete_volume, "volume8"], \
[TestAction.stop_vm, "vm1"], \
[TestAction.use_volume_snapshot, "snapshot1"], \
[TestAction.start_vm, "vm1"], \
[TestAction.create_volume, "volume9"], \
[TestAction.attach_volume, "vm1", "volume9"], \
[TestAction.detach_volume, "volume9"], \
[TestAction.stop_vm, "vm1"], \
[TestAction.change_vm_image, "vm1"], \
[TestAction.resize_data_volume, "volume9", 5*1024*1024], \
[TestAction.start_vm, "vm1"], \
[TestAction.reboot_vm, "vm1"]])
| apache-2.0 | 5,785,840,727,852,143,000 | 42.7 | 75 | 0.670023 | false |
christi3k/zulip | zproject/jinja2/__init__.py | 12 | 1088 | from __future__ import absolute_import # Python 2 only
from typing import Any
from django.contrib.staticfiles.storage import staticfiles_storage
from django.template.defaultfilters import slugify, pluralize
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils import translation
from django.http import HttpResponse
from jinja2 import Environment
from .compressors import minified_js
from zerver.templatetags.app_filters import display_list, render_markdown_path
def environment(**options):
# type: (**Any) -> Environment
env = Environment(**options)
env.globals.update({
'static': staticfiles_storage.url,
'url': reverse,
'minified_js': minified_js,
})
env.install_gettext_translations(translation, True) # type: ignore # https://github.com/python/typeshed/issues/927
env.filters['slugify'] = slugify
env.filters['pluralize'] = pluralize
env.filters['display_list'] = display_list
env.filters['render_markdown_path'] = render_markdown_path
return env
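# For reference (a sketch, not part of this file): Django hands its template
# options to this factory via the TEMPLATES setting, roughly
#
#   TEMPLATES = [{
#       'BACKEND': 'django.template.backends.jinja2.Jinja2',
#       'DIRS': [...],
#       'OPTIONS': {'environment': 'zproject.jinja2.environment'},
#   }]
#
# so whatever Django passes ends up in **options above.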
| apache-2.0 | -3,926,545,575,955,192,300 | 31.969697 | 119 | 0.742647 | false |
tuxology/bcc | tests/python/test_tools_memleak.py | 4 | 4282 | #!/usr/bin/env python
from unittest import main, skipUnless, TestCase
import distutils.version
import os
import subprocess
import sys
import tempfile
TOOLS_DIR = "../../tools/"
class cfg:
cmd_format = ""
# Amount of memory to leak. Note that the test application allocates memory
# for its own needs in libc, so this amount should be large enough to be
# the biggest allocation.
leaking_amount = 30000
def kernel_version_ge(major, minor):
# True if running kernel is >= X.Y
version = distutils.version.LooseVersion(os.uname()[2]).version
if version[0] > major:
return True
if version[0] < major:
return False
if minor and version[1] < minor:
return False
return True
def setUpModule():
# Build the memory leaking application.
c_src = 'test_tools_memleak_leaker_app.c'
tmp_dir = tempfile.mkdtemp(prefix='bcc-test-memleak-')
c_src_full = os.path.dirname(sys.argv[0]) + os.path.sep + c_src
exec_dst = tmp_dir + os.path.sep + 'leaker_app'
if subprocess.call(['gcc', '-g', '-O0', '-o', exec_dst, c_src_full]) != 0:
print("can't compile the leaking application")
raise Exception
# Taking two snapshots with a one second interval. Getting the largest
# allocation. Since attaching to a program happens with a delay, we wait
# for the first snapshot, then issue the command to the app. Finally,
# second snapshot is used to extract the information.
# Helper utilities "timeout" and "setbuf" are used to limit overall running
# time, and to disable buffering.
cfg.cmd_format = (
'stdbuf -o 0 -i 0 timeout -s KILL 10s ' + TOOLS_DIR +
'memleak.py -c "{} {{}} {}" -T 1 1 2'.format(exec_dst,
cfg.leaking_amount))
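# With the defaults above, cfg.cmd_format expands to something like
#   stdbuf -o 0 -i 0 timeout -s KILL 10s ../../tools/memleak.py \
#       -c "/tmp/bcc-test-memleak-XXXX/leaker_app {} 30000" -T 1 1 2
# where run_leaker() later substitutes the allocator kind (malloc, calloc, ...)
# for the remaining "{}". The temporary path shown is illustrative.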
@skipUnless(kernel_version_ge(4, 6), "requires kernel >= 4.6")
class MemleakToolTests(TestCase):
def tearDown(self):
if self.p:
del(self.p)
def run_leaker(self, leak_kind):
# Starting memleak.py, which in turn launches the leaking application.
self.p = subprocess.Popen(cfg.cmd_format.format(leak_kind),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
shell=True)
# Waiting for the first report.
while True:
self.p.poll()
if self.p.returncode is not None:
break
line = self.p.stdout.readline()
if b"with outstanding allocations" in line:
break
# At this point, memleak.py have already launched application and set
# probes. Sending command to the leaking application to make its
# allocations.
out = self.p.communicate(input=b"\n")[0]
# If there were memory leaks, they are in the output. Filter the lines
# containing "byte" substring. Every interesting line is expected to
# start with "N bytes from"
x = [x for x in out.split(b'\n') if b'byte' in x]
self.assertTrue(len(x) >= 1,
msg="At least one line should have 'byte' substring.")
# Taking last report.
x = x[-1].split()
self.assertTrue(len(x) >= 1,
msg="There should be at least one word in the line.")
# First word is the leak amount in bytes.
return int(x[0])
def test_malloc(self):
self.assertEqual(cfg.leaking_amount, self.run_leaker("malloc"))
def test_calloc(self):
self.assertEqual(cfg.leaking_amount, self.run_leaker("calloc"))
def test_realloc(self):
self.assertEqual(cfg.leaking_amount, self.run_leaker("realloc"))
def test_posix_memalign(self):
self.assertEqual(cfg.leaking_amount, self.run_leaker("posix_memalign"))
def test_valloc(self):
self.assertEqual(cfg.leaking_amount, self.run_leaker("valloc"))
def test_memalign(self):
self.assertEqual(cfg.leaking_amount, self.run_leaker("memalign"))
def test_pvalloc(self):
self.assertEqual(cfg.leaking_amount, self.run_leaker("pvalloc"))
def test_aligned_alloc(self):
self.assertEqual(cfg.leaking_amount, self.run_leaker("aligned_alloc"))
if __name__ == "__main__":
main()
| apache-2.0 | 5,513,017,951,526,482,000 | 33.532258 | 80 | 0.617235 | false |
marratj/ansible | lib/ansible/modules/network/eos/eos_vrf.py | 19 | 8633 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_vrf
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VRFs on Arista EOS network devices
description:
- This module provides declarative management of VRFs
on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
name:
description:
- Name of the VRF.
required: true
rd:
description:
- Route distinguisher of the VRF
interfaces:
description:
- List of interfaces to check the VRF has been
configured correctly.
aggregate:
description: List of VRFs definitions
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
default: no
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state arguments.
default: 10
state:
description:
- State of the VRF configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: Create vrf
eos_vrf:
name: test
rd: 1:200
interfaces:
- Ethernet2
state: present
- name: Delete VRFs
eos_vrf:
name: test
state: absent
- name: Create aggregate of VRFs with purge
eos_vrf:
aggregate:
- { name: test4, rd: "1:204" }
- { name: test5, rd: "1:205" }
state: present
purge: yes
- name: Delete aggregate of VRFs
eos_vrf:
aggregate:
- name: test2
- name: test3
- name: test4
- name: test5
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- vrf definition test
- rd 1:100
- interface Ethernet1
- vrf forwarding test
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import remove_default_spec
from ansible.module_utils.eos import load_config, run_commands
from ansible.module_utils.eos import eos_argument_spec, check_args
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
purge = module.params['purge']
for w in want:
name = w['name']
rd = w['rd']
interfaces = w['interfaces']
obj_in_have = search_obj_in_list(name, have)
if state == 'absent':
if obj_in_have:
commands.append('no vrf definition %s' % name)
elif state == 'present':
if not obj_in_have:
commands.append('vrf definition %s' % name)
if rd is not None:
commands.append('rd %s' % rd)
if w['interfaces']:
for i in w['interfaces']:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
else:
if w['rd'] is not None and w['rd'] != obj_in_have['rd']:
commands.append('vrf definition %s' % w['name'])
commands.append('rd %s' % w['rd'])
if w['interfaces']:
if not obj_in_have['interfaces']:
for i in w['interfaces']:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
elif set(w['interfaces']) != obj_in_have['interfaces']:
missing_interfaces = list(set(w['interfaces']) - set(obj_in_have['interfaces']))
for i in missing_interfaces:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['name'], want)
if not obj_in_want:
commands.append('no vrf definition %s' % h['name'])
return commands
def map_config_to_obj(module):
objs = []
output = run_commands(module, ['show vrf'])
lines = output[0].strip().splitlines()[2:]
for l in lines:
if not l:
continue
splitted_line = re.split(r'\s{2,}', l.strip())
if len(splitted_line) == 1:
continue
else:
obj = {}
obj['name'] = splitted_line[0]
obj['rd'] = splitted_line[1]
obj['interfaces'] = None
if len(splitted_line) > 4:
obj['interfaces'] = []
for i in splitted_line[4].split(','):
obj['interfaces'].append(i.strip())
objs.append(obj)
return objs
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
obj.append(item.copy())
else:
obj.append({
'name': module.params['name'],
'state': module.params['state'],
'rd': module.params['rd'],
'interfaces': module.params['interfaces']
})
return obj
def check_declarative_intent_params(want, module):
if module.params['interfaces']:
time.sleep(module.params['delay'])
have = map_config_to_obj(module)
for w in want:
for i in w['interfaces']:
obj_in_have = search_obj_in_list(w['name'], have)
if obj_in_have and 'interfaces' in obj_in_have and i not in obj_in_have['interfaces']:
module.fail_json(msg="Interface %s not configured on vrf %s" % (i, w['name']))
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
interfaces=dict(type='list'),
delay=dict(default=10, type='int'),
rd=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(eos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
if result['changed']:
check_declarative_intent_params(want, module)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 105,664,695,327,190,990 | 27.491749 | 104 | 0.576393 | false |
RonnyPfannschmidt/pip | src/pip/_internal/req/req_uninstall.py | 8 | 16547 | from __future__ import absolute_import
import csv
import functools
import logging
import os
import sys
import sysconfig
from pip._vendor import pkg_resources
from pip._internal.compat import WINDOWS, cache_from_source, uses_pycache
from pip._internal.exceptions import UninstallationError
from pip._internal.locations import bin_py, bin_user
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
FakeFile, ask, dist_in_usersite, dist_is_local, egg_link_path, is_local,
normalize_path, renames,
)
from pip._internal.utils.temp_dir import TempDirectory
logger = logging.getLogger(__name__)
def _script_names(dist, script_name, is_gui):
"""Create the fully qualified name of the files created by
{console,gui}_scripts for the given ``dist``.
Returns the list of file names
"""
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
exe_name = os.path.join(bin_dir, script_name)
paths_to_remove = [exe_name]
if WINDOWS:
paths_to_remove.append(exe_name + '.exe')
paths_to_remove.append(exe_name + '.exe.manifest')
if is_gui:
paths_to_remove.append(exe_name + '-script.pyw')
else:
paths_to_remove.append(exe_name + '-script.py')
return paths_to_remove
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
def compact(paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
sep = os.path.sep
short_paths = set()
for path in sorted(paths, key=len):
should_add = any(
path.startswith(shortpath.rstrip("*")) and
path[len(shortpath.rstrip("*").rstrip(sep))] == sep
for shortpath in short_paths
)
if not should_add:
short_paths.add(path)
return short_paths
def compress_for_output_listing(paths):
"""Returns a tuple of 2 sets of which paths to display to user
The first set contains paths that would be deleted. Files of a package
are not added and the top-level directory of the package has a '*' added
at the end - to signify that all its contents are removed.
The second set contains files that would have been skipped in the above
folders.
"""
will_remove = list(paths)
will_skip = set()
# Determine folders and files
folders = set()
files = set()
for path in will_remove:
if path.endswith(".pyc"):
continue
if path.endswith("__init__.py") or ".dist-info" in path:
folders.add(os.path.dirname(path))
files.add(path)
folders = compact(folders)
# This walks the tree using os.walk to not miss extra folders
# that might get added.
for folder in folders:
for dirpath, _, dirfiles in os.walk(folder):
for fname in dirfiles:
if fname.endswith(".pyc"):
continue
file_ = os.path.normcase(os.path.join(dirpath, fname))
if os.path.isfile(file_) and file_ not in files:
# We are skipping this file. Add it to the set.
will_skip.add(file_)
will_remove = files | {
os.path.join(folder, "*") for folder in folders
}
return will_remove, will_skip
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = TempDirectory(kind="uninstall")
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def add(self, path):
head, tail = os.path.split(path)
# we normalize the head to resolve parent directory symlinks, but not
# the tail, since we only want to uninstall symlinks, not their targets
path = os.path.join(normalize_path(head), os.path.normcase(tail))
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
# __pycache__ files can show up after 'installed-files.txt' is created,
# due to imports
if os.path.splitext(path)[1] == '.py' and uses_pycache:
self.add(cache_from_source(path))
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def _stash(self, path):
return os.path.join(
self.save_dir.path, os.path.splitdrive(path)[1].lstrip(os.path.sep)
)
def remove(self, auto_confirm=False, verbose=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
dist_name_version = (
self.dist.project_name + "-" + self.dist.version
)
logger.info('Uninstalling %s:', dist_name_version)
with indent_log():
if auto_confirm or self._allowed_to_proceed(verbose):
self.save_dir.create()
for path in sorted(compact(self.paths)):
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.info('Successfully uninstalled %s', dist_name_version)
def _allowed_to_proceed(self, verbose):
"""Display which files would be deleted and prompt for confirmation
"""
def _display(msg, paths):
if not paths:
return
logger.info(msg)
with indent_log():
for path in sorted(compact(paths)):
logger.info(path)
if not verbose:
will_remove, will_skip = compress_for_output_listing(self.paths)
else:
# In verbose mode, display all the files that are going to be
# deleted.
will_remove = list(self.paths)
will_skip = set()
_display('Would remove:', will_remove)
_display('Would not remove (might be manually added):', will_skip)
_display('Would not remove (outside of prefix):', self._refuse)
return ask('Proceed (y/n)? ', ('y', 'n')) == 'y'
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir.path is None:
logger.error(
"Can't roll back %s; was not uninstalled",
self.dist.project_name,
)
return False
logger.info('Rolling back uninstall of %s', self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.debug('Replacing %s', path)
renames(tmp_path, path)
for pth in self.pth.values():
pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
self.save_dir.cleanup()
self._moved_paths = []
@classmethod
def from_dist(cls, dist):
dist_path = normalize_path(dist.location)
if not dist_is_local(dist):
logger.info(
"Not uninstalling %s at %s, outside environment %s",
dist.key,
dist_path,
sys.prefix,
)
return cls(dist)
if dist_path in {p for p in {sysconfig.get_path("stdlib"),
sysconfig.get_path("platstdlib")}
if p}:
logger.info(
"Not uninstalling %s at %s, as it is in the standard library.",
dist.key,
dist_path,
)
return cls(dist)
paths_to_remove = cls(dist)
develop_egg_link = egg_link_path(dist)
develop_egg_link_egg_info = '{}.egg-info'.format(
pkg_resources.to_filename(dist.project_name))
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
# Uninstall cases order do matter as in the case of 2 installs of the
# same package, pip needs to uninstall the currently detected version
if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
not dist.egg_info.endswith(develop_egg_link_egg_info)):
# if dist.egg_info.endswith(develop_egg_link_egg_info), we
# are in fact in the develop_egg_link case
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
paths_to_remove.add(path + '.pyo')
elif distutils_egg_info:
raise UninstallationError(
"Cannot uninstall {!r}. It is a distutils installed project "
"and thus we cannot accurately determine which files belong "
"to it which would lead to only a partial uninstall.".format(
dist.project_name,
)
)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in uninstallation_paths(dist):
paths_to_remove.add(path)
elif develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, dist.project_name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location,
)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
_scripts_to_remove = []
console_scripts = dist.get_entry_map(group='console_scripts')
for name in console_scripts.keys():
_scripts_to_remove.extend(_script_names(dist, name, False))
# find gui_scripts
gui_scripts = dist.get_entry_map(group='gui_scripts')
for name in gui_scripts.keys():
_scripts_to_remove.extend(_script_names(dist, name, True))
for s in _scripts_to_remove:
paths_to_remove.add(s)
return paths_to_remove
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError(
"Cannot remove entries from nonexistent file %s" % pth_file
)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if WINDOWS and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
def remove(self):
logger.debug('Removing pth entries from %s:', self.file)
with open(self.file, 'rb') as fh:
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
if any(b'\r\n' in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
# handle missing trailing newline
if lines and not lines[-1].endswith(endline.encode("utf-8")):
lines[-1] = lines[-1] + endline.encode("utf-8")
for entry in self.entries:
try:
logger.debug('Removing entry: %s', entry)
lines.remove((entry + endline).encode("utf-8"))
except ValueError:
pass
with open(self.file, 'wb') as fh:
fh.writelines(lines)
def rollback(self):
if self._saved_lines is None:
logger.error(
'Cannot roll back changes to %s, none were made', self.file
)
return False
logger.debug('Rolling %s back to previous state', self.file)
with open(self.file, 'wb') as fh:
fh.writelines(self._saved_lines)
return True
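# A minimal usage sketch for UninstallPthEntries (illustrative only: the pth
# path and the entry below are assumed examples, not real install data).
def _example_pth_uninstall():
    pth = UninstallPthEntries('/tmp/site-packages/easy-install.pth')
    pth.add('./example_pkg-1.0-py2.7.egg')  # normalised before being stored
    pth.remove()     # rewrites the .pth file without the stored entries
    pth.rollback()   # restores the lines saved by remove()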
| mit | 5,896,489,632,529,907,000 | 35.367033 | 79 | 0.559678 | false |
HPPTECH/hpp_IOSTressTest | Refer/IOST_OLD_SRC/IOST_0.12/IOST_WRun_PCIE.py | 6 | 6106 | #!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_WRun_PCIE.py
# Date : Oct 25, 2016
# Author : HuuHoang Nguyen
# Contact : [email protected]
# : [email protected]
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
from IOST_Basic import *
import gtk
import gobject
import gtk.glade
#======================================================================
IOST_WRun_PCIE_Debug_Enable = 1
#======================================================================
class IOST_WRun_PCIE():
"""
This is a class to get all PCIE objects from the IOST_WRun_Skylark window and
control these components.
"""
#----------------------------------------------------------------------
def __init__(self, glade_filename, window_name, builder=None):
"This is a function to get the PCIEn objects"
self.IOST_WRun_PCIE_window = window_name
if not builder:
self.WRun_PCIE_Builder = gtk.Builder()
self.WRun_PCIE_Builder.add_from_file(glade_filename)
self.WRun_PCIE_Builder.connect_signals(self)
else:
self.WRun_PCIE_Builder = builder
#----------------------------------------------------------------------
def WRun_GetPCIE_Obj(self, window_name):
"Get all PCIE objects on the WRun window and store them in self.IOST_Objs"
# print window_name
for i in range(0, self.IOST_Data["PCIE_PortNum"]):
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_Action_L"] = self.WRun_PCIE_Builder.get_object(self.IOST_Objs[window_name]["_Summary_PCIE"+str(i)+"_Action_L"])
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_PassNum_L"] = self.WRun_PCIE_Builder.get_object(self.IOST_Objs[window_name]["_Summary_PCIE"+str(i)+"_PassNum_L"])
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_FailNum_L"] = self.WRun_PCIE_Builder.get_object(self.IOST_Objs[window_name]["_Summary_PCIE"+str(i)+"_FailNum_L"])
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_Name_L"] = self.WRun_PCIE_Builder.get_object(self.IOST_Objs[window_name]["_Summary_PCIE"+str(i)+"_Name_L"])
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_Pass_L"] = self.WRun_PCIE_Builder.get_object(self.IOST_Objs[window_name]["_Summary_PCIE"+str(i)+"_Pass_L"])
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_Fail_L"] = self.WRun_PCIE_Builder.get_object(self.IOST_Objs[window_name]["_Summary_PCIE"+str(i)+"_Fail_L"])
#----------------------------------------------------------------------
def WRun_InitPCIE_Obj(self, window_name):
"Initialize all PCIE objects when WRun starts"
if self.IOST_Data["PCIE"] == STATUS_ENABLE:
self.WRun_PCIE_Builder.get_object(window_name+"_Summary_PCIE_F").set_sensitive(True)
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_PCIE_L"), color=WRUN_IP_COLOR_DEFAULT, bold=True)
for i in range(0, self.IOST_Data["PCIE_PortNum"]):
if self.IOST_Data["PCIE"+str(i)][0] == STATUS_DISABLE:
self.WRun_SetSensitive_PCIE(window_name, i, False)
else:
self.WRun_SetSensitive_PCIE(window_name, i, True)
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_Name_L"], "blue", bold=True)
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_Pass_L"], "green")
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_Fail_L"], "red")
else:
for i in range(0, self.IOST_Data["PCIE_PortNum"]):
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_Action_L"].set_text(STATUS_DISABLE)
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_PassNum_L"].set_text(STATUS_EMPTY)
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(i)+"_FailNum_L"].set_text(STATUS_EMPTY)
self.WRun_PCIE_Builder.get_object(window_name+"_Summary_PCIE_F").set_sensitive(False)
#----------------------------------------------------------------------
def WRun_SetSensitive_PCIE(self, window_name, element, is_sensitive):
"Enable or disable the summary widgets of one PCIE port on the WRun window"
if not is_sensitive:
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(element)+"_Action_L"].set_text(STATUS_DISABLE)
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(element)+"_PassNum_L"].set_text(STATUS_EMPTY)
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(element)+"_FailNum_L"].set_text(STATUS_EMPTY)
self.WRun_PCIE_Builder.get_object(window_name+"_Summary_PCIE"+str(element)+"_HB").set_sensitive(is_sensitive)
else:
self.WRun_PCIE_Builder.get_object(window_name+"_Summary_PCIE"+str(element)+"_HB").set_sensitive(is_sensitive)
self.WRun_SetAction_PCIE(window_name, element, STATUS_INIT)
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(element)+"_Action_L"].set_text(self.IOST_Data["PCIE"+str(element)+"_Status"])
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(element)+"_PassNum_L"].set_text(str(self.IOST_Data["PCIE"+str(element)+"_PassNum"]))
self.IOST_Objs[window_name][window_name+"_Summary_PCIE"+str(element)+"_FailNum_L"].set_text(str(self.IOST_Data["PCIE"+str(element)+"_FailNum"]))
def WRun_SetAction_PCIE(self, window_name, element, is_action):
self.IOST_Data["PCIE"+str(element)+"_Status"]=is_action
| mit | -6,395,786,963,239,752,000 | 59.455446 | 190 | 0.584671 | false |
addition-it-solutions/project-all | addons/l10n_fr/wizard/fr_report_compute_resultant.py | 8 | 2245 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_cdr_report(osv.osv_memory):
_name = 'account.cdr.report'
_description = 'Account CDR Report'
def _get_defaults(self, cr, uid, context=None):
fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
return fiscalyear_id
_columns = {
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
}
_defaults = {
'fiscalyear_id': _get_defaults
}
def print_cdr_report(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', [])
data = {}
data['form'] = {}
data['ids'] = active_ids
data['form']['fiscalyear_id'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
return self.pool['report'].get_action(
cr, uid, ids, 'l10n_fr.report_l10nfrresultat', data=data, context=context
)
| agpl-3.0 | -3,881,236,392,867,007,000 | 39.089286 | 93 | 0.647661 | false |
kerimlcr/ab2017-dpyo | ornek/imageio/imageio-2.1.2/debian/python-imageio/usr/lib/python2.7/dist-packages/imageio/__init__.py | 2 | 1275 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
# This docstring is used at the index of the documentation pages, and
# gets inserted into a slightly larger description (in setup.py) for
# the page on Pypi:
"""
Imageio is a Python library that provides an easy interface to read and
write a wide range of image data, including animated images, volumetric
data, and scientific formats. It is cross-platform, runs on Python 2.x
and 3.x, and is easy to install.
Main website: http://imageio.github.io
"""
# flake8: noqa
__version__ = '2.1.2'
# Load some bits from core
from .core import FormatManager, RETURN_BYTES
# Instantiate format manager
formats = FormatManager()
# Load the functions
from .core.functions import help
from .core.functions import get_reader, get_writer
from .core.functions import imread, mimread, volread, mvolread
from .core.functions import imwrite, mimwrite, volwrite, mvolwrite
# Load function aliases
from .core.functions import read, save
from .core.functions import imsave, mimsave, volsave, mvolsave
# Load all the plugins
from . import plugins
# expose the show method of formats
show_formats = formats.show
# Clean up some names
del FormatManager
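# Minimal usage sketch of the functions re-exported above (illustrative only;
# 'example.png' and 'example.gif' are assumed file names).
def _example_usage():
    image = imread('example.png')    # returns an ndarray of pixel data
    imwrite('copy.png', image)       # write a single image
    frames = mimread('example.gif')  # list of frames for animated formats
    return image, frames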
| gpl-3.0 | -5,891,973,240,517,837,000 | 27.977273 | 71 | 0.761569 | false |
pawelmhm/splash | splash/pool.py | 2 | 2940 | from __future__ import absolute_import
from twisted.internet import defer
from twisted.python import log
class RenderPool(object):
"""A pool of renders. The number of slots determines how many
renders will be run in parallel, at the most."""
def __init__(self, slots, network_manager_factory, splash_proxy_factory_cls, js_profiles_path, verbosity=1):
self.network_manager_factory = network_manager_factory
self.splash_proxy_factory_cls = splash_proxy_factory_cls or (lambda profile_name: None)
self.js_profiles_path = js_profiles_path
self.active = set()
self.queue = defer.DeferredQueue()
self.verbosity = verbosity
for n in range(slots):
self._wait_for_render(None, n, log=False)
def render(self, rendercls, render_options, proxy, **kwargs):
splash_proxy_factory = self.splash_proxy_factory_cls(proxy)
pool_d = defer.Deferred()
self.queue.put((rendercls, render_options, splash_proxy_factory, kwargs, pool_d))
self.log("[%s] queued" % render_options.get_uid())
return pool_d
def _wait_for_render(self, _, slot, log=True):
if log:
self.log("SLOT %d is available" % slot)
d = self.queue.get()
d.addCallback(self._start_render, slot)
d.addBoth(self._wait_for_render, slot)
return _
def _start_render(self, slot_args, slot):
self.log("initializing SLOT %d" % (slot, ))
(rendercls, render_options, splash_proxy_factory, kwargs,
pool_d) = slot_args
render = rendercls(
network_manager=self.network_manager_factory(),
splash_proxy_factory=splash_proxy_factory,
render_options=render_options,
verbosity=self.verbosity,
)
self.active.add(render)
render.deferred.chainDeferred(pool_d)
pool_d.addErrback(self._error, render, slot)
pool_d.addBoth(self._close_render, render, slot)
self.log("[%s] SLOT %d is starting" % (render_options.get_uid(), slot))
try:
render.start(**kwargs)
except:
render.deferred.errback()
raise
self.log("[%s] SLOT %d is working" % (render_options.get_uid(), slot))
return render.deferred
def _error(self, failure, render, slot):
uid = render.render_options.get_uid()
self.log("[%s] SLOT %d finished with an error %s: %s" % (uid, slot, render, failure))
return failure
def _close_render(self, _, render, slot):
uid = render.render_options.get_uid()
self.log("[%s] SLOT %d is closing %s" % (uid, slot, render))
self.active.remove(render)
render.deferred.cancel()
render.close()
self.log("[%s] SLOT %d done with %s" % (uid, slot, render))
return _
def log(self, text):
if self.verbosity >= 2:
log.msg(text, system='pool')
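# Minimal usage sketch (illustrative only): rendercls, render_options, proxy
# and the render kwargs are placeholders for the objects Splash normally
# supplies when handling a request.
def _example_render(pool, rendercls, render_options, proxy, **render_kwargs):
    d = pool.render(rendercls, render_options, proxy, **render_kwargs)
    d.addCallback(lambda result: log.msg("render finished"))
    d.addErrback(lambda failure: log.msg("render failed: %s" % failure))
    return d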
| bsd-3-clause | -3,478,791,923,297,984,000 | 37.684211 | 112 | 0.607823 | false |
jordotech/sherri_satchmo | satchmo/apps/payment/modules/sagepay/config.py | 7 | 4029 | from livesettings import *
from django.utils.translation import ugettext_lazy as _
# this is so that the translation utility will pick up the string
gettext = lambda s: s
_strings = (gettext('CreditCard'), gettext('Credit Card'), gettext('Sage Pay Secure Payments'))
# These cards require the issue number and start date fields filled in.
REQUIRES_ISSUE_NUMBER = ('MAESTRO', 'SOLO')
PAYMENT_GROUP = ConfigurationGroup('PAYMENT_SAGEPAY',
_('Sage Pay Payment Settings'),
ordering=101)
config_register_list(
BooleanValue(PAYMENT_GROUP,
'LIVE',
description=_("Accept real payments"),
help_text=_("False if you want to be in test mode"),
default=False),
BooleanValue(PAYMENT_GROUP,
'SIMULATOR',
description=_("Simulated Transactions?"),
help_text=_("Must be false to accept real payments"),
default=False),
BooleanValue(PAYMENT_GROUP,
'SKIP_POST',
description=_("Skip post?"),
help_text=_("For testing only, this will skip actually posting to Sage Pay servers. This is because their servers restrict IPs of posting servers, even for tests. If you are developing on a desktop, you'll have to enable this."),
default=False),
StringValue(PAYMENT_GROUP,
'CAPTURE',
description=_('Payment Capture'),
help_text=_('This can be "Payment" which captures immediately, or "Deferred". Note that you can only use the latter if you set option on your Sage pay account first.'),
choices = (
(('PAYMENT', 'Payment')),
(('DEFERRED', 'Deferred')),
),
default = 'PAYMENT'),
ModuleValue(PAYMENT_GROUP,
'MODULE',
description=_('Implementation module'),
hidden=True,
default = 'payment.modules.sagepay'),
StringValue(PAYMENT_GROUP,
'KEY',
description=_("Module key"),
hidden=True,
default = 'SAGEPAY'),
StringValue(PAYMENT_GROUP,
'LABEL',
description=_('English name for this group on the checkout screens'),
default = 'Sage Pay Secure Payments',
dummy = _('Sage Pay Secure Payments'), # Force this to appear on po-files
help_text = _('This will be passed to the translation utility')),
MultipleStringValue(PAYMENT_GROUP,
'CREDITCHOICES',
description=_('Available credit cards'),
choices = (
(('VISA','Visa Credit/Debit')),
(('UKE','Visa Electron')),
(('DELTA','Delta')),
#(('AMEX','American Express')), # not always available
#(('DC','Diners Club')), # not always available
(('MC','Mastercard')),
(('MAESTRO','UK Maestro')),
(('SOLO','Solo')),
(('JCB','JCB')),
),
default = ('VISA', 'MC')),
StringValue(PAYMENT_GROUP,
'VENDOR',
description=_('Your Vendor Name'),
default="",
help_text= _("This is used for Live and Test transactions. Make sure to add your server IP address to VSP, or it won't work.")),
StringValue(PAYMENT_GROUP,
'VENDOR_SIMULATOR',
description=_('Simulator Vendor Name'),
default="",
help_text= _("This is used for Live and Test transactions. Make sure to activate the VSP Simulator (you have to directly request it) and add your server IP address to the VSP Simulator, or it won't work.")),
StringValue(PAYMENT_GROUP,
'CURRENCY_CODE',
description=_('Currency Code'),
help_text=_('Currency code for Sage Pay transactions.'),
default = 'GBP'),
StringValue(PAYMENT_GROUP,
'URL_BASE',
description=_('The url base used for constructing urlpatterns which will use this module'),
default = r'^sagepay/'),
BooleanValue(PAYMENT_GROUP,
'EXTRA_LOGGING',
description=_("Verbose logs"),
help_text=_("Add extensive logs during post."),
default=False)
)
| bsd-3-clause | -7,466,817,001,370,552,000 | 35.963303 | 239 | 0.601638 | false |
Perferom/android_external_chromium_org | tools/telemetry/telemetry/core/platform/profiler/v8_profiler.py | 26 | 1625 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import tempfile
from telemetry.core.platform import profiler
class V8Profiler(profiler.Profiler):
_V8_ARG = '--js-flags=--logfile=%s --prof --log-timer-events'
@classmethod
def name(cls):
return 'v8'
@classmethod
def is_supported(cls, browser_type):
return not browser_type.startswith('cros')
@classmethod
def CustomizeBrowserOptions(cls, browser_type, options):
if browser_type.startswith('android'):
dump_file = '/data/local/tmp/v8-profile.log'
else:
dump_file = tempfile.mkstemp()[1]
options.AppendExtraBrowserArgs([cls._V8_ARG % dump_file, '--no-sandbox'])
def CollectProfile(self):
# Find output filename from browser argument.
for i in self._browser_backend.browser_options.extra_browser_args:
match = re.match(self._V8_ARG % '(\S+)', i)
if match:
output_file = match.groups(0)[0]
assert output_file
# On Android pull the output file to the host.
if self._platform_backend.GetOSName() == 'android':
host_output_file = '%s.log' % self._output_path
self._browser_backend.adb.Adb().Adb().Pull(output_file, host_output_file)
# Clean the device
self._browser_backend.adb.Adb().RunShellCommand('rm %s' % output_file)
output_file = host_output_file
print 'V8 profile saved as %s' % output_file
print 'To view, open in ' \
'http://v8.googlecode.com/svn/trunk/tools/tick-processor.html'
return [output_file]
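# Minimal sketch of how these hooks are driven (illustrative only; the
# browser_type string and the options object are assumed to come from
# Telemetry's browser setup).
def _example_profile_setup(options):
    if V8Profiler.is_supported('android-chromium'):
        # Appends --js-flags=--logfile=... --prof --log-timer-events
        V8Profiler.CustomizeBrowserOptions('android-chromium', options)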
| bsd-3-clause | 2,839,174,302,564,927,000 | 33.574468 | 79 | 0.678769 | false |
jrper/fluidity | tools/profiling-graph.py | 5 | 1640 | #!/usr/bin/env python
import pylab
import matplotlib
import glob
import os.path
matplotlib.use('SVG')
def fetch_time(sub, file):
f = open(file)
for line in f:
if line.startswith("[") and sub in line:
return float(line.split()[3])
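# The profiling-log format is assumed to look like the line below (an
# illustrative example, not taken from a real fluidity log); fetch_time keys on
# lines starting with "[" that mention the subroutine and reads the time from
# the fourth whitespace-separated field, i.e. line.split()[3]:
#   [timing] fluids took 123.45 s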
def draw_graph(times, output):
for sub in times.keys():
x = times[sub].keys()
y = [times[sub][i] for i in x]
z = zip(x, y); z.sort()
x = [tup[0] for tup in z]
y = [tup[1] for tup in z]
print "%s:" % sub
print "--> x = ", x
print "--> y = ", y
pylab.plot(x, y, label=sub)
intFormatter = pylab.FormatStrFormatter('%d')
a = pylab.gca()
a.xaxis.set_major_formatter(intFormatter)
a.yaxis.set_major_formatter(intFormatter)
pylab.legend(loc='best')
pylab.draw()
pylab.xlabel("Revision number")
pylab.ylabel("Time (s)")
pylab.savefig(output)
if __name__ == "__main__":
import optparse
usage = "usage: %prog [--subroutines] [--output] profiling-logs"
parser = optparse.OptionParser(usage)
parser.add_option("-s", "--subroutines", dest="subs", default="fluids",
help="e.g. fluids,advdif,diff3d")
parser.add_option("-o", "--output", dest="output", default="times.svg",
help="output file")
(options, args) = parser.parse_args()
# get subs, files, output
subs = options.subs.split(',')
if len(args) > 0:
files = args
else:
files = glob.glob("*.log")
output = options.output
times = {}
for sub in subs:
times[sub] = {}
for file in files:
name = int(os.path.basename(file[:-4]))
times[sub][name] = fetch_time(sub, file)
draw_graph(times, output)
| lgpl-2.1 | -7,722,557,930,395,733,000 | 23.477612 | 73 | 0.60122 | false |
Vagab0nd/SiCKRAGE | tests/db_tests.py | 1 | 5066 | """
Test show database functionality.
Tests:
DBBasicTests
DBMultiTests
CacheDBTests
"""
import threading
import time
import unittest
from datetime import datetime
import sickchill.oldbeard
from tests import test_lib as test
class DBBasicTests(test.SickChillTestDBCase):
"""
Perform basic database tests.
Tests:
test_select
"""
def setUp(self):
"""
Set up test.
"""
super().setUp()
self.sr_db = sickchill.oldbeard.db.DBConnection()
def test_select(self):
"""
Test selecting from the database
"""
self.sr_db.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [0000])
class DBMultiTests(test.SickChillTestDBCase):
"""
Perform multi-threaded test of the database
Tests:
test_threaded
"""
def setUp(self):
"""
Set up test.
"""
super().setUp()
self.sr_db = sickchill.oldbeard.db.DBConnection()
def select(self):
"""
Select from the database.
"""
self.sr_db.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [0000])
def test_threaded(self):
"""
Test multi-threaded selection from the database
"""
for _ in range(4):
thread = threading.Thread(target=self.select)
thread.start()
class CacheDBTests(test.SickChillTestDBCase):
def setUp(self):
super().setUp()
self.cache_db_con = sickchill.oldbeard.db.DBConnection('cache.db')
sickchill.oldbeard.db.upgrade_database(self.cache_db_con, sickchill.oldbeard.databases.cache.InitialSchema)
cur_timestamp = int(time.mktime(datetime.today().timetuple()))
self.record = (
{
'provider': 'provider',
'name': 'name',
'season': 1,
'episodes': '|1|',
'indexerid': 1,
'url': 'url',
'time': cur_timestamp,
'quality': '1',
'release_group': 'SICKCHILL',
'version': 1,
'seeders': 1,
'leechers': 1,
'size': 1
},
{'url': 'url'}
)
self.cache_db_con.action("DELETE FROM results")
query = 'INSERT OR IGNORE INTO results ({col}) VALUES ({rep})'.format(col=', '.join(self.record[0].keys()), rep=', '.join(['?'] * len(self.record[0])))
self.cache_db_con.action(query, list(self.record[0].values()))
def test_mass_upsert(self):
def num_rows():
return len(self.cache_db_con.select('SELECT url FROM results'))
self.assertEqual(num_rows(), 1, num_rows())
self.cache_db_con.upsert('results', self.record[0], self.record[1])
self.assertEqual(num_rows(), 1, )
self.cache_db_con.mass_upsert('results', [self.record], log_transaction=True)
self.assertEqual(num_rows(), 1)
self.record[0]['url'] = self.record[1]['url'] = 'new_url'
self.cache_db_con.upsert('results', self.record[0], self.record[1])
self.assertEqual(num_rows(), 2)
self.cache_db_con.mass_upsert('results', [self.record], log_transaction=True)
self.assertEqual(num_rows(), 2)
self.cache_db_con.upsert('results', self.record[0], self.record[1])
self.assertEqual(num_rows(), 2)
self.record[0]['url'] = self.record[1]['url'] = 'third_url'
self.record[0]['seeders'] = 9999
self.cache_db_con.mass_upsert('results', [self.record], log_transaction=True)
self.assertEqual(num_rows(), 3)
self.cache_db_con.upsert('results', self.record[0], self.record[1])
self.assertEqual(num_rows(), 3)
self.cache_db_con.mass_upsert('results', [self.record], log_transaction=True)
self.assertEqual(num_rows(), 3)
results = self.cache_db_con.select("SELECT * FROM results WHERE url = ?", [self.record[1]['url']])
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['url'], self.record[0]['url'])
self.assertEqual(results[0]['seeders'], self.record[0]['seeders'])
results = self.cache_db_con.select("SELECT * FROM results WHERE url = 'url'")
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['url'], 'url')
self.assertNotEqual(results[0]['url'], self.record[0]['url'])
self.assertEqual(results[0]['seeders'], 1)
self.assertNotEqual(results[0]['seeders'], self.record[0]['seeders'])
self.assertEqual(num_rows(), 3)
if __name__ == '__main__':
print("==================")
print("STARTING - DB TESTS")
print("==================")
print("######################################################################")
SUITE = unittest.TestLoader().loadTestsFromTestCase(DBBasicTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
# suite = unittest.TestLoader().loadTestsFromTestCase(DBMultiTests)
# unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-3.0 | 4,743,959,269,641,278,000 | 31.267516 | 159 | 0.567114 | false |
spacetelescope/asv | test/test_util.py | 2 | 11635 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import locale
import os
import sys
import shutil
import pickle
import multiprocessing
import threading
import traceback
import time
import datetime
import six
import pytest
from asv import console
from asv import util
WIN = (os.name == 'nt')
def _multiprocessing_raise_processerror(arg):
try:
raise util.ProcessError(["a"], 1, "aa", "bb")
except BaseException as exc:
# If the following is just 'raise', multiprocessing will hang
# on Python 2.7.8 due to https://bugs.python.org/issue9400
raise util.ParallelFailure(str(exc), exc.__class__, traceback.format_exc())
def _multiprocessing_raise_usererror(arg):
try:
raise util.UserError("hello")
except BaseException as exc:
raise util.ParallelFailure(str(exc), exc.__class__, traceback.format_exc())
@pytest.mark.timeout(30)
def test_parallelfailure():
# Check the workaround for https://bugs.python.org/issue9400 works
if WIN and os.path.basename(sys.argv[0]).lower().startswith('py.test'):
# Multiprocessing in spawn mode can result to problems with py.test
pytest.skip("Multiprocessing spawn mode on Windows not safe to run "
"from py.test runner.")
# The exception class must be pickleable
exc = util.ParallelFailure("test", Exception, "something")
exc2 = pickle.loads(pickle.dumps(exc))
assert exc.message == exc2.message
assert exc.exc_cls == exc2.exc_cls
assert exc.traceback_str == exc2.traceback_str
assert str(exc) == "Exception: test\n something"
# Check multiprocessing does not hang (it would hang on Python
# 2.7.8 if the 'raise util.ParallelFailure ...' above is changed
# to just 'raise')
pool = multiprocessing.Pool(4)
try:
pool.map(_multiprocessing_raise_processerror, range(10))
except util.ParallelFailure as exc:
pass
finally:
pool.close()
# Check reraising UserError
pool = multiprocessing.Pool(4)
try:
try:
pool.map(_multiprocessing_raise_usererror, range(10))
except util.ParallelFailure as exc:
exc.reraise()
finally:
pool.close()
assert False
except util.UserError as exc:
# OK
pass
def test_which_path(tmpdir):
dirname = os.path.abspath(os.path.join(str(tmpdir), 'name with spaces'))
fn = 'asv_test_exe_1234.exe'
fn2 = 'asv_test_exe_4321.bat'
os.makedirs(dirname)
shutil.copyfile(sys.executable, os.path.join(dirname, fn))
shutil.copyfile(sys.executable, os.path.join(dirname, fn2))
old_path = os.environ.get('PATH', '')
try:
if WIN:
os.environ['PATH'] = old_path + os.pathsep + '"' + dirname + '"'
util.which('asv_test_exe_1234')
util.which('asv_test_exe_1234.exe')
util.which('asv_test_exe_4321')
util.which('asv_test_exe_4321.bat')
os.environ['PATH'] = old_path + os.pathsep + dirname
util.which('asv_test_exe_1234.exe')
util.which('asv_test_exe_4321.bat')
if WIN:
util.which('asv_test_exe_1234')
util.which('asv_test_exe_4321')
# Check the paths= argument
util.which('asv_test_exe_1234.exe', paths=[dirname])
util.which('asv_test_exe_4321.bat', paths=[dirname])
# Check non-existent files
with pytest.raises(IOError):
util.which('nonexistent.exe', paths=[dirname])
finally:
os.environ['PATH'] = old_path
def test_write_load_json(tmpdir):
data = {
'a': 1,
'b': 2,
'c': 3
}
orig_data = dict(data)
filename = os.path.join(six.text_type(tmpdir), 'test.json')
util.write_json(filename, data)
data2 = util.load_json(filename)
assert data == orig_data
assert data2 == orig_data
util.write_json(filename, data, 3)
data2 = util.load_json(filename, 3)
assert data == orig_data
assert data2 == orig_data
# Wrong API version must fail to load
with pytest.raises(util.UserError):
util.load_json(filename, 2)
with pytest.raises(util.UserError):
util.load_json(filename, 4)
util.write_json(filename, data)
with pytest.raises(util.UserError):
util.load_json(filename, 3)
def test_human_float():
items = [
# (expected, value, significant, truncate_small, significant_zeros, reference_value)
# significant
("1", 1.2345, 1),
("1.2", 1.2345, 2),
("1.23", 1.2345, 3),
("100", 123.45, 1),
("120", 123.45, 2),
("123", 123.45, 3),
("123.5", 123.45, 4),
("0.001", 0.0012346, 1),
("0.001235", 0.0012346, 4),
# significant zeros
("0.001", 0.001, 1, None, True),
("0.0010", 0.001, 2, None, True),
("0.00100", 0.001, 3, None, True),
("1", 1, 1, None, True),
("1.0", 1, 2, None, True),
("1.00", 1, 3, None, True),
# truncate small
("0", 0.001, 2, 0),
("0", 0.001, 2, 1),
("0.001", 0.001, 2, 2),
# non-finite
("inf", float('inf'), 1),
("-inf", -float('inf'), 1),
("nan", float('nan'), 1),
# negative
("-1", -1.2345, 1),
("-0.00100", -0.001, 3, None, True),
("-0", -0.001, 2, 1),
("-0.001", -0.001, 2, 2),
]
for item in items:
expected = item[0]
got = util.human_float(*item[1:])
assert got == expected, item
def test_human_time():
items = [
# (expected, value, err)
# scales
("1.00ns", 1e-9),
("1.10μs", 1.1e-6),
("1.12ms", 1.12e-3),
("1.12s", 1.123),
("1.13s", 1.126),
("1.00m", 60),
("2.00h", 3600*2),
("0s", 0),
("n/a", float("nan")),
# err
("1.00±1ns", 1e-9, 1e-9),
("1.00±0.1ns", 1e-9, 0.1e-9),
("1.00±0.01ns", 1e-9, 0.01e-9),
("1.00±0.01ns", 1e-9, 0.006e-9),
("1.00±0ns", 1e-9, 0.001e-9),
("1.00±1000000ns", 1e-9, 1e-3),
("0±1s", 0, 1),
("0±1ms", 0, 1e-3),
("0±0s", 0, 0),
]
for item in items:
expected = item[0]
got = util.human_time(*item[1:])
assert got == expected, item
got = util.human_value(item[1], 'seconds', *item[2:])
assert got == expected, item
def test_human_file_size():
items = [
# (expected, value, err)
# scales
("1", 1),
("999", 999),
("1k", 1000),
("1.1M", 1.1e6),
("1.12G", 1.12e9),
("1.12T", 1.123e12),
# err
("1±2", 1, 2),
("1±0.1k", 1e3, 123),
("12.3±4M", 12.34e6, 4321e3),
]
for item in items:
expected = item[0]
got = util.human_file_size(*item[1:])
assert got == expected, item
got = util.human_value(item[1], 'bytes', *item[2:])
assert got == expected, item
def test_parse_human_time():
items = [
# (value, expected)
("1", 60*60*24),
("1h", 60*60),
("1w", 60*60*24*7),
]
for value, expected in items:
result = util.parse_human_time(value)
assert result == expected
bad_items = [
"1:",
".",
"1x",
]
for value in bad_items:
with pytest.raises(ValueError):
util.parse_human_time(value)
def test_is_main_thread():
if sys.version_info[0] >= 3:
# NB: the test itself doesn't necessarily run in main thread...
is_main = (threading.current_thread() == threading.main_thread())
assert util.is_main_thread() == is_main
results = []
def worker():
results.append(util.is_main_thread())
thread = threading.Thread(target=worker)
thread.start()
thread.join()
assert results == [False]
def test_json_non_ascii(tmpdir):
non_ascii_data = [{'😼': '難', 'ä': 3}]
fn = os.path.join(str(tmpdir), "nonascii.json")
util.write_json(fn, non_ascii_data)
data = util.load_json(fn)
assert data == non_ascii_data
def test_interpolate_command():
good_items = [
('python {inputs}', dict(inputs='9'),
['python', '9'], {}, {0}, None),
('python "{inputs}"', dict(inputs='9'),
['python', '9'], {}, {0}, None),
('python {inputs}', dict(inputs=''),
['python', ''], {}, {0}, None),
('HELLO="asd" python "{inputs}"', dict(inputs='9'),
['python', '9'], {'HELLO': 'asd'}, {0}, None),
('HELLO="asd" return-code=any python "{inputs}"', dict(inputs='9'),
['python', '9'], {'HELLO': 'asd'}, None, None),
('HELLO="asd" return-code=255 python "{inputs}"', dict(inputs='9'),
['python', '9'], {'HELLO': 'asd'}, {255}, None),
('HELLO="asd" return-code=255 python "{inputs}"', dict(inputs='9'),
['python', '9'], {'HELLO': 'asd'}, {255}, None),
('HELLO="asd" in-dir="{somedir}" python', dict(somedir='dir'),
['python'], {'HELLO': 'asd'}, {0}, 'dir'),
]
bad_items = [
('python {foo}', {}),
('HELLO={foo} python', {}),
('return-code=none python', {}),
('return-code= python', {}),
('return-code=, python', {}),
('return-code=1,,2 python', {}),
('return-code=1 return-code=2 python', {}),
('in-dir=a in-dir=b python', {}),
]
for value, variables, e_parts, e_env, e_codes, e_cwd in good_items:
parts, env, codes, cwd = util.interpolate_command(value, variables)
assert parts == e_parts
assert env == e_env
assert codes == e_codes
assert cwd == e_cwd
for value, variables in bad_items:
with pytest.raises(util.UserError):
util.interpolate_command(value, variables)
def test_datetime_to_js_timestamp():
tss = [0, 0.5, -0.5, 12345.6789, -12345.6789,
1535910708.7767508]
for ts in tss:
t = datetime.datetime.utcfromtimestamp(ts)
ts2 = util.datetime_to_js_timestamp(t)
assert abs(ts * 1000 - ts2) <= 0.5
# Check sub-second precision
ms = 50
ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 1000*ms)
assert util.datetime_to_js_timestamp(ts) == ms
# Check rounding
ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 500)
assert util.datetime_to_js_timestamp(ts) == 1
ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 499)
assert util.datetime_to_js_timestamp(ts) == 0
def test_datetime_to_timestamp():
tss = [0, 0.5, -0.5, 12345.6789, -12345.6789,
1535910708.7767508]
for ts in tss:
t = datetime.datetime.utcfromtimestamp(ts)
ts2 = util.datetime_to_timestamp(t)
assert abs(ts - ts2) <= 0.5
# Check rounding
ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 500000)
assert util.datetime_to_timestamp(ts) == 1
ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 500000 - 1)
assert util.datetime_to_timestamp(ts) == 0
def test_check_output_exit_code(capsys):
with pytest.raises(util.ProcessError):
util.check_output([sys.executable, '-c', 'import sys; sys.exit(1)'])
out, err = capsys.readouterr()
assert '(exit status 1)' in out
def test_geom_mean_na():
for x in [[1, 2, -3], [1, 2, 3], [3, 1, 3, None, None]]:
expected = abs(x[0]*x[1]*x[2])**(1/3)
assert abs(util.geom_mean_na(x) - expected) < 1e-10
| bsd-3-clause | -2,144,581,552,295,334,100 | 27.610837 | 92 | 0.547607 | false |
jodygarnett/qgis-geoserver-plugin | src/geoserverexplorer/processingprovider/uploadvector.py | 1 | 1428 | import os
from qgis.core import *
from geoserveralgorithm import GeoServerAlgorithm
from processing.core.parameters import *
from processing.tools import dataobjects
class UploadVector(GeoServerAlgorithm):
INPUT = 'INPUT'
WORKSPACE = 'WORKSPACE'
def processAlgorithm(self, progress):
self.createCatalog()
inputFilename = self.getParameterValue(self.INPUT)
layer = dataobjects.getObjectFromUri(inputFilename)
workspaceName = self.getParameterValue(self.WORKSPACE)
filename = dataobjects.exportVectorLayer(layer)
basefilename = os.path.basename(filename)
basepathname = os.path.dirname(filename) + os.sep \
+ basefilename[:basefilename.find('.')]
connection = {
'shp': basepathname + '.shp',
'shx': basepathname + '.shx',
'dbf': basepathname + '.dbf',
'prj': basepathname + '.prj',
}
workspace = self.catalog.get_workspace(workspaceName)
self.catalog.create_featurestore(basefilename, connection, workspace)
def defineCharacteristics(self):
self.addBaseParameters()
self.name = 'Upload vector'
self.group = 'GeoServer tools'
self.addParameter(ParameterVector(self.INPUT, 'Layer to import',
[ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterString(self.WORKSPACE, 'Workspace'))
| gpl-2.0 | -4,286,760,542,678,160,400 | 36.578947 | 77 | 0.659664 | false |
jsma/django-cms | menus/menu_pool.py | 5 | 15748 | # -*- coding: utf-8 -*-
import warnings
from functools import partial
from logging import getLogger
from django.conf import settings
from django.contrib import messages
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.urlresolvers import NoReverseMatch
from django.utils.translation import get_language
from django.utils.translation import ugettext_lazy as _
from cms.utils import get_cms_setting
from cms.utils.django_load import load
from menus.base import Menu
from menus.exceptions import NamespaceAlreadyRegistered
from menus.models import CacheKey
import copy
logger = getLogger('menus')
def _build_nodes_inner_for_one_menu(nodes, menu_class_name):
'''
This is an easier to test "inner loop" building the menu tree structure
for one menu (one language, one site)
'''
done_nodes = {} # Dict of node.id:Node
final_nodes = []
# This is to prevent infinite loops - we need to compare the number of
# times we see a specific node to "something", and for the time being,
# it's the total number of nodes
list_total_length = len(nodes)
while nodes:
# For when the node has a parent_id but we haven't seen it yet.
# We must not append it to the final list in this case!
should_add_to_final_list = True
node = nodes.pop(0)
# Increment the "seen" counter for this specific node.
node._counter = getattr(node, '_counter', 0) + 1
# Implicit namespacing by menu.__name__
if not node.namespace:
node.namespace = menu_class_name
if node.namespace not in done_nodes:
# We need to create the namespace dict to avoid KeyErrors
done_nodes[node.namespace] = {}
# If we have seen the parent_id already...
if node.parent_id in done_nodes[node.namespace]:
# Implicit parent namespace by menu.__name__
if not node.parent_namespace:
node.parent_namespace = menu_class_name
parent = done_nodes[node.namespace][node.parent_id]
parent.children.append(node)
node.parent = parent
# If it has a parent_id but we haven't seen it yet...
elif node.parent_id:
# We check for infinite loops here, by comparing the number of
# times we "saw" this node to the number of nodes in the list
if node._counter < list_total_length:
nodes.append(node)
# Never add this node to the final list until it has a real
# parent (node.parent)
should_add_to_final_list = False
if should_add_to_final_list:
final_nodes.append(node)
# add it to the "seen" list
done_nodes[node.namespace][node.id] = node
return final_nodes
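# Minimal worked example for the tree building above (illustrative only; the
# node ids and parent ids are made up).
def _example_build_nodes():
    from menus.base import NavigationNode
    home = NavigationNode('home', '/', 1)
    child = NavigationNode('child', '/child/', 2, parent_id=1)
    orphan = NavigationNode('orphan', '/orphan/', 3, parent_id=99)  # parent never appears
    nodes = _build_nodes_inner_for_one_menu([home, child, orphan], 'ExampleMenu')
    # -> [home, child]: child is attached to home.children, while the orphan
    # whose parent was never seen is left out of the final list.
    return nodes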
def _get_menu_class_for_instance(menu_class, instance):
"""
Returns a new menu class that subclasses
menu_class but is bound to instance.
This means it sets the "instance" attribute of the class.
"""
attrs = {'instance': instance}
class_name = menu_class.__name__
meta_class = type(menu_class)
return meta_class(class_name, (menu_class,), attrs)
class MenuRenderer(object):
# The main logic behind this class is to decouple
# the singleton menu pool from the menu rendering logic.
# By doing this we can be sure that each request has it's
# private instance that will always have the same attributes.
def __init__(self, pool, request):
self.pool = pool
# It's important this happens on init
# because we need to make sure that a menu renderer
# points to the same registered menus as long as the
# instance lives.
self.menus = pool.get_registered_menus(for_rendering=True)
self.request = request
def _build_nodes(self, site_id):
"""
This is slow. Caching must be used.
One menu is built per language and per site.
Namespaces: they are ID prefixes to avoid node ID clashes when plugging
multiple trees together.
- We iterate on the list of nodes.
- We store encountered nodes in a dict (with namespaces):
done_nodes[<namespace>][<node's id>] = node
- When a node has a parent defined, we lookup that parent in done_nodes
if it's found:
set the node as the node's parent's child (re-read this)
else:
the node is put at the bottom of the list
"""
# Before we do anything, make sure that the menus are expanded.
# Cache key management
lang = get_language()
prefix = getattr(settings, "CMS_CACHE_PREFIX", "menu_cache_")
key = "%smenu_nodes_%s_%s" % (prefix, lang, site_id)
if self.request.user.is_authenticated():
key += "_%s_user" % self.request.user.pk
cached_nodes = cache.get(key, None)
if cached_nodes:
return cached_nodes
final_nodes = []
toolbar = getattr(self.request, 'toolbar', None)
for menu_class_name in self.menus:
menu = self.get_menu(menu_class_name)
try:
nodes = menu.get_nodes(self.request)
except NoReverseMatch:
# Apps might raise NoReverseMatch if an apphook does not yet
# exist, skip them instead of crashing
nodes = []
if toolbar and toolbar.is_staff:
messages.error(self.request,
_('Menu %s cannot be loaded. Please, make sure all '
'its urls exist and can be resolved.') %
menu_class_name)
logger.error("Menu %s could not be loaded." %
menu_class_name, exc_info=True)
# nodes is a list of navigation nodes (page tree in cms + others)
final_nodes += _build_nodes_inner_for_one_menu(
nodes, menu_class_name)
cache.set(key, final_nodes, get_cms_setting('CACHE_DURATIONS')['menus'])
# We need to have a list of the cache keys for languages and sites that
# span several processes - so we follow the Django way and share through
# the database. It's still cheaper than recomputing every time!
# This way we can selectively invalidate per-site and per-language,
# since the cache shared but the keys aren't
CacheKey.objects.get_or_create(key=key, language=lang, site=site_id)
return final_nodes
def _mark_selected(self, nodes):
# There /may/ be two nodes that get marked as selected: a published
# and a draft version of the node. We'll mark both; later, the unused
# one will be removed anyway.
sel = []
for node in nodes:
node.sibling = False
node.ancestor = False
node.descendant = False
node_abs_url = node.get_absolute_url()
if node_abs_url == self.request.path[:len(node_abs_url)]:
if sel:
if len(node_abs_url) > len(sel[0].get_absolute_url()):
sel = [node]
elif len(node_abs_url) == len(sel[0].get_absolute_url()):
sel.append(node)
else:
sel = [node]
for node in nodes:
node.selected = (node in sel)
return nodes
def apply_modifiers(self, nodes, namespace=None, root_id=None,
post_cut=False, breadcrumb=False):
if not post_cut:
nodes = self._mark_selected(nodes)
# Only fetch modifiers when they're needed.
# We can do this because unlike menu classes,
# modifiers can't change on a request basis.
for cls in self.pool.get_registered_modifiers():
inst = cls(renderer=self)
nodes = inst.modify(
self.request, nodes, namespace, root_id, post_cut, breadcrumb)
return nodes
def get_nodes(self, namespace=None, root_id=None, site_id=None, breadcrumb=False):
if not site_id:
site_id = Site.objects.get_current().pk
nodes = self._build_nodes(site_id)
nodes = copy.deepcopy(nodes)
nodes = self.apply_modifiers(
nodes=nodes,
namespace=namespace,
root_id=root_id,
post_cut=False,
breadcrumb=breadcrumb,
)
return nodes
def get_menu(self, menu_name):
MenuClass = self.menus[menu_name]
return MenuClass(renderer=self)
class MenuPool(object):
def __init__(self):
self.menus = {}
self.modifiers = []
self.discovered = False
def get_renderer(self, request):
self.discover_menus()
# Returns a menu pool wrapper that is bound
# to the given request and can perform
# operations based on the given request.
return MenuRenderer(pool=self, request=request)
def discover_menus(self):
if self.discovered:
return
# FIXME: Remove in 3.4
load('menu')
load('cms_menus')
from menus.modifiers import register
register()
self.discovered = True
def get_registered_menus(self, for_rendering=False):
"""
Returns all registered menu classes.
:param for_rendering: Flag that when True forces us to include
all CMSAttachMenu subclasses, even if they're not attached.
"""
self.discover_menus()
registered_menus = {}
for menu_class_name, menu_cls in self.menus.items():
if isinstance(menu_cls, Menu):
# A Menu **instance** was registered,
# this is non-standard, but acceptable.
menu_cls = menu_cls.__class__
if hasattr(menu_cls, "get_instances"):
# It quacks like a CMSAttachMenu.
# Expand the one CMSAttachMenu into multiple classes.
# Each class is bound to the instance the menu is attached to.
_get_menu_class = partial(_get_menu_class_for_instance, menu_cls)
instances = menu_cls.get_instances() or []
for instance in instances:
# For each instance, we create a unique class
# that is bound to that instance.
# Doing this allows us to delay the instantiation
# of the menu class until it's needed.
# Plus we keep the menus consistent by always
# pointing to a class instead of an instance.
namespace = "{0}:{1}".format(
menu_class_name, instance.pk)
registered_menus[namespace] = _get_menu_class(instance)
if not instances and not for_rendering:
# The menu is a CMSAttachMenu but has no instances,
# normally we'd just ignore it but it's been
# explicitly set that we are not rendering these menus
# via the (for_rendering) flag.
registered_menus[menu_class_name] = menu_cls
elif hasattr(menu_cls, "get_nodes"):
# This is another type of Menu, cannot be expanded, but must be
# instantiated, none-the-less.
registered_menus[menu_class_name] = menu_cls
else:
raise ValidationError(
"Something was registered as a menu, but isn't.")
return registered_menus
def get_registered_modifiers(self):
return self.modifiers
def clear(self, site_id=None, language=None, all=False):
'''
This invalidates the cache for a given menu (site_id and language)
'''
if all:
cache_keys = CacheKey.objects.get_keys()
else:
cache_keys = CacheKey.objects.get_keys(site_id, language)
to_be_deleted = cache_keys.distinct().values_list('key', flat=True)
if to_be_deleted:
cache.delete_many(to_be_deleted)
cache_keys.delete()
def register_menu(self, menu_cls):
import warnings
if menu_cls.__module__.split('.')[-1] == 'menu':
warnings.warn('menu.py filename is deprecated, '
'and it will be removed in version 3.4; '
'please rename it to cms_menus.py', DeprecationWarning)
from menus.base import Menu
assert issubclass(menu_cls, Menu)
if menu_cls.__name__ in self.menus:
raise NamespaceAlreadyRegistered(
"[{0}] a menu with this name is already registered".format(
menu_cls.__name__))
# Note: menu_cls should still be the menu CLASS at this point.
self.menus[menu_cls.__name__] = menu_cls
def register_modifier(self, modifier_class):
import os
import inspect
import warnings
source_file = os.path.basename(inspect.stack()[1][1])
if source_file == 'menu.py':
warnings.warn('menu.py filename is deprecated, '
'and it will be removed in version 3.4; '
'please rename it to cms_menus.py', DeprecationWarning)
from menus.base import Modifier
assert issubclass(modifier_class, Modifier)
if modifier_class not in self.modifiers:
self.modifiers.append(modifier_class)
def get_menus_by_attribute(self, name, value):
"""
Returns the list of menus that match the name/value criteria provided.
"""
# Note that we are limiting the output to only single instances of any
# specific menu class. This is to address issue (#4041) which has
# cropped-up in 3.0.13/3.0.0.
# By setting for_rendering to False
# we're limiting the output to menus
# that are registered and have instances
# (in case of attached menus).
menus = self.get_registered_menus(for_rendering=False)
return sorted(list(set([(menu.__name__, menu.name)
for menu_class_name, menu in menus.items()
if getattr(menu, name, None) == value])))
def get_nodes_by_attribute(self, nodes, name, value):
return [node for node in nodes if node.attr.get(name, None) == value]
def apply_modifiers(self, nodes, request, namespace=None, root_id=None,
post_cut=False, breadcrumb=False):
warnings.warn('menu_pool.apply_modifiers is deprecated '
'and it will be removed in version 3.4; '
'please use the menu renderer instead.', DeprecationWarning)
renderer = self.get_renderer(request)
nodes = renderer.apply_modifiers(
nodes=nodes,
namespace=namespace,
root_id=root_id,
post_cut=post_cut,
breadcrumb=breadcrumb,
)
return nodes
def get_nodes(self, request, namespace=None, root_id=None, site_id=None,
breadcrumb=False):
warnings.warn('menu_pool.get_nodes is deprecated '
'and it will be removed in version 3.4; '
'please use the menu renderer instead.', DeprecationWarning)
renderer = self.get_renderer(request)
nodes = renderer.get_nodes(
namespace=namespace,
root_id=root_id,
site_id=site_id,
breadcrumb=breadcrumb,
)
return nodes
menu_pool = MenuPool()
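# Minimal registration sketch (illustrative only; "ExampleMenu" is a made-up
# menu, not something django CMS ships).
def _example_register_menu():
    from menus.base import Menu, NavigationNode

    class ExampleMenu(Menu):
        def get_nodes(self, request):
            return [NavigationNode('Example', '/example/', 1)]

    menu_pool.register_menu(ExampleMenu)
    return menu_pool.get_registered_menus(for_rendering=False)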
| bsd-3-clause | -537,476,475,371,137,340 | 39.379487 | 86 | 0.588837 | false |
tacgomes/morph | morphlib/definitions_version.py | 1 | 2380 | # Copyright (C) 2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =*= License: GPL-2 =*=
'''Functions for dealing with the definitions VERSION marker file.'''
import cliapp
import yaml
import morphlib
SUPPORTED_VERSIONS = [6, 7]
class DefinitionsVersionError(cliapp.AppException):
pass
class UnknownVersionError(DefinitionsVersionError): # pragma: no cover
def __init__(self, version):
DefinitionsVersionError.__init__(
self, "Definitions format version %s is not supported" % version)
class InvalidVersionFileError(DefinitionsVersionError): # pragma: no cover
def __init__(self, text):
DefinitionsVersionError.__init__(
self, "invalid VERSION file: '%s'" % text)
def parse_version_file(version_text):
'''Parse VERSION file and return the version of the format if:
VERSION is a YAML file
and it's a dict
and has the key 'version'
and the type stored in the 'version' key is an int
otherwise returns None
'''
yaml_obj = yaml.safe_load(version_text)
return (yaml_obj['version'] if yaml_obj is not None
and isinstance(yaml_obj, dict)
and 'version' in yaml_obj
and isinstance(yaml_obj['version'], int)
else None)
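# Worked examples for the parser above (the YAML snippets are illustrative):
#   parse_version_file('version: 7\n')   -> 7
#   parse_version_file('version: "7"\n') -> None  (value must be an int)
#   parse_version_file('7\n')            -> None  (must be a mapping)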
def check_version_file(version_text): # pragma: no cover
'''Check the VERSION information is valid and is a supported version.'''
if version_text is None:
raise InvalidVersionFileError(version_text)
version = morphlib.definitions_version.parse_version_file(version_text)
if version is None:
raise InvalidVersionFileError(version_text)
if version not in SUPPORTED_VERSIONS:
raise UnknownVersionError(version)
return version
| gpl-2.0 | -2,368,373,624,016,354,300 | 28.382716 | 77 | 0.673109 | false |
caosmo/pip | pip/__init__.py | 13 | 10419 | #!/usr/bin/env python
from __future__ import absolute_import
import logging
import os
import optparse
import warnings
import sys
import re
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "7.2.0.dev0"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash or zsh).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for uninstall command
if subcommand_name == 'uninstall' and not current.startswith('-'):
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands_dict[subcommand_name]()
options += [(opt.get_opt_string(), opt.nargs)
for opt in subcommand.parser.option_list_all
if opt.help != optparse.SUPPRESS_HELP]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
subcommands += [i.get_opt_string() for i in opts
if i.help != optparse.SUPPRESS_HELP]
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
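# Example of the environment the completion scripts set up (values are
# illustrative): completing "pip ins" in bash calls pip again with
# COMP_WORDS="pip ins", COMP_CWORD="1" and PIP_AUTO_COMPLETE="1", and
# autocomplete() prints the matching subcommands (here "install") and exits.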
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3])
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
# general_options: ['--timeout==5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
def check_isolated(args):
isolated = False
if "--isolated" in args:
isolated = True
return isolated
def main(args=None):
if args is None:
args = sys.argv[1:]
# Enable our Deprecation Warnings
for deprecation_warning in deprecation.DEPRECATIONS:
warnings.simplefilter("default", deprecation_warning)
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parseopts(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
return command.main(cmd_args)
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links, find_tags=False):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip.vcs import vcs, get_src_requirement
if vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location, find_tags)
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
sys.exit(main())
| mit | -3,944,030,378,682,795,500 | 32.07619 | 79 | 0.568577 | false |
rwightman/pytorch-image-models | timm/utils/model_ema.py | 1 | 5670 | """ Exponential Moving Average (EMA) of model updates
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
_logger = logging.getLogger(__name__)
class ModelEma:
""" Model Exponential Moving Average (DEPRECATED)
Keep a moving average of everything in the model state_dict (parameters and buffers).
This version is deprecated, it does not work with scripted models. Will be removed eventually.
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
    This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device='', resume=''):
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(model)
self.ema.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if device:
self.ema.to(device=device)
self.ema_has_module = hasattr(self.ema, 'module')
if resume:
self._load_checkpoint(resume)
for p in self.ema.parameters():
p.requires_grad_(False)
def _load_checkpoint(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
assert isinstance(checkpoint, dict)
if 'state_dict_ema' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict_ema'].items():
# ema model may have been wrapped by DataParallel, and need module prefix
if self.ema_has_module:
name = 'module.' + k if not k.startswith('module') else k
else:
name = k
new_state_dict[name] = v
self.ema.load_state_dict(new_state_dict)
_logger.info("Loaded state_dict_ema")
else:
_logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
def update(self, model):
# correct a mismatch in state dict keys
needs_module = hasattr(model, 'module') and not self.ema_has_module
with torch.no_grad():
msd = model.state_dict()
for k, ema_v in self.ema.state_dict().items():
if needs_module:
k = 'module.' + k
model_v = msd[k].detach()
if self.device:
model_v = model_v.to(device=self.device)
ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
class ModelEmaV2(nn.Module):
""" Model Exponential Moving Average V2
Keep a moving average of everything in the model state_dict (parameters and buffers).
V2 of this module is simpler, it does not match params/buffers based on name but simply
iterates in order. It works with torchscript (JIT of full model).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
    This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device=None):
super(ModelEmaV2, self).__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def _update(self, model, update_fn):
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def update(self, model):
self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m)
def set(self, model):
self._update(model, update_fn=lambda e, m: m)
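# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how ModelEmaV2 is typically driven from a training loop.
# The names `model`, `loader`, `optimizer` and `loss_fn` are assumptions made purely
# for illustration; the decay and device values are examples, not recommendations.
def _example_model_ema_v2_usage(model, loader, optimizer, loss_fn):
    # Keep the averaged copy on CPU to save GPU memory (validate it manually later).
    ema = ModelEmaV2(model, decay=0.9998, device='cpu')
    for inputs, targets in loader:
        optimizer.zero_grad()
        loss_fn(model(inputs), targets).backward()
        optimizer.step()
        ema.update(model)  # fold the freshly updated weights into the moving average
    return ema.module  # the smoothed model, e.g. for evaluation or checkpointing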
| apache-2.0 | -66,506,024,903,776,400 | 44 | 102 | 0.657319 | false |
jackoalan/PSPL | Extensions/PMDLFormat/Blender/io_pspl/pspl_uvgen.py | 1 | 2087 | '''
PSPL Blender Addon
By Jack Andersen <[email protected]>
This file re-indexes UV generators for runtime-compatible materials
with valid texture slots.
'''
# Blend types supported by PSPL
pspl_compatible_blend_types = ['MIX', 'MULTIPLY', 'ADD', 'SUBTRACT']
# UV generator types types supported by PSPL
pspl_compatible_uvgen_types = ['ORCO', 'UV', 'NORMAL']
# Filter compatible texture slots
def pspl_filter_texture_slots(material):
tex_slots = []
for i in range(len(material.texture_slots)):
slot = material.texture_slots[i]
if slot is None:
continue
# Ensure PSPL compatibility
if not slot.use:
continue
if not (slot.use_map_color_diffuse or slot.use_map_alpha):
continue
if slot.blend_type not in pspl_compatible_blend_types:
continue
if slot.texture_coords not in pspl_compatible_uvgen_types:
continue
if slot.texture.type != 'IMAGE':
continue
tex_slots.append(i)
return tex_slots
# Determine PSPL UV index
def pspl_uv_index(material, tex_name):
tex_slots = pspl_filter_texture_slots(material)
this_tex_slot_idx = material.texture_slots.find(tex_name)
if this_tex_slot_idx >= 0:
if this_tex_slot_idx not in tex_slots:
return -1
else:
return -1
this_tex_slot = material.texture_slots[this_tex_slot_idx]
return_uv_idx = 0
for tex_slot_idx in tex_slots:
tex_slot = material.texture_slots[tex_slot_idx]
if tex_slot.texture.pspl_uv_share_texture and tex_slot.texture.pspl_uv_share_texture != tex_slot.name:
if tex_slot == this_tex_slot:
child_slot = pspl_uv_index(material, tex_slot.texture.pspl_uv_share_texture)
if child_slot >= 0:
return child_slot
else:
continue
if tex_slot == this_tex_slot:
return return_uv_idx
return_uv_idx += 1
return -1
| mit | -2,607,867,614,531,877,000 | 27.986111 | 110 | 0.597508 | false |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_core/variables/expression_tests/aliases_file.py | 2 | 1737 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset import Dataset
from opus_core.storage_factory import StorageFactory
from numpy import array, ma
class Tests(opus_unittest.OpusTestCase):
def test_alias_file(self):
# this tests aliases in the file 'aliases.py'
# expr1 and expr2 are aliases, while expr3 is an ordinary variable,
# just to make sure that aliases and ordinary variables interoperate correctly
expr1 = "opus_core.test_agent.income_times_10"
expr2 = "opus_core.test_agent.income_times_5"
expr3 = "opus_core.test_agent.income_times_2"
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(
table_name='test_agents',
table_data={
"income":array([1,5,10]),
"id":array([1,3,4])
}
)
dataset = Dataset(in_storage=storage, in_table_name='test_agents', id_name="id", dataset_name="test_agent")
result1 = dataset.compute_variables([expr1])
self.assert_(ma.allclose(result1, array([10, 50, 100]), rtol=1e-6), "Error in test_alias_file")
result2 = dataset.compute_variables([expr2])
self.assert_(ma.allclose(result2, array([5, 25, 50]), rtol=1e-6), "Error in test_alias_file")
result3 = dataset.compute_variables([expr3])
self.assert_(ma.allclose(result3, array([2, 10, 20]), rtol=1e-6), "Error in test_alias_file")
if __name__=='__main__':
opus_unittest.main()
| gpl-2.0 | -6,464,573,354,038,030,000 | 44.945946 | 115 | 0.633276 | false |
moijes12/oh-mainline | vendor/packages/twisted/twisted/enterprise/sqlreflector.py | 19 | 11865 | # -*- test-case-name: twisted.test.test_reflector -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.enterprise import reflector
from twisted.enterprise.util import DBError, getKeyColumn, quote, safe
from twisted.enterprise.util import _TableInfo
from twisted.enterprise.row import RowObject
from twisted.python import reflect
class SQLReflector(reflector.Reflector):
"""
DEPRECATED.
I reflect on a database and load RowObjects from it.
In order to do this, I interrogate a relational database to
extract schema information and interface with RowObject class
objects that can interact with specific tables.
"""
populated = 0
conditionalLabels = {
reflector.EQUAL : "=",
reflector.LESSTHAN : "<",
reflector.GREATERTHAN : ">",
reflector.LIKE : "like"
}
def __init__(self, dbpool, rowClasses):
"""Initialize me against a database.
"""
reflector.Reflector.__init__(self, rowClasses)
self.dbpool = dbpool
def _populate(self):
self._transPopulateSchema()
def _transPopulateSchema(self):
"""Used to construct the row classes in a single interaction.
"""
for rc in self.rowClasses:
if not issubclass(rc, RowObject):
raise DBError("Stub class (%s) is not derived from RowObject" % reflect.qual(rc.rowClass))
self._populateSchemaFor(rc)
self.populated = 1
def _populateSchemaFor(self, rc):
"""Construct all the SQL templates for database operations on
<tableName> and populate the class <rowClass> with that info.
"""
attributes = ("rowColumns", "rowKeyColumns", "rowTableName" )
for att in attributes:
if not hasattr(rc, att):
raise DBError("RowClass %s must have class variable: %s" % (rc, att))
tableInfo = _TableInfo(rc)
tableInfo.updateSQL = self.buildUpdateSQL(tableInfo)
tableInfo.insertSQL = self.buildInsertSQL(tableInfo)
tableInfo.deleteSQL = self.buildDeleteSQL(tableInfo)
self.populateSchemaFor(tableInfo)
def escape_string(self, text):
"""Escape a string for use in an SQL statement. The default
implementation escapes ' with '' and \ with \\. Redefine this
function in a subclass if your database server uses different
escaping rules.
"""
return safe(text)
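    # Illustrative example (not part of the original class), following the escaping
    # rules stated in the docstring above: escape_string("O'Brien") returns "O''Brien",
    # and any backslash in the input is doubled to "\\".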
def quote_value(self, value, type):
"""Format a value for use in an SQL statement.
@param value: a value to format as data in SQL.
@param type: a key in util.dbTypeMap.
"""
return quote(value, type, string_escaper=self.escape_string)
def loadObjectsFrom(self, tableName, parentRow=None, data=None,
whereClause=None, forceChildren=0):
"""Load a set of RowObjects from a database.
Create a set of python objects of <rowClass> from the contents
of a table populated with appropriate data members.
Example::
| class EmployeeRow(row.RowObject):
| pass
|
| def gotEmployees(employees):
| for emp in employees:
| emp.manager = "fred smith"
| manager.updateRow(emp)
|
| reflector.loadObjectsFrom("employee",
| data = userData,
| whereClause = [("manager" , EQUAL, "fred smith")]
| ).addCallback(gotEmployees)
NOTE: the objects and all children should be loaded in a single transaction.
NOTE: can specify a parentRow _OR_ a whereClause.
"""
if parentRow and whereClause:
raise DBError("Must specify one of parentRow _OR_ whereClause")
if parentRow:
info = self.getTableInfo(parentRow)
relationship = info.getRelationshipFor(tableName)
whereClause = self.buildWhereClause(relationship, parentRow)
elif whereClause:
pass
else:
whereClause = []
return self.dbpool.runInteraction(self._rowLoader, tableName,
parentRow, data, whereClause,
forceChildren)
def _rowLoader(self, transaction, tableName, parentRow, data,
whereClause, forceChildren):
"""immediate loading of rowobjects from the table with the whereClause.
"""
tableInfo = self.schema[tableName]
# Build the SQL for the query
sql = "SELECT "
first = 1
for column, type in tableInfo.rowColumns:
if first:
first = 0
else:
sql = sql + ","
sql = sql + " %s" % column
sql = sql + " FROM %s " % (tableName)
if whereClause:
sql += " WHERE "
first = 1
for wItem in whereClause:
if first:
first = 0
else:
sql += " AND "
(columnName, cond, value) = wItem
t = self.findTypeFor(tableName, columnName)
quotedValue = self.quote_value(value, t)
sql += "%s %s %s" % (columnName, self.conditionalLabels[cond],
quotedValue)
# execute the query
transaction.execute(sql)
rows = transaction.fetchall()
# construct the row objects
results = []
newRows = []
for args in rows:
kw = {}
for i in range(0,len(args)):
ColumnName = tableInfo.rowColumns[i][0].lower()
for attr, type in tableInfo.rowClass.rowColumns:
if attr.lower() == ColumnName:
kw[attr] = args[i]
break
# find the row in the cache or add it
resultObject = self.findInCache(tableInfo.rowClass, kw)
if not resultObject:
meth = tableInfo.rowFactoryMethod[0]
resultObject = meth(tableInfo.rowClass, data, kw)
self.addToCache(resultObject)
newRows.append(resultObject)
results.append(resultObject)
# add these rows to the parentRow if required
if parentRow:
self.addToParent(parentRow, newRows, tableName)
# load children or each of these rows if required
for relationship in tableInfo.relationships:
if not forceChildren and not relationship.autoLoad:
continue
for row in results:
# build where clause
childWhereClause = self.buildWhereClause(relationship, row)
# load the children immediately, but do nothing with them
self._rowLoader(transaction,
relationship.childRowClass.rowTableName,
row, data, childWhereClause, forceChildren)
return results
def findTypeFor(self, tableName, columnName):
tableInfo = self.schema[tableName]
columnName = columnName.lower()
for column, type in tableInfo.rowColumns:
if column.lower() == columnName:
return type
def buildUpdateSQL(self, tableInfo):
"""(Internal) Build SQL template to update a RowObject.
        Returns: the SQL template string, with %s placeholders for column and key values.
"""
sql = "UPDATE %s SET" % tableInfo.rowTableName
# build update attributes
first = 1
for column, type in tableInfo.rowColumns:
if getKeyColumn(tableInfo.rowClass, column):
continue
if not first:
sql = sql + ", "
sql = sql + " %s = %s" % (column, "%s")
first = 0
# build where clause
first = 1
sql = sql + " WHERE "
for keyColumn, type in tableInfo.rowKeyColumns:
if not first:
sql = sql + " AND "
sql = sql + " %s = %s " % (keyColumn, "%s")
first = 0
return sql
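    # Illustrative sketch (not part of the original class): for a hypothetical row class
    # with rowTableName 'employee', non-key columns ('name', 'manager') and key column
    # 'id', the template built above comes out roughly as:
    #   UPDATE employee SET name = %s,  manager = %s WHERE  id = %s
    # The %s placeholders are filled in later by updateRowSQL() with quoted values.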
def buildInsertSQL(self, tableInfo):
"""(Internal) Build SQL template to insert a new row.
Returns: SQL that is used to insert a new row for a rowObject
instance not created from the database.
"""
sql = "INSERT INTO %s (" % tableInfo.rowTableName
# build column list
first = 1
for column, type in tableInfo.rowColumns:
if not first:
sql = sql + ", "
sql = sql + column
first = 0
sql = sql + " ) VALUES ("
# build values list
first = 1
for column, type in tableInfo.rowColumns:
if not first:
sql = sql + ", "
sql = sql + "%s"
first = 0
sql = sql + ")"
return sql
def buildDeleteSQL(self, tableInfo):
"""Build the SQL template to delete a row from the table.
"""
sql = "DELETE FROM %s " % tableInfo.rowTableName
# build where clause
first = 1
sql = sql + " WHERE "
for keyColumn, type in tableInfo.rowKeyColumns:
if not first:
sql = sql + " AND "
sql = sql + " %s = %s " % (keyColumn, "%s")
first = 0
return sql
def updateRowSQL(self, rowObject):
"""Build SQL to update the contents of rowObject.
"""
args = []
tableInfo = self.schema[rowObject.rowTableName]
# build update attributes
for column, type in tableInfo.rowColumns:
if not getKeyColumn(rowObject.__class__, column):
args.append(self.quote_value(rowObject.findAttribute(column),
type))
# build where clause
for keyColumn, type in tableInfo.rowKeyColumns:
args.append(self.quote_value(rowObject.findAttribute(keyColumn),
type))
return self.getTableInfo(rowObject).updateSQL % tuple(args)
def updateRow(self, rowObject):
"""Update the contents of rowObject to the database.
"""
sql = self.updateRowSQL(rowObject)
rowObject.setDirty(0)
return self.dbpool.runOperation(sql)
def insertRowSQL(self, rowObject):
"""Build SQL to insert the contents of rowObject.
"""
args = []
tableInfo = self.schema[rowObject.rowTableName]
# build values
for column, type in tableInfo.rowColumns:
args.append(self.quote_value(rowObject.findAttribute(column),type))
return self.getTableInfo(rowObject).insertSQL % tuple(args)
def insertRow(self, rowObject):
"""Insert a new row for rowObject.
"""
rowObject.setDirty(0)
sql = self.insertRowSQL(rowObject)
return self.dbpool.runOperation(sql)
def deleteRowSQL(self, rowObject):
"""Build SQL to delete rowObject from the database.
"""
args = []
tableInfo = self.schema[rowObject.rowTableName]
# build where clause
for keyColumn, type in tableInfo.rowKeyColumns:
args.append(self.quote_value(rowObject.findAttribute(keyColumn),
type))
return self.getTableInfo(rowObject).deleteSQL % tuple(args)
def deleteRow(self, rowObject):
"""Delete the row for rowObject from the database.
"""
sql = self.deleteRowSQL(rowObject)
self.removeFromCache(rowObject)
return self.dbpool.runOperation(sql)
__all__ = ['SQLReflector']
| agpl-3.0 | 1,025,434,203,278,956,700 | 35.284404 | 106 | 0.56182 | false |
total-impact/depsy | models/search.py | 3 | 1682 | from sqlalchemy import sql
from package import prep_summary
from app import db
def autocomplete(search_str):
command = """(select project_name, impact, api_raw->'info'->>'summary' as summary, 'pypi_project' as type, 1 as first_sort, id
from package
where host='pypi'
and project_name ilike '{str}%'
order by impact desc
limit 3)
union
(select project_name, impact, api_raw->>'Title' as summary, 'cran_project' as type, 2 as first_sort, id
from package
where host='cran'
and project_name ilike '{str}%'
order by impact desc
limit 3)
union
(select name, impact, github_about->>'company' as summary, 'person' as type, 3 as first_sort, id::text as id
from person
where name ilike '{str}%'
or name ilike '% {str}%'
order by impact desc
limit 3)
union
(select unique_tag, "count_academic" as impact, namespace as summary, 'tag' as type, 4 as first_sort, id
from tags
where unique_tag ilike '{str}%'
or unique_tag ilike '% {str}%'
or unique_tag ilike '/{str}%'
order by "count_academic" desc
limit 3)
order by first_sort, impact desc""".format(str=search_str)
res = db.session.connection().execute(sql.text(command))
rows = res.fetchall()
ret = []
prev_type = "there is no current type"
for row in rows:
ret.append({
"name": row[0],
"impact": row[1],
"summary": prep_summary(row[2]),
"type": row[3],
"is_first": prev_type != row[3],
# row[4] is first_sort param, ignore it.
"id": row[5]
})
prev_type = row[3]
return ret
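# Illustrative shape (not part of the original module) of a single entry in the list
# returned above; all values are invented for demonstration:
#   {"name": "numpy", "impact": 97.3, "summary": "Array computing...",
#    "type": "pypi_project", "is_first": True, "id": 1234}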
| mit | -3,141,067,465,177,040,400 | 24.104478 | 130 | 0.592152 | false |
BryceBrown/LinkstrDjango | rest_framework/templatetags/rest_framework.py | 1 | 9550 | from __future__ import unicode_literals, absolute_import
from django import template
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict
from django.utils.html import escape
from django.utils.safestring import SafeData, mark_safe
from rest_framework.compat import urlparse
from rest_framework.compat import force_text
from rest_framework.compat import six
import re
import string
register = template.Library()
# Note we don't use 'load staticfiles', because we need a 1.3 compatible
# version, so instead we include the `static` template tag ourselves.
# When 1.3 becomes unsupported by REST framework, we can instead start to
# use the {% load staticfiles %} tag, remove the following code,
# and add a dependency that `django.contrib.staticfiles` must be installed.
# Note: We can't put this into the `compat` module because the compat import
# from rest_framework.compat import ...
# conflicts with this rest_framework template tag module.
try: # Django 1.5+
from django.contrib.staticfiles.templatetags.staticfiles import StaticFilesNode
@register.tag('static')
def do_static(parser, token):
return StaticFilesNode.handle_token(parser, token)
except ImportError:
try: # Django 1.4
from django.contrib.staticfiles.storage import staticfiles_storage
@register.simple_tag
def static(path):
"""
A template tag that returns the URL to a file
using staticfiles' storage backend
"""
return staticfiles_storage.url(path)
except ImportError: # Django 1.3
from urlparse import urljoin
from django import template
from django.templatetags.static import PrefixNode
class StaticNode(template.Node):
def __init__(self, varname=None, path=None):
if path is None:
raise template.TemplateSyntaxError(
"Static template nodes must be given a path to return.")
self.path = path
self.varname = varname
def url(self, context):
path = self.path.resolve(context)
return self.handle_simple(path)
def render(self, context):
url = self.url(context)
if self.varname is None:
return url
context[self.varname] = url
return ''
@classmethod
def handle_simple(cls, path):
return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)
@classmethod
def handle_token(cls, parser, token):
"""
Class method to parse prefix node and return a Node.
"""
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError(
"'%s' takes at least one argument (path to file)" % bits[0])
path = parser.compile_filter(bits[1])
if len(bits) >= 2 and bits[-2] == 'as':
varname = bits[3]
else:
varname = None
return cls(varname, path)
@register.tag('static')
def do_static_13(parser, token):
return StaticNode.handle_token(parser, token)
def replace_query_param(url, key, val):
"""
Given a URL and a key/val pair, set or replace an item in the query
parameters of the URL, and return the new URL.
"""
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = QueryDict(query).copy()
query_dict[key] = val
query = query_dict.urlencode()
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
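# Illustrative example (not part of the original module); the URL is made up:
#   replace_query_param('/api/items/?page=2', 'page', 3) -> '/api/items/?page=3'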
# Regex for adding classes to html snippets
class_re = re.compile(r'(?<=class=["\'])(.*)(?=["\'])')
# Bunch of stuff cloned from urlize
LEADING_PUNCTUATION = ['(', '<', '<', '"', "'"]
TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '>', '"', "'"]
DOTS = ['·', '*', '\xe2\x80\xa2', '•', '•', '•']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \
('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]),
'|'.join([re.escape(x) for x in TRAILING_PUNCTUATION])))
simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?: |\s|<br \/>)*?</p>\s*)+\Z')
# And the template tags themselves...
@register.simple_tag
def optional_login(request):
"""
Include a login snippet if REST framework's login view is in the URLconf.
"""
try:
login_url = reverse('rest_framework:login')
except NoReverseMatch:
return ''
snippet = "<a href='%s?next=%s'>Log in</a>" % (login_url, request.path)
return snippet
@register.simple_tag
def optional_logout(request):
"""
Include a logout snippet if REST framework's logout view is in the URLconf.
"""
try:
logout_url = reverse('rest_framework:logout')
except NoReverseMatch:
return ''
snippet = "<a href='%s?next=%s'>Log out</a>" % (logout_url, request.path)
return snippet
@register.simple_tag
def add_query_param(request, key, val):
"""
Add a query parameter to the current request url, and return the new url.
"""
return replace_query_param(request.get_full_path(), key, val)
@register.filter
def add_class(value, css_class):
"""
http://stackoverflow.com/questions/4124220/django-adding-css-classes-when-rendering-form-fields-in-a-template
Inserts classes into template variables that contain HTML tags,
useful for modifying forms without needing to change the Form objects.
Usage:
{{ field.label_tag|add_class:"control-label" }}
In the case of REST Framework, the filter is used to add Bootstrap-specific
classes to the forms.
"""
html = six.text_type(value)
match = class_re.search(html)
if match:
m = re.search(r'^%s$|^%s\s|\s%s\s|\s%s$' % (css_class, css_class,
css_class, css_class),
match.group(1))
if not m:
return mark_safe(class_re.sub(match.group(1) + " " + css_class,
html))
else:
return mark_safe(html.replace('>', ' class="%s">' % css_class, 1))
return value
@register.filter
def urlize_quoted_links(text, trim_url_limit=None, nofollow=True, autoescape=True):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links and links ending in .org, .net or
.com. Links can have trailing punctuation (periods, commas, close-parens)
and leading punctuation (opening parens) and it'll still do the right
thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
    will be truncated to trim_url_limit-3 characters and appended with an ellipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
nofollow_attr = nofollow and ' rel="nofollow"' or ''
for i, word in enumerate(words):
match = None
if '.' in word or '@' in word or ':' in word:
match = punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
# Make URL we want to point to.
url = None
if middle.startswith('http://') or middle.startswith('https://'):
url = middle
elif middle.startswith('www.') or ('@' not in middle and \
middle and middle[0] in string.ascii_letters + string.digits and \
(middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))):
url = 'http://%s' % middle
elif '@' in middle and not ':' in middle and simple_email_re.match(middle):
url = 'mailto:%s' % middle
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return mark_safe(''.join(words))
| apache-2.0 | -3,123,683,102,189,466,000 | 36.598425 | 143 | 0.586911 | false |
daineseh/kodi-plugin.video.ted-talks-chinese | youtube_dl/extractor/ard.py | 4 | 12530 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .generic import GenericIE
from ..utils import (
determine_ext,
ExtractorError,
get_element_by_attribute,
qualities,
int_or_none,
parse_duration,
unified_strdate,
xpath_text,
)
from ..compat import compat_etree_fromstring
class ARDMediathekIE(InfoExtractor):
IE_NAME = 'ARD:mediathek'
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
_TESTS = [{
'url': 'http://www.ardmediathek.de/tv/Dokumentation-und-Reportage/Ich-liebe-das-Leben-trotzdem/rbb-Fernsehen/Video?documentId=29582122&bcastId=3822114',
'info_dict': {
'id': '29582122',
'ext': 'mp4',
'title': 'Ich liebe das Leben trotzdem',
'description': 'md5:45e4c225c72b27993314b31a84a5261c',
'duration': 4557,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.ardmediathek.de/tv/Tatort/Tatort-Scheinwelten-H%C3%B6rfassung-Video/Das-Erste/Video?documentId=29522730&bcastId=602916',
'md5': 'f4d98b10759ac06c0072bbcd1f0b9e3e',
'info_dict': {
'id': '29522730',
'ext': 'mp4',
'title': 'Tatort: Scheinwelten - Hörfassung (Video tgl. ab 20 Uhr)',
'description': 'md5:196392e79876d0ac94c94e8cdb2875f1',
'duration': 5252,
},
}, {
# audio
'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
'md5': '219d94d8980b4f538c7fcb0865eb7f2c',
'info_dict': {
'id': '28488308',
'ext': 'mp3',
'title': 'Tod eines Fußballers',
'description': 'md5:f6e39f3461f0e1f54bfa48c8875c86ef',
'duration': 3240,
},
}, {
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
'only_matching': True,
}]
def _extract_media_info(self, media_info_url, webpage, video_id):
media_info = self._download_json(
media_info_url, video_id, 'Downloading media JSON')
formats = self._extract_formats(media_info, video_id)
if not formats:
if '"fsk"' in webpage:
raise ExtractorError(
'This video is only available after 20:00', expected=True)
elif media_info.get('_geoblocked'):
raise ExtractorError('This video is not available due to geo restriction', expected=True)
self._sort_formats(formats)
duration = int_or_none(media_info.get('_duration'))
thumbnail = media_info.get('_previewImage')
subtitles = {}
subtitle_url = media_info.get('_subtitleUrl')
if subtitle_url:
subtitles['de'] = [{
'ext': 'ttml',
'url': subtitle_url,
}]
return {
'id': video_id,
'duration': duration,
'thumbnail': thumbnail,
'formats': formats,
'subtitles': subtitles,
}
def _extract_formats(self, media_info, video_id):
type_ = media_info.get('_type')
media_array = media_info.get('_mediaArray', [])
formats = []
for num, media in enumerate(media_array):
for stream in media.get('_mediaStreamArray', []):
stream_urls = stream.get('_stream')
if not stream_urls:
continue
if not isinstance(stream_urls, list):
stream_urls = [stream_urls]
quality = stream.get('_quality')
server = stream.get('_server')
for stream_url in stream_urls:
ext = determine_ext(stream_url)
if quality != 'auto' and ext in ('f4m', 'm3u8'):
continue
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
stream_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124',
video_id, preference=-1, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
stream_url, video_id, 'mp4', preference=1, m3u8_id='hls', fatal=False))
else:
if server and server.startswith('rtmp'):
f = {
'url': server,
'play_path': stream_url,
'format_id': 'a%s-rtmp-%s' % (num, quality),
}
elif stream_url.startswith('http'):
f = {
'url': stream_url,
'format_id': 'a%s-%s-%s' % (num, ext, quality)
}
else:
continue
m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
if m:
f.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
if type_ == 'audio':
f['vcodec'] = 'none'
formats.append(f)
return formats
def _real_extract(self, url):
# determine video id from url
m = re.match(self._VALID_URL, url)
numid = re.search(r'documentId=([0-9]+)', url)
if numid:
video_id = numid.group(1)
else:
video_id = m.group('video_id')
webpage = self._download_webpage(url, video_id)
if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
raise ExtractorError('Video %s is no longer available' % video_id, expected=True)
if 'Diese Sendung ist für Jugendliche unter 12 Jahren nicht geeignet. Der Clip ist deshalb nur von 20 bis 6 Uhr verfügbar.' in webpage:
raise ExtractorError('This program is only suitable for those aged 12 and older. Video %s is therefore only available between 20 pm and 6 am.' % video_id, expected=True)
if re.search(r'[\?&]rss($|[=&])', url):
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return GenericIE()._extract_rss(url, video_id, doc)
title = self._html_search_regex(
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
r'<meta name="dcterms.title" content="(.*?)"/>',
r'<h4 class="headline">(.*?)</h4>'],
webpage, 'title')
description = self._html_search_meta(
'dcterms.abstract', webpage, 'description', default=None)
if description is None:
description = self._html_search_meta(
'description', webpage, 'meta description')
# Thumbnail is sometimes not present.
# It is in the mobile version, but that seems to use a different URL
# structure altogether.
thumbnail = self._og_search_thumbnail(webpage, default=None)
media_streams = re.findall(r'''(?x)
mediaCollection\.addMediaStream\([0-9]+,\s*[0-9]+,\s*"[^"]*",\s*
"([^"]+)"''', webpage)
if media_streams:
QUALITIES = qualities(['lo', 'hi', 'hq'])
formats = []
for furl in set(media_streams):
if furl.endswith('.f4m'):
fid = 'f4m'
else:
fid_m = re.match(r'.*\.([^.]+)\.[^.]+$', furl)
fid = fid_m.group(1) if fid_m else None
formats.append({
'quality': QUALITIES(fid),
'format_id': fid,
'url': furl,
})
self._sort_formats(formats)
info = {
'formats': formats,
}
else: # request JSON file
info = self._extract_media_info(
'http://www.ardmediathek.de/play/media/%s' % video_id, webpage, video_id)
info.update({
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
})
return info
class ARDIE(InfoExtractor):
_VALID_URL = '(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
_TEST = {
'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
'md5': 'd216c3a86493f9322545e045ddc3eb35',
'info_dict': {
'display_id': 'die-story-im-ersten-mission-unter-falscher-flagge',
'id': '100',
'ext': 'mp4',
'duration': 2600,
'title': 'Die Story im Ersten: Mission unter falscher Flagge',
'upload_date': '20140804',
'thumbnail': 're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
player_url = mobj.group('mainurl') + '~playerXml.xml'
doc = self._download_xml(player_url, display_id)
video_node = doc.find('./video')
upload_date = unified_strdate(xpath_text(
video_node, './broadcastDate'))
thumbnail = xpath_text(video_node, './/teaserImage//variant/url')
formats = []
for a in video_node.findall('.//asset'):
f = {
'format_id': a.attrib['type'],
'width': int_or_none(a.find('./frameWidth').text),
'height': int_or_none(a.find('./frameHeight').text),
'vbr': int_or_none(a.find('./bitrateVideo').text),
'abr': int_or_none(a.find('./bitrateAudio').text),
'vcodec': a.find('./codecVideo').text,
'tbr': int_or_none(a.find('./totalBitrate').text),
}
if a.find('./serverPrefix').text:
f['url'] = a.find('./serverPrefix').text
f['playpath'] = a.find('./fileName').text
else:
f['url'] = a.find('./fileName').text
formats.append(f)
self._sort_formats(formats)
return {
'id': mobj.group('id'),
'formats': formats,
'display_id': display_id,
'title': video_node.find('./title').text,
'duration': parse_duration(video_node.find('./duration').text),
'upload_date': upload_date,
'thumbnail': thumbnail,
}
class SportschauIE(ARDMediathekIE):
IE_NAME = 'Sportschau'
_VALID_URL = r'(?P<baseurl>https?://(?:www\.)?sportschau\.de/(?:[^/]+/)+video(?P<id>[^/#?]+))\.html'
_TESTS = [{
'url': 'http://www.sportschau.de/tourdefrance/videoseppeltkokainhatnichtsmitklassischemdopingzutun100.html',
'info_dict': {
'id': 'seppeltkokainhatnichtsmitklassischemdopingzutun100',
'ext': 'mp4',
'title': 'Seppelt: "Kokain hat nichts mit klassischem Doping zu tun"',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'Der ARD-Doping Experte Hajo Seppelt gibt seine Einschätzung zum ersten Dopingfall der diesjährigen Tour de France um den Italiener Luca Paolini ab.',
},
'params': {
# m3u8 download
'skip_download': True,
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
base_url = mobj.group('baseurl')
webpage = self._download_webpage(url, video_id)
title = get_element_by_attribute('class', 'headline', webpage)
description = self._html_search_meta('description', webpage, 'description')
info = self._extract_media_info(
base_url + '-mc_defaultQuality-h.json', webpage, video_id)
info.update({
'title': title,
'description': description,
})
return info
| gpl-2.0 | 9,107,583,091,590,075,000 | 38.878981 | 181 | 0.509423 | false |
claesenm/optunity | optunity/solvers/CMAES.py | 3 | 6744 | #! /usr/bin/env python
# Copyright (c) 2014 KU Leuven, ESAT-STADIUS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import functools
from .solver_registry import register_solver
from .util import Solver, _copydoc
from . import util
_numpy_available = True
try:
import numpy as np
except ImportError:
_numpy_available = False
_deap_available = True
try:
import deap
import deap.creator
import deap.base
import deap.tools
import deap.cma
import deap.algorithms
except ImportError:
_deap_available = False
except TypeError:
# this can happen because DEAP is in Python 2
# install needs to take proper care of converting
# 2 to 3 when necessary
_deap_available = False
class CMA_ES(Solver):
"""
.. include:: /global.rst
Please refer to |cmaes| for details about this algorithm.
This solver uses an implementation available in the DEAP library [DEAP2012]_.
.. warning:: This solver has dependencies on DEAP_ and NumPy_
and will be unavailable if these are not met.
.. _DEAP: https://code.google.com/p/deap/
.. _NumPy: http://www.numpy.org
"""
def __init__(self, num_generations, sigma=1.0, Lambda=None, **kwargs):
"""blah
.. warning:: |warning-unconstrained|
"""
if not _deap_available:
raise ImportError('This solver requires DEAP but it is missing.')
if not _numpy_available:
raise ImportError('This solver requires NumPy but it is missing.')
self._num_generations = num_generations
self._start = kwargs
self._sigma = sigma
self._lambda = Lambda
@staticmethod
def suggest_from_seed(num_evals, **kwargs):
"""Verify that we can effectively make a solver.
The doctest has to be skipped from automated builds, because DEAP may not be available
and yet we want documentation to be generated.
>>> s = CMA_ES.suggest_from_seed(30, x=1.0, y=-1.0, z=2.0)
>>> solver = CMA_ES(**s) #doctest:+SKIP
"""
fertility = 4 + 3 * math.log(len(kwargs))
d = dict(kwargs)
d['num_generations'] = int(math.ceil(float(num_evals) / fertility))
# num_gen is overestimated
# this will require slightly more function evaluations than permitted by num_evals
return d
@property
def num_generations(self):
return self._num_generations
@property
def start(self):
"""Returns the starting point for CMA-ES."""
return self._start
@property
def lambda_(self):
return self._lambda
@property
def sigma(self):
return self._sigma
@_copydoc(Solver.optimize)
def optimize(self, f, maximize=True, pmap=map):
toolbox = deap.base.Toolbox()
if maximize:
fit = 1.0
else:
fit = -1.0
deap.creator.create("FitnessMax", deap.base.Fitness,
weights=(fit,))
Fit = deap.creator.FitnessMax
deap.creator.create("Individual", list,
fitness=Fit)
Individual = deap.creator.Individual
if self.lambda_:
strategy = deap.cma.Strategy(centroid=list(self.start.values()),
sigma=self.sigma, lambda_=self.lambda_)
else:
strategy = deap.cma.Strategy(centroid=list(self.start.values()),
sigma=self.sigma)
toolbox.register("generate", strategy.generate, Individual)
toolbox.register("update", strategy.update)
@functools.wraps(f)
def evaluate(individual):
return (util.score(f(**dict([(k, v)
for k, v in zip(self.start.keys(),
individual)]))),)
toolbox.register("evaluate", evaluate)
toolbox.register("map", pmap)
hof = deap.tools.HallOfFame(1)
deap.algorithms.eaGenerateUpdate(toolbox=toolbox,
ngen=self._num_generations,
halloffame=hof, verbose=False)
return dict([(k, v)
for k, v in zip(self.start.keys(), hof[0])]), None
# CMA_ES solver requires deap > 1.0.1
# http://deap.readthedocs.org/en/latest/examples/cmaes.html
if _deap_available and _numpy_available:
CMA_ES = register_solver('cma-es', 'covariance matrix adaptation evolutionary strategy',
['CMA-ES: covariance matrix adaptation evolutionary strategy',
' ',
'This method requires the following parameters:',
'- num_generations :: number of generations to use',
'- sigma :: (optional) initial covariance, default 1',
'- Lambda :: (optional) measure of reproducibility',
'- starting point: through kwargs'
' ',
'This method is described in detail in:',
'Hansen and Ostermeier, 2001. Completely Derandomized Self-Adaptation in Evolution Strategies. Evolutionary Computation'
])(CMA_ES)
| bsd-3-clause | 5,052,498,143,691,891,000 | 36.054945 | 144 | 0.621886 | false |
mKeRix/home-assistant | homeassistant/components/zwave/light.py | 12 | 13263 | """Support for Z-Wave lights."""
import logging
from threading import Timer
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
from . import CONF_REFRESH_DELAY, CONF_REFRESH_VALUE, ZWaveDeviceEntity, const
_LOGGER = logging.getLogger(__name__)
COLOR_CHANNEL_WARM_WHITE = 0x01
COLOR_CHANNEL_COLD_WHITE = 0x02
COLOR_CHANNEL_RED = 0x04
COLOR_CHANNEL_GREEN = 0x08
COLOR_CHANNEL_BLUE = 0x10
# Some bulbs have independent warm and cool white light LEDs. These need
# to be treated differently, aka the zw098 workaround. Ensure these are added
# to DEVICE_MAPPINGS below.
# (Manufacturer ID, Product ID) from
# https://github.com/OpenZWave/open-zwave/blob/master/config/manufacturer_specific.xml
AEOTEC_ZW098_LED_BULB_LIGHT = (0x86, 0x62)
AEOTEC_ZWA001_LED_BULB_LIGHT = (0x371, 0x1)
AEOTEC_ZWA002_LED_BULB_LIGHT = (0x371, 0x2)
HANK_HKZW_RGB01_LED_BULB_LIGHT = (0x208, 0x4)
ZIPATO_RGB_BULB_2_LED_BULB_LIGHT = (0x131, 0x3)
WORKAROUND_ZW098 = "zw098"
DEVICE_MAPPINGS = {
AEOTEC_ZW098_LED_BULB_LIGHT: WORKAROUND_ZW098,
AEOTEC_ZWA001_LED_BULB_LIGHT: WORKAROUND_ZW098,
AEOTEC_ZWA002_LED_BULB_LIGHT: WORKAROUND_ZW098,
HANK_HKZW_RGB01_LED_BULB_LIGHT: WORKAROUND_ZW098,
ZIPATO_RGB_BULB_2_LED_BULB_LIGHT: WORKAROUND_ZW098,
}
# Generate midpoint color temperatures for bulbs that have limited
# support for white light colors
TEMP_COLOR_MAX = 500 # mireds (inverted)
TEMP_COLOR_MIN = 154
TEMP_MID_HASS = (TEMP_COLOR_MAX - TEMP_COLOR_MIN) / 2 + TEMP_COLOR_MIN
TEMP_WARM_HASS = (TEMP_COLOR_MAX - TEMP_COLOR_MIN) / 3 * 2 + TEMP_COLOR_MIN
TEMP_COLD_HASS = (TEMP_COLOR_MAX - TEMP_COLOR_MIN) / 3 + TEMP_COLOR_MIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave Light from Config Entry."""
@callback
def async_add_light(light):
"""Add Z-Wave Light."""
async_add_entities([light])
async_dispatcher_connect(hass, "zwave_new_light", async_add_light)
def get_device(node, values, node_config, **kwargs):
"""Create Z-Wave entity device."""
refresh = node_config.get(CONF_REFRESH_VALUE)
delay = node_config.get(CONF_REFRESH_DELAY)
_LOGGER.debug(
"node=%d value=%d node_config=%s CONF_REFRESH_VALUE=%s"
" CONF_REFRESH_DELAY=%s",
node.node_id,
values.primary.value_id,
node_config,
refresh,
delay,
)
if node.has_command_class(const.COMMAND_CLASS_SWITCH_COLOR):
return ZwaveColorLight(values, refresh, delay)
return ZwaveDimmer(values, refresh, delay)
def brightness_state(value):
"""Return the brightness and state."""
if value.data > 0:
return round((value.data / 99) * 255), STATE_ON
return 0, STATE_OFF
def byte_to_zwave_brightness(value):
"""Convert brightness in 0-255 scale to 0-99 scale.
`value` -- (int) Brightness byte value from 0-255.
"""
if value > 0:
return max(1, round((value / 255) * 99))
return 0
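# Illustrative values (not part of the original integration) for the conversion above:
#   byte_to_zwave_brightness(255) -> 99, byte_to_zwave_brightness(128) -> 50,
#   byte_to_zwave_brightness(1) -> 1 (non-zero input never maps to "off"),
#   byte_to_zwave_brightness(0) -> 0.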
def ct_to_hs(temp):
"""Convert color temperature (mireds) to hs."""
colorlist = list(
color_util.color_temperature_to_hs(
color_util.color_temperature_mired_to_kelvin(temp)
)
)
return [int(val) for val in colorlist]
class ZwaveDimmer(ZWaveDeviceEntity, LightEntity):
"""Representation of a Z-Wave dimmer."""
def __init__(self, values, refresh, delay):
"""Initialize the light."""
ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self._brightness = None
self._state = None
self._supported_features = None
self._delay = delay
self._refresh_value = refresh
self._zw098 = None
# Enable appropriate workaround flags for our device
# Make sure that we have values for the key before converting to int
if self.node.manufacturer_id.strip() and self.node.product_id.strip():
specific_sensor_key = (
int(self.node.manufacturer_id, 16),
int(self.node.product_id, 16),
)
if specific_sensor_key in DEVICE_MAPPINGS:
if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZW098:
_LOGGER.debug("AEOTEC ZW098 workaround enabled")
self._zw098 = 1
# Used for value change event handling
self._refreshing = False
self._timer = None
_LOGGER.debug(
"self._refreshing=%s self.delay=%s", self._refresh_value, self._delay
)
self.value_added()
self.update_properties()
def update_properties(self):
"""Update internal properties based on zwave values."""
# Brightness
self._brightness, self._state = brightness_state(self.values.primary)
def value_added(self):
"""Call when a new value is added to this entity."""
self._supported_features = SUPPORT_BRIGHTNESS
if self.values.dimming_duration is not None:
self._supported_features |= SUPPORT_TRANSITION
def value_changed(self):
"""Call when a value for this entity's node has changed."""
if self._refresh_value:
if self._refreshing:
self._refreshing = False
else:
def _refresh_value():
"""Use timer callback for delayed value refresh."""
self._refreshing = True
self.values.primary.refresh()
if self._timer is not None and self._timer.isAlive():
self._timer.cancel()
self._timer = Timer(self._delay, _refresh_value)
self._timer.start()
return
super().value_changed()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
def _set_duration(self, **kwargs):
"""Set the transition time for the brightness value.
Zwave Dimming Duration values:
0x00 = instant
0x01-0x7F = 1 second to 127 seconds
0x80-0xFE = 1 minute to 127 minutes
0xFF = factory default
"""
if self.values.dimming_duration is None:
if ATTR_TRANSITION in kwargs:
_LOGGER.debug("Dimming not supported by %s", self.entity_id)
return
if ATTR_TRANSITION not in kwargs:
self.values.dimming_duration.data = 0xFF
return
transition = kwargs[ATTR_TRANSITION]
if transition <= 127:
self.values.dimming_duration.data = int(transition)
elif transition > 7620:
self.values.dimming_duration.data = 0xFE
_LOGGER.warning("Transition clipped to 127 minutes for %s", self.entity_id)
else:
minutes = int(transition / 60)
_LOGGER.debug(
"Transition rounded to %d minutes for %s", minutes, self.entity_id
)
self.values.dimming_duration.data = minutes + 0x7F
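    # Illustrative mapping (not part of the original integration) of the encoding
    # implemented above, following the value table in the docstring:
    #   transition=45   -> 45   (0x2D, i.e. 45 seconds)
    #   transition=300  -> 132  (0x84, i.e. 5 minutes encoded as minutes + 0x7F)
    #   transition=9000 -> 0xFE (clipped to the 127 minute maximum)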
def turn_on(self, **kwargs):
"""Turn the device on."""
self._set_duration(**kwargs)
# Zwave multilevel switches use a range of [0, 99] to control
# brightness. Level 255 means to set it to previous value.
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
brightness = byte_to_zwave_brightness(self._brightness)
else:
brightness = 255
if self.node.set_dimmer(self.values.primary.value_id, brightness):
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self._set_duration(**kwargs)
if self.node.set_dimmer(self.values.primary.value_id, 0):
self._state = STATE_OFF
class ZwaveColorLight(ZwaveDimmer):
"""Representation of a Z-Wave color changing light."""
def __init__(self, values, refresh, delay):
"""Initialize the light."""
self._color_channels = None
self._hs = None
self._ct = None
self._white = None
super().__init__(values, refresh, delay)
def value_added(self):
"""Call when a new value is added to this entity."""
super().value_added()
self._supported_features |= SUPPORT_COLOR
if self._zw098:
self._supported_features |= SUPPORT_COLOR_TEMP
elif self._color_channels is not None and self._color_channels & (
COLOR_CHANNEL_WARM_WHITE | COLOR_CHANNEL_COLD_WHITE
):
self._supported_features |= SUPPORT_WHITE_VALUE
def update_properties(self):
"""Update internal properties based on zwave values."""
super().update_properties()
if self.values.color is None:
return
if self.values.color_channels is None:
return
# Color Channels
self._color_channels = self.values.color_channels.data
# Color Data String
data = self.values.color.data
# RGB is always present in the openzwave color data string.
rgb = [int(data[1:3], 16), int(data[3:5], 16), int(data[5:7], 16)]
self._hs = color_util.color_RGB_to_hs(*rgb)
# Parse remaining color channels. Openzwave appends white channels
# that are present.
index = 7
# Warm white
if self._color_channels & COLOR_CHANNEL_WARM_WHITE:
warm_white = int(data[index : index + 2], 16)
index += 2
else:
warm_white = 0
# Cold white
if self._color_channels & COLOR_CHANNEL_COLD_WHITE:
cold_white = int(data[index : index + 2], 16)
index += 2
else:
cold_white = 0
# Color temperature. With the AEOTEC ZW098 bulb, only two color
# temperatures are supported. The warm and cold channel values
# indicate brightness for warm/cold color temperature.
if self._zw098:
if warm_white > 0:
self._ct = TEMP_WARM_HASS
self._hs = ct_to_hs(self._ct)
elif cold_white > 0:
self._ct = TEMP_COLD_HASS
self._hs = ct_to_hs(self._ct)
else:
# RGB color is being used. Just report midpoint.
self._ct = TEMP_MID_HASS
elif self._color_channels & COLOR_CHANNEL_WARM_WHITE:
self._white = warm_white
elif self._color_channels & COLOR_CHANNEL_COLD_WHITE:
self._white = cold_white
# If no rgb channels supported, report None.
if not (
self._color_channels & COLOR_CHANNEL_RED
or self._color_channels & COLOR_CHANNEL_GREEN
or self._color_channels & COLOR_CHANNEL_BLUE
):
self._hs = None
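    # Illustrative parse (not part of the original integration): with warm-white plus RGB
    # channels reported, an openzwave color string such as '#FF80001A' would be read above
    # as rgb = (255, 128, 0) and warm_white = 26 (0x1A); a cold-white byte would follow if
    # that channel were present.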
@property
def hs_color(self):
"""Return the hs color."""
return self._hs
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
return self._white
@property
def color_temp(self):
"""Return the color temperature."""
return self._ct
def turn_on(self, **kwargs):
"""Turn the device on."""
rgbw = None
if ATTR_WHITE_VALUE in kwargs:
self._white = kwargs[ATTR_WHITE_VALUE]
if ATTR_COLOR_TEMP in kwargs:
# Color temperature. With the AEOTEC ZW098 bulb, only two color
# temperatures are supported. The warm and cold channel values
# indicate brightness for warm/cold color temperature.
if self._zw098:
if kwargs[ATTR_COLOR_TEMP] > TEMP_MID_HASS:
self._ct = TEMP_WARM_HASS
rgbw = "#000000ff00"
else:
self._ct = TEMP_COLD_HASS
rgbw = "#00000000ff"
elif ATTR_HS_COLOR in kwargs:
self._hs = kwargs[ATTR_HS_COLOR]
if ATTR_WHITE_VALUE not in kwargs:
# white LED must be off in order for color to work
self._white = 0
if (
ATTR_WHITE_VALUE in kwargs or ATTR_HS_COLOR in kwargs
) and self._hs is not None:
rgbw = "#"
for colorval in color_util.color_hs_to_RGB(*self._hs):
rgbw += format(colorval, "02x")
if self._white is not None:
rgbw += format(self._white, "02x") + "00"
else:
rgbw += "0000"
if rgbw and self.values.color:
self.values.color.data = rgbw
super().turn_on(**kwargs)
| mit | 5,844,130,539,537,956,000 | 32.40806 | 87 | 0.596698 | false |
antoviaque/edx-platform | common/djangoapps/track/tests/test_shim.py | 9 | 6809 | """Ensure emitted events contain the fields legacy processors expect to find."""
from mock import sentinel
from django.test.utils import override_settings
from openedx.core.lib.tests.assertions.events import assert_events_equal
from track.tests import EventTrackingTestCase, FROZEN_TIME
LEGACY_SHIM_PROCESSOR = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
GOOGLE_ANALYTICS_PROCESSOR = [
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
@override_settings(
EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR,
)
class LegacyFieldMappingProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields legacy processors expect to find."""
def test_event_field_mapping(self):
data = {sentinel.key: sentinel.value}
context = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'username': sentinel.username,
'session': sentinel.session,
'ip': sentinel.ip,
'host': sentinel.host,
'agent': sentinel.agent,
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'event_type': sentinel.name,
'name': sentinel.name,
'context': {
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'path': sentinel.path,
},
'event': data,
'username': sentinel.username,
'event_source': 'server',
'time': FROZEN_TIME,
'agent': sentinel.agent,
'host': sentinel.host,
'ip': sentinel.ip,
'page': None,
'session': sentinel.session,
}
assert_events_equal(expected_event, emitted_event)
def test_missing_fields(self):
self.tracker.emit(sentinel.name)
emitted_event = self.get_event()
expected_event = {
'accept_language': '',
'referer': '',
'event_type': sentinel.name,
'name': sentinel.name,
'context': {},
'event': {},
'username': '',
'event_source': 'server',
'time': FROZEN_TIME,
'agent': '',
'host': '',
'ip': '',
'page': None,
'session': '',
}
assert_events_equal(expected_event, emitted_event)
@override_settings(
EVENT_TRACKING_PROCESSORS=GOOGLE_ANALYTICS_PROCESSOR,
)
class GoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields necessary for Google Analytics."""
def test_event_fields(self):
""" Test that course_id is added as the label if present, and nonInteraction is set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'label': sentinel.course_id,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
def test_no_course_id(self):
""" Test that a label is not added if course_id is not specified, but nonInteraction is still set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
@override_settings(
EVENT_TRACKING_BACKENDS={
'0': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'first': {'ENGINE': 'track.tests.InMemoryBackend'}
},
'processors': [
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
}
},
'1': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'second': {
'ENGINE': 'track.tests.InMemoryBackend'
}
}
}
}
}
)
class MultipleShimGoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
"""Ensure changes don't impact other backends"""
def test_multiple_backends(self):
data = {
sentinel.key: sentinel.value,
}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
segment_emitted_event = self.tracker.backends['0'].backends['first'].events[0]
log_emitted_event = self.tracker.backends['1'].backends['second'].events[0]
expected_event = {
'context': context,
'data': data,
'label': sentinel.course_id,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, segment_emitted_event)
expected_event = {
'context': context,
'data': data,
'name': sentinel.name,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, log_emitted_event)
| agpl-3.0 | -3,236,976,696,412,245,500 | 30.233945 | 110 | 0.53488 | false |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/django/forms/models.py | 6 | 55443 | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
import warnings
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text, smart_text
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory',
'BaseInlineFormSet', 'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
opts = instance._meta
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# Wrap up the saving of m2m data as a function.
def save_m2m():
cleaned_data = form.cleaned_data
        # Note that for historical reasons we also want to include
        # virtual_fields here. (GenericRelation was previously a fake
        # m2m field).
for f in chain(opts.many_to_many, opts.virtual_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# If we are committing, save the instance and the m2m data immediately.
instance.save()
save_m2m()
else:
# We're not committing. Add a method to the form to allow deferred
# saving of m2m data.
form.save_m2m = save_m2m
return instance
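# Illustrative use of save_instance() above (hypothetical "Article" model and
# "request" object assumed): commit=False enables the usual deferred-save
# pattern:
#   article = save_instance(form, Article(), commit=False)
#   article.author = request.user
#   article.save()
#   form.save_m2m()   # attached to the form by the commit=False branch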
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.virtual_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
# If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
qs = f.value_from_object(instance)
if qs._result_cache is not None:
data[f.name] = [item.pk for item in qs]
else:
data[f.name] = list(qs.values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(instance)
return data
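# Illustrative use of model_to_dict() above (hypothetical "article" instance
# and "ArticleForm" class assumed):
#   initial = model_to_dict(article, fields=['title', 'body'])
#   form = ArticleForm(initial=initial)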
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
    Returns an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_virtual_fields = [f for f in opts.virtual_fields
if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_virtual_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
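# Illustrative use of fields_for_model() above (hypothetical "Author" model;
# ``django.forms`` assumed imported as ``forms``):
#   form_fields = fields_for_model(Author, fields=['name', 'birth_date'],
#                                  labels={'name': 'Full name'},
#                                  widgets={'name': forms.TextInput()})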
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(opts.model, opts.fields, opts.exclude,
opts.widgets, formfield_callback,
opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = (set(none_model_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
# Apply ``limit_choices_to`` to each field.
for field_name in self.fields:
formfield = self.fields[field_name]
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field, None)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
for field, messages in errors.error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
try:
self.instance = construct_instance(self, self.instance, opts.fields, exclude)
except ValidationError as e:
self._update_errors(e)
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
return save_instance(self, self.instance, self._meta.fields,
fail_message, commit, self._meta.exclude,
construct=False)
save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
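# Illustrative use of modelform_factory() above (hypothetical "Book" model and
# "request" object assumed):
#   BookForm = modelform_factory(Book, fields=['title', 'author'])
#   form = BookForm(request.POST or None)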
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.rel is not None:
field = field.rel.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# get data for each field of each of unique_check
row_data = (form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, "
"which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
if form in forms_to_delete:
# If the pk is None, it means that the object can't be
# deleted again. Possible reason for this is that the
# object was already deleted from the DB. Refs #14877.
if obj.pk is None:
continue
self.deleted_objects.append(obj)
if commit:
obj.delete()
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
        # data back. Generally, pk.editable should be false, but for some
        # reason, the editable attribute of auto_created pk fields and
        # AutoFields is True, so check for that as well.
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.rel.to._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False):
"""
Returns a FormSet class for the given Django model class.
"""
meta = getattr(form, 'Meta', None)
if meta is None:
meta = type(str('Meta'), (object,), {})
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts, error_messages=error_messages)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
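# Illustrative use of modelformset_factory() above (hypothetical "Author"
# model assumed):
#   AuthorFormSet = modelformset_factory(Author, fields=['name'], extra=2)
#   formset = AuthorFormSet(queryset=Author.objects.all())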
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.rel.to()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
            # Remove the primary key from the form's data; we are only
            # creating new instances.
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
fk_value = getattr(self.instance, self.fk.rel.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.rel.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.rel.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
kwargs['to_field'] = self.fk.rel.field_name
# If we're adding a new object, ignore a parent's auto-generated key
# as it will be regenerated on the save request.
if self.instance._state.adding:
if kwargs.get('to_field') is not None:
to_field = self.instance._meta.get_field(kwargs['to_field'])
else:
to_field = self.instance._meta.pk
if to_field.has_default():
setattr(self.instance, to_field.attname, None)
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.rel.to != parent_model and
fk.rel.to not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s.%s'."
% (fk_name, parent_model._meta.app_label, parent_model._meta.object_name))
elif len(fks_to_parent) == 0:
raise ValueError(
"'%s.%s' has no field named '%s'."
% (model._meta.app_label, model._meta.object_name, fk_name))
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey)
and (f.rel.to == parent_model
or f.rel.to in parent_model._meta.get_parent_list())
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise ValueError(
"'%s.%s' has no ForeignKey to '%s.%s'." % (
model._meta.app_label,
model._meta.object_name,
parent_model._meta.app_label,
parent_model._meta.object_name,
)
)
else:
raise ValueError(
"'%s.%s' has more than one ForeignKey to '%s.%s'." % (
model._meta.app_label,
model._meta.object_name,
parent_model._meta.app_label,
parent_model._meta.object_name,
)
)
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
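# Illustrative use of inlineformset_factory() above (hypothetical "Author" and
# "Book" models, where Book has a single ForeignKey to Author):
#   BookFormSet = inlineformset_factory(Author, Book, fields=['title'])
#   formset = BookFormSet(instance=author)  # "author" instance assumed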
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# if there is no value act as we did before.
return self.parent_instance
        # ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
queryset = self.queryset.all()
# Can't use iterator() when queryset uses prefetch_related()
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
if self.field.cache_choices:
if self.field.choice_cache is None:
self.field.choice_cache = [
self.choice(obj) for obj in queryset
]
for choice in self.field.choice_cache:
yield choice
else:
for obj in queryset:
yield self.choice(obj)
def __len__(self):
return (len(self.queryset) +
(1 if self.field.empty_label is not None else 0))
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
def __init__(self, queryset, empty_label="---------", cache_choices=None,
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
*args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
if cache_choices is not None:
warnings.warn("cache_choices has been deprecated and will be "
"removed in Django 1.9.",
RemovedInDjango19Warning, stacklevel=2)
else:
cache_choices = False
self.cache_choices = cache_choices
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.choice_cache = None
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Returns ``limit_choices_to`` for this form field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
    # This method will be used to create object labels by the ModelChoiceIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_text(obj)
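    # A typical override, as the docstring above suggests (sketch with a
    # hypothetical "Author" model):
    #   class AuthorChoiceField(ModelChoiceField):
    #       def label_from_instance(self, obj):
    #           return "%s (%s)" % (obj.name, obj.birth_date)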
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, cache_choices=None, required=True,
widget=None, label=None, initial=None,
help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(queryset, None,
cache_choices, required, widget, label, initial, help_text,
*args, **kwargs)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
        # Deduplicate the given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
| mit | -7,673,982,182,908,542,000 | 40.717833 | 116 | 0.586386 | false |
wkschwartz/django | django/contrib/contenttypes/forms.py | 21 | 3761 | from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.forms import ModelForm, modelformset_factory
from django.forms.models import BaseModelFormSet
class BaseGenericInlineFormSet(BaseModelFormSet):
"""
A formset for generic inline objects to a parent.
"""
def __init__(self, data=None, files=None, instance=None, save_as_new=False,
prefix=None, queryset=None, **kwargs):
opts = self.model._meta
self.instance = instance
self.rel_name = (
opts.app_label + '-' + opts.model_name + '-' +
self.ct_field.name + '-' + self.ct_fk_field.name
)
self.save_as_new = save_as_new
if self.instance is None or self.instance.pk is None:
qs = self.model._default_manager.none()
else:
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{
self.ct_field.name: ContentType.objects.get_for_model(
self.instance, for_concrete_model=self.for_concrete_model),
self.ct_fk_field.name: self.instance.pk,
})
super().__init__(queryset=qs, data=data, files=files, prefix=prefix, **kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super().initial_form_count()
@classmethod
def get_default_prefix(cls):
opts = cls.model._meta
return (
opts.app_label + '-' + opts.model_name + '-' +
cls.ct_field.name + '-' + cls.ct_fk_field.name
)
def save_new(self, form, commit=True):
setattr(form.instance, self.ct_field.get_attname(), ContentType.objects.get_for_model(self.instance).pk)
setattr(form.instance, self.ct_fk_field.get_attname(), self.instance.pk)
return form.save(commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
formset=BaseGenericInlineFormSet,
ct_field="content_type", fk_field="object_id",
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=None, formfield_callback=None,
validate_max=False, for_concrete_model=True,
min_num=None, validate_min=False,
absolute_max=None, can_delete_extra=True):
"""
Return a ``GenericInlineFormSet`` for the given kwargs.
You must provide ``ct_field`` and ``fk_field`` if they are different from
the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.remote_field.model != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
exclude = [*(exclude or []), ct_field.name, fk_field.name]
FormSet = modelformset_factory(
model, form=form, formfield_callback=formfield_callback,
formset=formset, extra=extra, can_delete=can_delete,
can_order=can_order, fields=fields, exclude=exclude, max_num=max_num,
validate_max=validate_max, min_num=min_num, validate_min=validate_min,
absolute_max=absolute_max, can_delete_extra=can_delete_extra,
)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
FormSet.for_concrete_model = for_concrete_model
return FormSet
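# Illustrative use of generic_inlineformset_factory() above (hypothetical
# "Image" model using the default content_type/object_id generic foreign key):
#   ImageFormSet = generic_inlineformset_factory(Image, extra=1)
#   formset = ImageFormSet(instance=parent_object)  # "parent_object" assumed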
| bsd-3-clause | -590,207,776,006,884,500 | 43.77381 | 112 | 0.603031 | false |
aissehust/sesame-paste-noodle | mlbase/layers/output.py | 1 | 1364 | from .layer import Layer
from .layer import layerhelper
import theano
import theano.tensor as T
__all__ = [
'SoftMax',
]
@layerhelper
class SoftMax(Layer):
debugname = 'softmax'
LayerTypeName = 'SoftMax'
yaml_tag = u'!SoftMax'
def __init__(self):
super(SoftMax, self).__init__()
def getpara(self):
return []
def forward(self, inputtensor):
inputimage = inputtensor[0]
#e_x = T.exp(inputimage - inputimage.max(axis=1, keepdims=True))
#out = e_x / e_x.sum(axis=1, keepdims=True)
#return (T.nnet.softmax(inputimage),)
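        # Subtract the per-row maximum before exponentiating so exp() cannot
        # overflow; the shift cancels out after normalisation, leaving the
        # same softmax value.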
e_x = T.exp(inputimage - inputimage.max(axis=1).dimshuffle(0, 'x'))
return (e_x / e_x.sum(axis=1).dimshuffle(0, 'x'),)
def forwardSize(self, inputsize):
return inputsize
def fillToObjMap(self):
objDict = super(SoftMax, self).fillToObjMap()
return objDict
def loadFromObjMap(self, tmap):
super(SoftMax, self).loadFromObjMap(tmap)
@classmethod
def to_yaml(cls, dumper, data):
obj_dict = data.fillToObjMap()
node = dumper.represent_mapping(SoftMax.yaml_tag, obj_dict)
return node
@classmethod
def from_yaml(cls, loader, node):
obj_dict = loader.construct_mapping(node)
ret = SoftMax()
ret.loadFromObjMap(obj_dict)
return ret
| bsd-3-clause | -7,449,609,179,001,500,000 | 25.230769 | 75 | 0.61217 | false |
davism4/MineHero | src/level.py | 1 | 7451 | import pygame, tiling, sprites
import constants as con, resources as res
import os
import randomMap
import time
def run(grid_data = None):
screen = res.screen
tilesize = con.TILE_WIDTH
player = sprites.JonesSprite()
clock = pygame.time.Clock()
# Initialize the grid
board = tiling.Board(con.GRID_SIZE, con.GRID_SIZE)
# TODO: Read grid data (2D-array), parse into tiles
## for i in range(con.GRID_SIZE):
## board.tileAt(i,0).setValue(con.TYPE_WALL)
## board.tileAt(i,con.GRID_SIZE-1).setValue(con.TYPE_WALL)
## board.tileAt(0,i).setValue(con.TYPE_WALL)
## board.tileAt(con.GRID_SIZE-1,i).setValue(con.TYPE_WALL)
##
## board.tileAt(5, 5).setValue(con.TYPE_BOMB_ACTIVE)
## board.tileAt(3, 3).setValue(con.TYPE_BOMB_ACTIVE)
grid = randomMap.RandomMap()
pos_x = 1
pos_y = (con.GRID_SIZE-1)
# Finish setting up the board
for x in range(0, con.GRID_SIZE):
for y in range(0, con.GRID_SIZE):
t = board.tileAt(x,y)
t.setValue(grid[x][y])
for x in range(0, con.GRID_SIZE):
for y in range(0, con.GRID_SIZE):
t = board.tileAt(x,y)
if (t.getValue() != con.TYPE_WALL and not t.getIsBomb()):
board.setTileValues(x,y)
if (t.getValue() == con.TYPE_START):
t.setVisible()
pos_x = x
pos_y = y
if (t.getValue() == con.TYPE_EXIT):
t.setVisible()
# Player variables
health = con.MAX_HEALTH
direction = con.STAY # used for animation
dest_x = pos_x
dest_y = pos_y
draw_x = pos_x*tilesize
draw_y = pos_y*tilesize
hp_x = int(round(con.HEALTH_X*con.SCREEN_WIDTH))
hp_y = int(round(con.HEALTH_Y*con.SCREEN_HEIGHT))
    # Sound initialization.
    # This currently works when this file has a subdirectory called sound.
####################################################
# Main Code
####################################################
def can_move_to(x, y):
if (x >= con.GRID_SIZE or x < 0):
res.hitWall.play()
return False
elif (y >= con.GRID_SIZE or y < 0):
res.hitWall.play()
return False
elif (board.tileAt(x,y).getValue()==con.TYPE_WALL):
res.hitWall.play()
return False
else:
return True
gameExit = False
pygame.display.update()
walking = False
# BEGIN MAIN LOOP
while (not gameExit):
if (health <= 0):
res.levelEnd.play()
screen.blit(res.Lose,(0,0))
pygame.display.flip()
time.sleep(5.5)
            gameExit = True
# Don't ask for movement input if already walking
if (walking):
if (direction == con.NORTH):
if (draw_y > dest_y*tilesize):
draw_y -= con.MOVE_SPEED
else:
draw_y = dest_y*tilesize
walking = False
elif (direction == con.SOUTH):
if (draw_y < dest_y*tilesize):
draw_y += con.MOVE_SPEED
else:
draw_y = dest_y*tilesize
walking = False
elif (direction == con.EAST):
if (draw_x < dest_x*tilesize):
draw_x += con.MOVE_SPEED
else:
draw_x = dest_x*tilesize
walking = False
elif (direction == con.WEST):
if (draw_x > dest_x*tilesize):
draw_x -= con.MOVE_SPEED
else:
draw_x = dest_x*tilesize
walking = False
if (not walking):
board.tileAt(dest_x,dest_y).setVisible()
# not walking --> accept input
dest_x = pos_x
dest_y = pos_y
for event in pygame.event.get():
if (event.type == pygame.QUIT):
gameExit = True
elif (event.type == pygame.KEYDOWN):
if (event.key == pygame.K_ESCAPE):
gameExit = True
#Only register movement input when NOT animating player
elif (not walking):
if (event.key == pygame.K_LEFT):
direction = con.WEST
dest_x = pos_x - 1
elif (event.key == pygame.K_RIGHT):
direction = con.EAST
dest_x = pos_x + 1
elif (event.key == pygame.K_UP):
direction = con.NORTH
dest_y = pos_y - 1
elif (event.key == pygame.K_DOWN):
direction = con.SOUTH
dest_y = pos_y + 1
#Major walking routine
if can_move_to(dest_x,dest_y):
#You hit a bomb
if (board.tileAt(dest_x,dest_y).getValue() == con.TYPE_BOMB_ACTIVE):
res.hitBomb.play()
health -= 1
(board.tileAt(dest_x,dest_y)).setValue(con.TYPE_BOMB_INACTIVE)
#You step on a numbered tile that hasn't been revealed.
elif ((board.tileAt(dest_x,dest_y).getValue() > con.TYPE_EMPTY) and (board.tileAt(dest_x,dest_y).getValue() < con.MAX_SURROUNDING) and (board.tileAt(dest_x,dest_y).getVisible() == False)):
res.revealNum.play()
elif ((board.tileAt(dest_x,dest_y).getValue() == con.TYPE_EXIT)):
res.levelEnd.play()
screen.blit(res.Win,(0,0))
pygame.display.flip()
time.sleep(5.5)
gameExit = True
pos_x = dest_x
pos_y = dest_y
walking = True
player.setDirection(direction)
#print(board.tileAt(dest_x,dest_y).getValue())
pygame.event.clear()
screen.fill(con.WHITE)
# Tiles are mainly static
for y in range(0, con.GRID_SIZE):
for x in range(0, con.GRID_SIZE):
sprite = board.tileAt(x,y).getSprite()
if board.tileAt(x,y).getValue()==con.TYPE_WALL:
frame = sprite.frame()
elif board.tileAt(x,y).getVisible():
frame = sprite.frameVisible()
else:
frame = sprite.frameHidden()
screen.blit(frame, sprite.rect(x*tilesize, y*tilesize))
#pygame.draw.rect(screen, con.GREEN, (draw_x, draw_y, tilesize, tilesize))
screen.blit(player.frame(), player.rect(draw_x, draw_y))
player.animateNext()
for h in range(0, health):
pygame.draw.rect(screen, con.RED, (hp_x + h*15, hp_y, 10, 10))
#screen.blit(image, pygame.Rect(0, 0, con.SCREEN_WIDTH, con.SCREEN_HEIGHT))
# healthLabel = myfont.render('health: '+`health`,1,white)
# screen.blit(healthLabel, (hp_x, hp_y))
# clear before rendering graphics
#pygame.event.clear()
    clock.tick(con.FRAMES_PER_SECOND)
pygame.display.update()
| gpl-3.0 | -660,367,275,972,047,100 | 35.169903 | 212 | 0.481949 | false |
mengxn/tensorflow | tensorflow/python/layers/base.py | 9 | 14122 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the base Layer class, from which all layers inherit.
This is a private class and its internal implementation is subject to changes
in the future.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import inspect
import re
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
class _Layer(object):
"""Base layer class.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
This is the class from which all layers inherit, implementing common
infrastructure functionality.
A layer is a class implementing common neural networks operations, such
as convolution, batch norm, etc. These operations require managing variables,
losses, and updates, as well as applying TensorFlow ops to input tensors.
Properties:
trainable: Whether the layer should be trained (boolean).
name: The name of the layer (string).
dtype: Default dtype of the layer (dtypes.float32).
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and non-trainable.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
"""
def __init__(self, trainable=True, name=None,
dtype=dtypes.float32, **kwargs):
# We use a kwargs dict here because these kwargs only exist
# for compatibility reasons.
# The list of kwargs is subject to changes in the future.
# We do not want to commit to it or to expose the list to users at all.
# Note this is exactly as safe as defining kwargs in the function signature,
# the only difference being that the list of valid kwargs is defined
    # below rather than in the signature, and default values are defined
# in calls to kwargs.get().
allowed_kwargs = {
'_scope',
'_reuse',
}
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError('Keyword argument not understood:', kwarg)
self._trainable = trainable
self._built = False
self._trainable_variables = []
self._non_trainable_variables = []
self._updates = []
self._losses = []
self._reuse = kwargs.get('_reuse')
self._graph = ops.get_default_graph()
self.dtype = dtype
# Determine base name (non-unique).
if isinstance(name, vs.VariableScope):
base_name = name.name
else:
base_name = name
if not name:
base_name = _to_snake_case(self.__class__.__name__)
self._base_name = base_name
# Determine variable scope.
scope = kwargs.get('_scope')
if scope:
self._scope = next(vs.variable_scope(scope).gen)
else:
self._scope = None
# Unique name is borrowed from scope to match variable names.
if self._scope is not None:
self._name = self._scope.name
else:
# No name available until we see a scope
self._name = None
def __setattr__(self, name, value):
if hasattr(self, name):
# Only allow private attributes to be set more than once, under the
# convention that private attributes should only be set from inside
# the class.
# All attributes meant to be set several times should be set to private.
if name[0] != '_':
raise AttributeError('Read-only property cannot be set: %s' % name)
super(_Layer, self).__setattr__(name, value)
@property
def name(self):
if self._name is None:
raise ValueError(
'No name available for layer because it has not been used yet.')
return self._name
@property
def trainable_variables(self):
return self._trainable_variables if self.trainable else []
@property
def non_trainable_variables(self):
return self._non_trainable_variables if self.trainable else self.variables
@property
def trainable_weights(self):
return self.trainable_variables
@property
def non_trainable_weights(self):
return self.non_trainable_variables
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self._trainable_variables + self._non_trainable_variables
@property
def updates(self):
return self._updates
@property
def losses(self):
return self._losses
@property
def built(self):
return self._built
@property
def trainable(self):
return self._trainable
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.variables
def build(self, _):
"""Creates the variables of the layer.
"""
self._built = True
def call(self, inputs, **kwargs):
"""The logic of the layer lives here.
Arguments:
inputs: input tensor(s).
**kwargs: additional keyword arguments.
Returns:
Output tensor(s).
"""
raise NotImplementedError
def _compute_output_shape(self, input_shape):
"""Computes the output shape of the layer given the input shape.
Assumes that the layer will be built to match that input shape.
Args:
input_shape: A (possibly nested tuple of) `TensorShape`. It need not
be fully defined (e.g. the batch size may be unknown).
Returns:
A (possibly nested tuple of) `TensorShape`.
Raises:
TypeError: if `input_shape` is not a (possibly nested tuple of)
`TensorShape`.
ValueError: if `input_shape` is incomplete or is incompatible with the
        layer.
"""
raise NotImplementedError
def _add_variable(self, name, shape, dtype=None,
initializer=None, regularizer=None, trainable=True,
variable_getter=vs.get_variable):
"""Adds a new variable to the layer.
Arguments:
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype`.
initializer: initializer instance (callable).
regularizer: regularizer instance (callable).
trainable: whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
variable_getter: The getter to use for TensorFlow variables.
Returns:
The created variable.
"""
if dtype is None:
dtype = self.dtype
existing_variables = set(tf_variables.global_variables())
variable = variable_getter(name,
shape=shape,
initializer=initializer,
dtype=dtype,
trainable=trainable and self.trainable)
# TODO(sguada) fix name = variable.op.name
if variable in existing_variables:
return variable
if regularizer:
# To match the behavior of tf.get_variable(), we only
# apply regularization if the variable is newly created.
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
with ops.colocate_with(v.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
if regularization is not None:
self._losses.append(regularization)
_add_elements_to_collection(
regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
else:
with ops.colocate_with(variable.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(variable)
if regularization is not None:
self._losses.append(regularization)
_add_elements_to_collection(
regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
if trainable:
self._trainable_variables.append(variable)
else:
self._non_trainable_variables.append(variable)
return variable
def __call__(self, inputs, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Arguments:
inputs: input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
**Note**, the kwarg 'scope' is reserved for use by the Layer.
Returns:
Output tensor(s).
"""
scope = kwargs.pop('scope', None)
# Define a custom getter to override tf.get_variable when creating layer
# variables. The current custom getter is nested by the variable scope.
def variable_getter(getter, name, shape, dtype=None, initializer=None,
regularizer=None, trainable=True, **getter_kwargs):
return self._add_variable(
name, shape, initializer=initializer, regularizer=regularizer,
dtype=dtype, trainable=trainable,
variable_getter=functools.partial(getter, **getter_kwargs))
if not self._built and self._scope is None:
# If constructed with _scope=None, lazy setting of scope.
if self._reuse:
self._scope = next(vs.variable_scope(
scope if scope is not None else self._base_name).gen)
else:
self._scope = next(vs.variable_scope(
scope, default_name=self._base_name).gen)
self._name = self._scope.name
# Build (if necessary) and call the layer, inside a variable
# scope.
with vs.variable_scope(self._scope,
reuse=True if self._built else self._reuse,
custom_getter=variable_getter) as scope:
# Ensure the Layer, if being reused, is working with inputs from
# the same graph as where it was created.
try:
ops._get_graph_from_inputs(nest.flatten(inputs), graph=self.graph) # pylint: disable=protected-access
except ValueError as e:
raise ValueError("Inputs' and Layer's graphs are not the same: %s" % e)
with ops.name_scope(scope.original_name_scope):
if not self.built:
input_list = [
ops.convert_to_tensor(x, name='input')
for x in nest.flatten(inputs)]
input_shapes = [x.get_shape() for x in input_list]
if len(input_shapes) == 1:
self.build(input_shapes[0])
else:
self.build(input_shapes)
self._built = True
outputs = self.call(inputs, *args, **kwargs)
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if hasattr(self, 'activity_regularizer') and self.activity_regularizer:
output_list = _to_list(outputs)
for output in output_list:
with ops.name_scope('ActivityRegularizer'):
activity_regularization = self.activity_regularizer(output)
self._losses.append(activity_regularization)
_add_elements_to_collection(
activity_regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
# Update global default collections.
_add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
return outputs
@property
def graph(self):
return self._graph
def __deepcopy__(self, memo):
no_copy = set(['_graph'])
shallow_copy = set(['_scope'])
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k in no_copy:
setattr(result, k, v)
elif k in shallow_copy:
setattr(result, k, copy.copy(v))
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
def apply(self, inputs, **kwargs):
"""Apply the layer on a input.
This simply wraps `self.__call__`.
Arguments:
inputs: Input tensor(s).
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, **kwargs)
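# Illustrative sketch (added for clarity, not part of the original module): a
# typical _Layer subclass overrides build() to create variables through
# _add_variable() and call() to define the forward computation. The names
# Scale/alpha below are hypothetical, and init_ops would need to be imported:
#
#   class Scale(_Layer):
#     def build(self, input_shape):
#       self.alpha = self._add_variable(
#           'alpha', shape=[], initializer=init_ops.ones_initializer())
#     def call(self, inputs):
#       return inputs * self.alpha
#
#   # y = Scale()(x)  # __call__ builds on first use, then runs call()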
def _to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
def _to_list(x):
"""This normalizes a list/tuple or single element into a list.
If a single element is passed, we return
a list of size 1 containing the element.
Arguments:
x: list or tuple or single element.
Returns:
A list.
"""
if isinstance(x, (list, tuple)):
return list(x)
return [x]
def _add_elements_to_collection(elements, collections):
elements = _to_list(elements)
collections = _to_list(collections)
for name in collections:
collection = ops.get_collection_ref(name)
collection_set = set(collection)
for element in elements:
if element not in collection_set:
collection.append(element)
| apache-2.0 | 4,677,548,087,256,835,000 | 32.704057 | 110 | 0.65274 | false |
POFK/utilTool | stock/tools/Data.py | 1 | 3702 | #!/usr/bin/env python
# coding=utf-8
from abc import ABCMeta, abstractmethod
import datetime
import numpy as np
import pandas as pd
import tushare as ts
import tqdm
#Dir = '/mnt/data/'
print 'tushare ==>', ts.__version__
class Data(object):
__metaclass__ = ABCMeta
def __init__(self):
self.today = datetime.datetime.now().strftime('%Y%m%d')
def Raise(self):
raise NotImplementedError("Must be implemented!")
@abstractmethod
def GetIndex(self):
pass
def Save(self):
pass
class LoadHistData(Data):
def __init__(self):
super(LoadHistData, self).__init__()
def CalTime(self, timeToMarket='20160101', range=3):
ttm = int(timeToMarket)
ttd = int(self.today)
if ttm + range * 10000 < ttd:
ttm = ttd - range * 10000
time = [str(ttm)]
while True:
if ttm + 10000 < ttd:
ttm += 10000
time.append(str(ttm))
else:
time.append(str(ttd))
break
return time
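    # Illustrative example (hypothetical "today" of 20170315):
    #   CalTime('20100101', range=3) ->
    #       ['20140315', '20150315', '20160315', '20170315']
    # i.e. yearly cut points covering at most `range` years back from today.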
def ConvertDateFormat(self, date):
y = date[:4]
m = date[4:6]
d = date[6:8]
return y+'-'+m+'-'+d
def GetHistData(self, code='', ktype='D', timeToMarket='20160101', range=3):
''' range: starting from x year before timeToMarket '''
time_range = self.CalTime(timeToMarket=timeToMarket, range=range)
Data=[]
for i in np.arange(len(time_range)-1):
start = self.ConvertDateFormat(time_range[i])
end = self.ConvertDateFormat(time_range[i+1])
k_data = ts.get_k_data(code, autype='qfq', ktype=ktype, start=start, end=end)
Data.append(k_data)
DataAll = pd.concat(Data, ignore_index=True)
return DataAll
def GetAllHistData(self, path='/mnt/data/HistData_Dkline.hdf5', range=3):
# read by: pd.read_hdf(path, key)
index = self.GetIndex()
for code in tqdm.tqdm(index.index):
# print code, str(index[code])
            df = self.GetHistData(code=code, ktype='D', timeToMarket=index[code], range=range)
df.to_hdf(path, 'code_'+code)
class LoadInfo(Data):
def __init__(self):
super(LoadInfo, self).__init__()
def GetIndex(self):
data = ts.get_stock_basics(date=None)
ind = data.index[data.timeToMarket==0]
data.drop(ind, inplace=True)
timeToMarket = data['timeToMarket']
return timeToMarket
class LoadNewData(Data):
def __init__(self):
super(LoadNewData, self).__init__()
if __name__ == '__main__':
class LD(LoadInfo, LoadHistData, LoadNewData):
def __init__(self):
super(LD, self).__init__()
print LD.__mro__
ld = LD()
s = ld.GetHistData(code='600000', ktype='D', timeToMarket='20140230', range=4)
ld.GetAllHistData(range=3)
'''
# show:
import matplotlib.pyplot as plt
import matplotlib.finance as mpf
from matplotlib.pylab import date2num
def date_to_num(dates):
num_time = []
for date in dates:
date_time = datetime.datetime.strptime(date,'%Y-%m-%d')
num_date = date2num(date_time)
num_time.append(num_date)
return num_time
plt_data = s.as_matrix()
num_time = date_to_num(plt_data[:,0])
plt_data[:,0] = num_time
fig, ax = plt.subplots(facecolor=(0.5, 0.5, 0.5))
fig.subplots_adjust(bottom=0.2)
ax.xaxis_date()
plt.xticks(rotation=45)
plt.xlabel("time")
mpf.candlestick_ochl(ax, plt_data, width=0.6, colorup='g', colordown='r', alpha=0.8)
plt.grid(True)
plt.savefig('test_qfq.pdf')
plt.show()
'''
| mit | -237,900,982,935,462,240 | 27.045455 | 90 | 0.574824 | false |
PanDAWMS/autopyfactory | autopyfactory/utils.py | 1 | 5086 | #!/usr/bin/env python
"""
Convenience utilities for AutoPyFactory.
"""
import os
import signal
import subprocess
import threading
import time
class TimeOutException(Exception):
pass
class ExecutionFailedException(Exception):
pass
class TimedCommand(object):
"""
-----------------------------------------------------------------------
class to run shell commands.
It encapsulates calls to subprocess.Popen()
Can implement a timeout and abort execution if needed.
Can print a custom failure message and/or raise custom exceptions.
-----------------------------------------------------------------------
Public Interface:
__init__(): inherited from threading.Thread
self.output
self.error
self.status
self.pid
self.time
-----------------------------------------------------------------------
"""
def __init__(self, cmd, timeout=None, failure_msg=None, exception=None):
class SubProcess(threading.Thread):
def __init__(self, program):
threading.Thread.__init__(self)
self.program = program
self.output = None
self.error = None
self.status = None
self.pid = None
def run(self):
self.p = subprocess.Popen(self.program, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
self.pid = self.p.pid
self.output = self.p.stdout.read()
self.error = self.p.stderr.read()
self.status = self.p.wait()
self.timeout = timeout
self.failure_msg = failure_msg
self.exception = exception
self.cmd = SubProcess(cmd)
now = time.time()
self.run()
self.time = time.time() - now
self.checkoutput()
def run(self):
self.cmd.start()
if self.timeout:
while self.cmd.isAlive() and self.timeout > 0:
time.sleep(1)
self.timeout -= 1
if not self.timeout > 0:
os.kill(self.cmd.pid, signal.SIGKILL)
raise TimeOutException
self.cmd.join()
self.output = self.cmd.output
self.error = self.cmd.error
self.status = self.cmd.status
self.pid = self.cmd.pid
def checkoutput(self):
if self.status != 0:
if self.failure_msg:
                print( self.failure_msg)
if self.exception:
raise self.exception
def checkDaemon(daemon, pattern='running'):
"""
checks if a given daemon service is active
"""
cmd = 'service %s status' %daemon
subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = subproc.communicate()
st = subproc.returncode
return out.lower().find(pattern) > 0
def which(file):
for path in os.environ["PATH"].split(":"):
if os.path.exists(path + "/" + file):
return path + "/" + file
class Container(object):
"""
generic class that is built from a dictionary content
"""
def __init__(self, input_d):
self.input_d = input_d
for k, v in self.input_d.items():
self.__dict__[k] = v
def __getattr__(self, name):
"""
Return None for non-existent attributes, otherwise behave normally.
"""
try:
return int(self.__getattribute__(name))
except AttributeError:
return None
def __str__(self):
s = 'Info Container ='
for k, v in self.input_d.items():
            s += ' %s: %s' % (k, v)
return s
def remap(d, mapping, add_f=lambda x,y: x+y):
"""
converts a dictionary into another dictionary
changing keys (and aggregating values)
based on a mappings dictionary
"""
out = {}
for k, v in d.items():
k = mapping[k]
if k not in out.keys():
out[k] = v
else:
out[k] = add_f(out[k], v)
return out
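# Illustrative example of remap(): keys mapped to the same new key are merged
# and their values aggregated with add_f (a plain sum by default):
#   remap({'a': 1, 'b': 2, 'c': 5}, {'a': 'x', 'b': 'x', 'c': 'y'})
#   -> {'x': 3, 'y': 5}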
# ==================================================
if __name__ == "__main__":
try:
        #cmd = TimedCommand('ls -ltr /tmpp/', exception=ExecutionFailedException)
        #cmd = TimedCommand('ls -ltr /tmp/', exception=ExecutionFailedException)
        cmd = TimedCommand('for i in a b c d e f g h; do echo $i; sleep 1; done', 2)
        #cmd = TimedCommand('for i in a b c d e f g h; do echo $i; sleep 1; done')
print( '==================')
print( cmd.output )
print( '------------------')
print( cmd.error )
print( '------------------')
print( cmd.status )
print( '------------------')
print( cmd.pid )
print( '------------------')
print( cmd.time )
print( '==================')
except TimeOutException:
print( 'timeout' )
except ExecutionFailedException:
print( 'failed' )
| apache-2.0 | 3,674,523,256,239,117,300 | 26.945055 | 131 | 0.500983 | false |
danielkza/dnf-plugins-extras | tests/support.py | 4 | 2697 | # Copyright (C) 2014 Red Hat, Inc.
# Copyright (C) 2015 Igor Gnatenko
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import dnf
import logging
import sys
import unittest
PY3 = False
if sys.version_info.major >= 3:
PY3 = True
if PY3:
from unittest import mock
else:
from . import mock
class BaseStub(object):
def __init__(self):
self.sack = dnf.sack.Sack()
self.repos = dnf.repodict.RepoDict()
def add_remote_rpm(self, path):
self.sack.create_cmdline_repo()
return self.sack.add_cmdline_package(path)
class CliStub(object):
"""A class mocking `dnf.cli.Cli`."""
nogpgcheck = True
def __init__(self, base):
"""Initialize the CLI."""
self.base = base
self.cli_commands = {}
self.demands = DemandsStub()
self.logger = logging.getLogger()
self.register_command(dnf.cli.commands.HelpCommand)
def register_command(self, command):
"""Register given *command*."""
self.cli_commands.update({alias: command for alias in command.aliases})
class DemandsStub(object):
pass
class RepoStub(object):
"""A class mocking `dnf.repo.Repo`"""
enabled = True
def __init__(self, id_):
"""Initialize the repository."""
self.id = id_
def valid(self):
"""Return a message if the repository is not valid."""
def enable(self):
"""Enable the repo"""
self.enabled = True
def disable(self):
"""Disable the repo"""
self.enabled = False
class TestCase(unittest.TestCase):
def assertEmpty(self, collection):
return self.assertEqual(len(collection), 0)
if not PY3:
assertCountEqual = unittest.TestCase.assertItemsEqual
| gpl-2.0 | -1,394,013,695,452,810,000 | 27.691489 | 79 | 0.678161 | false |
OWASP/django-DefectDojo | dojo/endpoint/views.py | 1 | 16171 | # # endpoints
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.utils.html import escape
from django.utils import timezone
from django.contrib.admin.utils import NestedObjects
from django.db import DEFAULT_DB_ALIAS
from dojo.filters import EndpointFilter
from dojo.forms import EditEndpointForm, \
DeleteEndpointForm, AddEndpointForm, DojoMetaDataForm
from dojo.models import Product, Endpoint, Finding, System_Settings, DojoMeta
from dojo.utils import get_page_items, add_breadcrumb, get_period_counts, get_system_setting, Product_Tab, calculate_grade, create_notification
logger = logging.getLogger(__name__)
def vulnerable_endpoints(request):
endpoints = Endpoint.objects.filter(finding__active=True, finding__verified=True, finding__false_p=False,
finding__duplicate=False, finding__out_of_scope=False, remediated=False).distinct()
# are they authorized
if request.user.is_staff:
pass
else:
products = Product.objects.filter(authorized_users__in=[request.user])
if products.exists():
endpoints = endpoints.filter(product__in=products.all())
else:
raise PermissionDenied
product = None
if 'product' in request.GET:
p = request.GET.getlist('product', [])
if len(p) == 1:
product = get_object_or_404(Product, id=p[0])
ids = get_endpoint_ids(EndpointFilter(request.GET, queryset=endpoints, user=request.user).qs)
endpoints = EndpointFilter(request.GET, queryset=endpoints.filter(id__in=ids), user=request.user)
endpoints_query = endpoints.qs.order_by('host')
paged_endpoints = get_page_items(request, endpoints_query, 25)
add_breadcrumb(title="Vulnerable Endpoints", top_level=not len(request.GET), request=request)
system_settings = System_Settings.objects.get()
product_tab = None
view_name = "All Endpoints"
if product:
product_tab = Product_Tab(product.id, "Vulnerable Endpoints", tab="endpoints")
return render(
request, 'dojo/endpoints.html', {
'product_tab': product_tab,
"endpoints": paged_endpoints,
"filtered": endpoints,
"name": "Vulnerable Endpoints",
})
def all_endpoints(request):
endpoints = Endpoint.objects.all()
show_uri = get_system_setting('display_endpoint_uri')
# are they authorized
if request.user.is_staff:
pass
else:
products = Product.objects.filter(authorized_users__in=[request.user])
if products.exists():
endpoints = endpoints.filter(product__in=products.all())
else:
raise PermissionDenied
product = None
if 'product' in request.GET:
p = request.GET.getlist('product', [])
if len(p) == 1:
product = get_object_or_404(Product, id=p[0])
if show_uri:
endpoints = EndpointFilter(request.GET, queryset=endpoints, user=request.user)
paged_endpoints = get_page_items(request, endpoints.qs, 25)
else:
ids = get_endpoint_ids(EndpointFilter(request.GET, queryset=endpoints, user=request.user).qs)
endpoints = EndpointFilter(request.GET, queryset=endpoints.filter(id__in=ids), user=request.user)
paged_endpoints = get_page_items(request, endpoints.qs, 25)
add_breadcrumb(title="All Endpoints", top_level=not len(request.GET), request=request)
product_tab = None
view_name = "All Endpoints"
if product:
view_name = "Endpoints"
product_tab = Product_Tab(product.id, "Endpoints", tab="endpoints")
return render(
request, 'dojo/endpoints.html', {
'product_tab': product_tab,
"endpoints": paged_endpoints,
"filtered": endpoints,
"name": view_name,
"show_uri": show_uri
})
def get_endpoint_ids(endpoints):
hosts = []
ids = []
for e in endpoints:
if ":" in e.host:
host_no_port = e.host[:e.host.index(':')]
else:
host_no_port = e.host
key = host_no_port + '-' + str(e.product.id)
if key in hosts:
continue
else:
hosts.append(key)
ids.append(e.id)
return ids
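# Note (added for clarity): the loop above keeps a single endpoint id per
# (host-without-port, product) pair, so e.g. "app.example.com:443" and
# "app.example.com:8080" on the same product collapse to one entry.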
def view_endpoint(request, eid):
endpoint = get_object_or_404(Endpoint, id=eid)
host = endpoint.host_no_port
endpoints = Endpoint.objects.filter(host__regex="^" + host + ":?",
product=endpoint.product).distinct()
if (request.user in endpoint.product.authorized_users.all()) or request.user.is_staff:
pass
else:
raise PermissionDenied
endpoint_metadata = dict(endpoint.endpoint_meta.values_list('name', 'value'))
all_findings = Finding.objects.filter(endpoints__in=endpoints).distinct()
active_findings = Finding.objects.filter(endpoints__in=endpoints,
active=True,
verified=True).distinct()
closed_findings = Finding.objects.filter(endpoints__in=endpoints,
mitigated__isnull=False).distinct()
if all_findings:
start_date = timezone.make_aware(datetime.combine(all_findings.last().date, datetime.min.time()))
else:
start_date = timezone.now()
end_date = timezone.now()
r = relativedelta(end_date, start_date)
months_between = (r.years * 12) + r.months
# include current month
months_between += 1
monthly_counts = get_period_counts(active_findings, all_findings, closed_findings, None, months_between, start_date,
relative_delta='months')
paged_findings = get_page_items(request, active_findings, 25)
vulnerable = False
if active_findings.count() != 0:
vulnerable = True
product_tab = Product_Tab(endpoint.product.id, "Endpoint", tab="endpoints")
return render(request,
"dojo/view_endpoint.html",
{"endpoint": endpoint,
'product_tab': product_tab,
"endpoints": endpoints,
"findings": paged_findings,
'all_findings': all_findings,
'opened_per_month': monthly_counts['opened_per_period'],
'endpoint_metadata': endpoint_metadata,
'vulnerable': vulnerable,
})
@user_passes_test(lambda u: u.is_staff)
def edit_endpoint(request, eid):
endpoint = get_object_or_404(Endpoint, id=eid)
if request.method == 'POST':
form = EditEndpointForm(request.POST, instance=endpoint)
if form.is_valid():
endpoint = form.save()
tags = request.POST.getlist('tags')
t = ", ".join(tags)
endpoint.tags = t
messages.add_message(request,
messages.SUCCESS,
'Endpoint updated successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_endpoint', args=(endpoint.id,)))
add_breadcrumb(parent=endpoint, title="Edit", top_level=False, request=request)
form = EditEndpointForm(instance=endpoint)
form.initial['tags'] = [tag.name for tag in endpoint.tags]
product_tab = Product_Tab(endpoint.product.id, "Endpoint", tab="endpoints")
return render(request,
"dojo/edit_endpoint.html",
{"endpoint": endpoint,
'product_tab': product_tab,
"form": form,
})
@user_passes_test(lambda u: u.is_staff)
def delete_endpoint(request, eid):
endpoint = get_object_or_404(Endpoint, pk=eid)
product = endpoint.product
form = DeleteEndpointForm(instance=endpoint)
if request.method == 'POST':
if 'id' in request.POST and str(endpoint.id) == request.POST['id']:
form = DeleteEndpointForm(request.POST, instance=endpoint)
if form.is_valid():
del endpoint.tags
endpoint.delete()
messages.add_message(request,
messages.SUCCESS,
'Endpoint and relationships removed.',
extra_tags='alert-success')
create_notification(event='other',
title='Deletion of %s' % endpoint,
description='The endpoint "%s" was deleted by %s' % (endpoint, request.user),
url=request.build_absolute_uri(reverse('endpoints')),
icon="exclamation-triangle")
return HttpResponseRedirect(reverse('view_product', args=(product.id,)))
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([endpoint])
rels = collector.nested()
product_tab = Product_Tab(endpoint.product.id, "Delete Endpoint", tab="endpoints")
return render(request, 'dojo/delete_endpoint.html',
{'endpoint': endpoint,
'product_tab': product_tab,
'form': form,
'rels': rels,
})
@user_passes_test(lambda u: u.is_staff)
def add_endpoint(request, pid):
product = get_object_or_404(Product, id=pid)
template = 'dojo/add_endpoint.html'
if '_popup' in request.GET:
template = 'dojo/add_related.html'
form = AddEndpointForm(product=product)
if request.method == 'POST':
form = AddEndpointForm(request.POST, product=product)
if form.is_valid():
endpoints = form.save()
tags = request.POST.getlist('tags')
t = ", ".join(tags)
for e in endpoints:
e.tags = t
messages.add_message(request,
messages.SUCCESS,
'Endpoint added successfully.',
extra_tags='alert-success')
if '_popup' in request.GET:
resp = '<script type="text/javascript">opener.emptyEndpoints(window);</script>'
for endpoint in endpoints:
resp += '<script type="text/javascript">opener.dismissAddAnotherPopupDojo(window, "%s", "%s");</script>' \
% (escape(endpoint._get_pk_val()), escape(endpoint))
resp += '<script type="text/javascript">window.close();</script>'
return HttpResponse(resp)
else:
return HttpResponseRedirect(reverse('endpoints') + "?product=" + pid)
product_tab = None
if '_popup' not in request.GET:
product_tab = Product_Tab(product.id, "Add Endpoint", tab="endpoints")
return render(request, template, {
'product_tab': product_tab,
'name': 'Add Endpoint',
'form': form})
@user_passes_test(lambda u: u.is_staff)
def add_product_endpoint(request):
form = AddEndpointForm()
if request.method == 'POST':
form = AddEndpointForm(request.POST)
if form.is_valid():
endpoints = form.save()
tags = request.POST.getlist('tags')
t = ", ".join(tags)
for e in endpoints:
e.tags = t
messages.add_message(request,
messages.SUCCESS,
'Endpoint added successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('endpoints') + "?product=%s" % form.product.id)
add_breadcrumb(title="Add Endpoint", top_level=False, request=request)
return render(request,
'dojo/add_endpoint.html',
{'name': 'Add Endpoint',
'form': form,
})
@user_passes_test(lambda u: u.is_staff)
def add_meta_data(request, eid):
endpoint = Endpoint.objects.get(id=eid)
if request.method == 'POST':
form = DojoMetaDataForm(request.POST, instance=DojoMeta(endpoint=endpoint))
if form.is_valid():
form.save()
messages.add_message(request,
messages.SUCCESS,
'Metadata added successfully.',
extra_tags='alert-success')
if 'add_another' in request.POST:
return HttpResponseRedirect(reverse('add_meta_data', args=(eid,)))
else:
return HttpResponseRedirect(reverse('view_endpoint', args=(eid,)))
else:
form = DojoMetaDataForm()
add_breadcrumb(parent=endpoint, title="Add Metadata", top_level=False, request=request)
product_tab = Product_Tab(endpoint.product.id, "Add Metadata", tab="endpoints")
return render(request,
'dojo/add_endpoint_meta_data.html',
{'form': form,
'product_tab': product_tab,
'endpoint': endpoint,
})
@user_passes_test(lambda u: u.is_staff)
def edit_meta_data(request, eid):
endpoint = Endpoint.objects.get(id=eid)
if request.method == 'POST':
for key, value in request.POST.items():
if key.startswith('cfv_'):
cfv_id = int(key.split('_')[1])
cfv = get_object_or_404(DojoMeta, id=cfv_id)
value = value.strip()
if value:
cfv.value = value
cfv.save()
else:
cfv.delete()
messages.add_message(request,
messages.SUCCESS,
'Metadata edited successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_endpoint', args=(eid,)))
product_tab = Product_Tab(endpoint.product.id, "Edit Metadata", tab="endpoints")
return render(request,
'dojo/edit_endpoint_meta_data.html',
{'endpoint': endpoint,
'product_tab': product_tab,
})
@user_passes_test(lambda u: u.is_staff)
def endpoint_bulk_update_all(request, pid=None):
if request.method == "POST":
endpoints_to_update = request.POST.getlist('endpoints_to_update')
if request.POST.get('delete_bulk_endpoints') and endpoints_to_update:
finds = Endpoint.objects.filter(id__in=endpoints_to_update)
product_calc = list(Product.objects.filter(endpoint__id__in=endpoints_to_update).distinct())
finds.delete()
for prod in product_calc:
calculate_grade(prod)
else:
if endpoints_to_update:
endpoints_to_update = request.POST.getlist('endpoints_to_update')
finds = Endpoint.objects.filter(id__in=endpoints_to_update).order_by("endpoint_meta__product__id")
for endpoint in finds:
endpoint.remediated = not endpoint.remediated
endpoint.save()
messages.add_message(request,
messages.SUCCESS,
'Bulk edit of endpoints was successful. Check to make sure it is what you intended.',
extra_tags='alert-success')
else:
# raise Exception('STOP')
messages.add_message(request,
messages.ERROR,
'Unable to process bulk update. Required fields were not selected.',
extra_tags='alert-danger')
return HttpResponseRedirect(reverse('endpoints', args=()))
| bsd-3-clause | -785,144,914,726,463,500 | 39.528822 | 143 | 0.573619 | false |
ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/objects/instance_action.py | 10 | 8511 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import timeutils
class InstanceAction(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
'action': fields.StringField(nullable=True),
'instance_uuid': fields.UUIDField(nullable=True),
'request_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'start_time': fields.DateTimeField(nullable=True),
'finish_time': fields.DateTimeField(nullable=True),
'message': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, action, db_action):
for field in action.fields:
action[field] = db_action[field]
action._context = context
action.obj_reset_changes()
return action
@staticmethod
def pack_action_start(context, instance_uuid, action_name):
values = {'request_id': context.request_id,
'instance_uuid': instance_uuid,
'user_id': context.user_id,
'project_id': context.project_id,
'action': action_name,
'start_time': context.timestamp}
return values
@staticmethod
def pack_action_finish(context, instance_uuid):
values = {'request_id': context.request_id,
'instance_uuid': instance_uuid,
'finish_time': timeutils.utcnow()}
return values
@base.remotable_classmethod
def get_by_request_id(cls, context, instance_uuid, request_id):
db_action = db.action_get_by_request_id(context, instance_uuid,
request_id)
if db_action:
return cls._from_db_object(context, cls(), db_action)
@base.remotable_classmethod
def action_start(cls, context, instance_uuid, action_name,
want_result=True):
values = cls.pack_action_start(context, instance_uuid, action_name)
db_action = db.action_start(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_action)
@base.remotable_classmethod
def action_finish(cls, context, instance_uuid, want_result=True):
values = cls.pack_action_finish(context, instance_uuid)
db_action = db.action_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_action)
@base.remotable
def finish(self, context):
values = self.pack_action_finish(context, self.instance_uuid)
db_action = db.action_finish(context, values)
self._from_db_object(context, self, db_action)
class InstanceActionList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceAction <= version 1.1
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('InstanceAction'),
}
child_versions = {
'1.0': '1.1',
# NOTE(danms): InstanceAction was at 1.1 before we added this
}
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_actions = db.actions_get(context, instance_uuid)
return base.obj_make_list(context, cls(), InstanceAction, db_actions)
class InstanceActionEvent(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: event_finish_with_failure decorated with serialize_args
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
'event': fields.StringField(nullable=True),
'action_id': fields.IntegerField(nullable=True),
'start_time': fields.DateTimeField(nullable=True),
'finish_time': fields.DateTimeField(nullable=True),
'result': fields.StringField(nullable=True),
'traceback': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, event, db_event):
for field in event.fields:
event[field] = db_event[field]
event._context = context
event.obj_reset_changes()
return event
@staticmethod
def pack_action_event_start(context, instance_uuid, event_name):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'start_time': timeutils.utcnow()}
return values
@staticmethod
def pack_action_event_finish(context, instance_uuid, event_name,
exc_val=None, exc_tb=None):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'finish_time': timeutils.utcnow()}
if exc_tb is None:
values['result'] = 'Success'
else:
values['result'] = 'Error'
values['message'] = exc_val
values['traceback'] = exc_tb
return values
@base.remotable_classmethod
def get_by_id(cls, context, action_id, event_id):
db_event = db.action_event_get_by_id(context, action_id, event_id)
return cls._from_db_object(context, cls(), db_event)
@base.remotable_classmethod
def event_start(cls, context, instance_uuid, event_name, want_result=True):
values = cls.pack_action_event_start(context, instance_uuid,
event_name)
db_event = db.action_event_start(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_event)
@base.serialize_args
@base.remotable_classmethod
def event_finish_with_failure(cls, context, instance_uuid, event_name,
exc_val=None, exc_tb=None, want_result=None):
values = cls.pack_action_event_finish(context, instance_uuid,
event_name, exc_val=exc_val,
exc_tb=exc_tb)
db_event = db.action_event_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_event)
@base.remotable_classmethod
def event_finish(cls, context, instance_uuid, event_name,
want_result=True):
return cls.event_finish_with_failure(context, instance_uuid,
event_name, exc_val=None,
exc_tb=None,
want_result=want_result)
@base.remotable
def finish_with_failure(self, context, exc_val, exc_tb):
values = self.pack_action_event_finish(context, self.instance_uuid,
self.event, exc_val=exc_val,
exc_tb=exc_tb)
db_event = db.action_event_finish(context, values)
self._from_db_object(context, self, db_event)
@base.remotable
def finish(self, context):
self.finish_with_failure(context, exc_val=None, exc_tb=None)
class InstanceActionEventList(base.ObjectListBase, base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('InstanceActionEvent'),
}
child_versions = {
'1.0': '1.0',
'1.1': '1.1',
}
@base.remotable_classmethod
def get_by_action(cls, context, action_id):
db_events = db.action_events_get(context, action_id)
return base.obj_make_list(context, cls(context),
objects.InstanceActionEvent, db_events)
| gpl-2.0 | -4,201,443,530,869,758,000 | 38.771028 | 79 | 0.600752 | false |
madarou/angular-django | app/myblog/urls.py | 1 | 1385 | from django.conf.urls import patterns, url
from app.myblog import views
urlpatterns = patterns('',
#django url
url(r'^$', views.index, name='index'),
url(r'^code/$', views.code, name='code'),
url(r'^lab/$', views.lab, name='lab'),
url(r'^blog/$', views.blog, name='blog'),
url(r'^knowledge/$', views.know, name='know'),
url(r'^me/$', views.about, name='about'),
url(r'^classify/(?P<bid>(\d)+)/$', views.classify, name='classify'),
url(r'^blog/(?P<bid>(\d)+)/$', views.blog, name='blog_with_id'),
url(r'^tag/(?P<bid>(\d)+)/$', views.tag, name='tag'),
#json url
url(r'^blog/comment/(?P<bid>(.*)+)/$', views.submit_comment, name='submit_comment'),
url(r'^get_knowledge/(?P<text>(.*)+)/$', views.get_know, name='know_json_text'),
url(r'^get_knowledge/$', views.get_know, name='know_json'),
url(r'^detail/$', views.detail, name='detail_json'),
url(r'^tools/$', views.get_tools, name='tools_json'),
url(r'^detail/(?P<bid>\d+)/$', views.detail, name='detail_json_id'),
url(r'^bd/$', views.get_blog, name='blog_json'),
url(r'^refresh_verify/$', views.refresh_verify, name='verify_json'),
url(r'^get_tag/(?P<tid>(.*)+)/$', views.get_tag, name='tag_json'),
url(r'^get_classify/(?P<cid>(.*)+)/$', views.get_classify, name='classify_json'),
url(r'^index_content/$', views.get_index, name='index_json'),
)
| bsd-2-clause | 4,079,235,829,388,876,000 | 45.166667 | 88 | 0.586282 | false |
EloYGomeZ/android-kenel-3.4.0 | scripts/build-all.py | 11 | 10171 | #! /usr/bin/env python
# Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'omap2*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
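# Illustrative result of scan_configs() (hypothetical defconfig names):
#   {'msm8960': 'arch/arm/configs/msm8960_defconfig',
#    'apq8064': 'arch/arm/configs/apq8064_defconfig', ...}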
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 | -5,368,241,596,461,078,000 | 34.072414 | 80 | 0.583718 | false |
bkpathak/HackerRank-Problems | leet-code/recurring_fraction.py | 2 | 1088 | # To force Pythn 2.7 for floating point division
# from __future__ import division
import collections
import math
def recurring_fraction(numerator,denominator):
if numerator == 0:
return 0
if denominator == 0:
return ''
    # Check if the numbers are negative
result = ''
if ((numerator < 0) ^ (denominator < 0)):
result = "-"
        numerator = abs(numerator)
        denominator = abs(denominator)
res = numerator // denominator
result += str(res)
    remainder = (numerator % denominator) * 10
if (remainder == 0):
return result
dict = {}
result += "."
while( remainder != 0):
if remainder in dict:
beg = dict.get(remainder)
part1 = result[0:beg]
part2 = result[beg:len(result)]
result = part1 + '(' + part2 + ')'
return result
dict[remainder] = len(result)
res = remainder // denominator
result += str(res)
remainder = (remainder % denominator) * 10
return result
print(recurring_fraction(1,2))
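# Expected behaviour (illustrative, traced by hand from the code above):
#   recurring_fraction(1, 2) -> '0.5'
#   recurring_fraction(2, 3) -> '0.(6)'  # the repeating part is parenthesized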
| mit | 7,396,879,520,662,295,000 | 26.2 | 50 | 0.581801 | false |
inducer/django-bootstrap3-datetimepicker | setup.py | 1 | 1143 | from setuptools import setup
setup(
name='django-bootstrap3-datetimepicker',
packages=['bootstrap3_datetime',],
package_data={'bootstrap3_datetime': ['static/bootstrap3_datetime/css/*.css',
'static/bootstrap3_datetime/js/*.js', ]},
include_package_data=True,
version='2.3',
description='Bootstrap3 compatible datetimepicker for Django projects.',
long_description=open('README.rst').read(),
author='Nakahara Kunihiko',
author_email='[email protected]',
url='https://github.com/gcaprio/django-bootstrap3-datetimepicker.git',
license='Apache License 2.0',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Environment :: Web Environment',
'Framework :: Django',
],
zip_safe=False,
)
| apache-2.0 | -3,795,952,842,707,160,000 | 37.1 | 83 | 0.628171 | false |
marcocor/semantic-view | src/main/python/annotate_jira.py | 1 | 4126 | from lxml import etree
from html2text import html2text
import dateparser
import unicodecsv as csv
import os
import logging
import argparse
import tagme
import re
MAIL_REGEX = r"\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}"
URL_REGEX = r"(https?|ftp)://[^\s/$.?#].[^\s]*"
def clean_text(text):
return re.sub("({})|({})".format(MAIL_REGEX, URL_REGEX), " ", text)
def item_to_data(item):
key = item.findtext("key")
title = item.findtext("title")
body = clean_text(html2text(item.findtext("description")))
time_str = item.xpath('./customfields/customfield[customfieldname = "Data invio mail"]/customfieldvalues/customfieldvalue/text()')[0]
time = dateparser.parse(time_str)
if not time:
time = dateparser.parse(time_str[4:])
if not time:
        logging.warning("Could not parse date {} in document {}".format(time_str, key))
        return None
return (key, title, body, time.isoformat())
def get_documents(xml_file):
tree = etree.parse(xml_file)
if tree.getroot() is None:
return None
items = tree.xpath('//item[.//customfieldname = "Data invio mail"]')
return filter(lambda item: item is not None, map(item_to_data, items))
DOCS_CSV_FIELDS = ['key', 'title', 'body', 'time']
ENTITIES_CSV_FIELDS = ['key', 'entity', 'score', 'time']
DOCS_SUBFOLDER = 'docs'
ENTITIES_SUBFOLDER = 'entities'
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('--infiles', dest='infiles', nargs='+', help='Jira XML input files.')
parser.add_argument('--outdir', dest='outdir', help='Output directory of CSV files containing annotations.')
parser.add_argument('--gcube-token', dest='gcube_token', help='Gcube authentication token to access Tagme API.')
parser.add_argument('--lang', dest='lang', default='en', help='Language of the documents. Must be accepted by Tagme.')
args = parser.parse_args()
docs_path_base = os.path.join(args.outdir, DOCS_SUBFOLDER)
entities_path_base = os.path.join(args.outdir, ENTITIES_SUBFOLDER)
if not os.path.isdir(docs_path_base):
os.makedirs(docs_path_base)
if not os.path.isdir(entities_path_base):
os.makedirs(entities_path_base)
for xml_file in args.infiles:
logging.info("Processing {}".format(xml_file))
for i, doc in enumerate(get_documents(xml_file)):
if doc is None:
logging.warning("Could not parse document {} from {}".format(i, xml_file))
continue
key, title, body, time = doc
doc_path = "{}.csv".format(os.path.join(docs_path_base, key))
entities_path = "{}.csv".format(os.path.join(entities_path_base, key))
if (os.path.isfile(doc_path) and os.path.isfile(entities_path)):
logging.info("Document {} already annotated, skipping.".format(key))
continue
logging.info("Annotating document key={} length={} ({})".format(key, len(body), xml_file))
tagme_response = tagme.annotate(u'{} {}'.format(title, body), args.gcube_token, lang=args.lang)
if not tagme_response:
logging.warning("Could not annoate document {} from {} (key {})".format(i, xml_file, key))
continue
annotations = tagme_response.get_annotations(min_rho=0.2)
logging.info("Found {} annotations".format(len(annotations)))
with open(doc_path, 'wb') as csv_doc_out:
w = csv.DictWriter(csv_doc_out, encoding='utf-8', fieldnames=DOCS_CSV_FIELDS)
w.writerow({'key': key, 'title': title, 'body': body, 'time': time})
with open(entities_path, 'wb') as csv_entities_out:
w = csv.DictWriter(csv_entities_out, encoding='utf-8', fieldnames=ENTITIES_CSV_FIELDS)
for annotation in annotations:
w.writerow({'key': key, 'entity': annotation.entity_title, 'score': annotation.score, 'time': time})
| apache-2.0 | -3,442,580,906,351,728,000 | 43.365591 | 137 | 0.619244 | false |
wnesl/gnuradio-IA | gr-trellis/src/examples/python/test_turbo_equalization.py | 10 | 5525 | #!/usr/bin/env python
from gnuradio import gr
from gnuradio import trellis, digital
from gnuradio import eng_notation
import math
import sys
import fsm_utils
def make_rx(tb,fo,fi,dimensionality,tot_constellation,K,interleaver,IT,Es,N0,type):
    metrics_in = trellis.metrics_f(fi.O(),dimensionality,tot_constellation,digital.TRELLIS_EUCLIDEAN) # data preprocessing to generate metrics for inner SISO
scale = gr.multiply_const_ff(1.0/N0)
gnd = gr.vector_source_f([0],True);
inter=[]
deinter=[]
siso_in=[]
siso_out=[]
# generate all blocks
for it in range(IT):
inter.append( trellis.permutation(interleaver.K(),interleaver.INTER(),fi.I(),gr.sizeof_float) )
siso_in.append( trellis.siso_f(fi,K,0,-1,True,False,type) )
deinter.append( trellis.permutation(interleaver.K(),interleaver.DEINTER(),fi.I(),gr.sizeof_float) )
if it < IT-1:
siso_out.append( trellis.siso_f(fo,K,0,-1,False,True,type) )
else:
siso_out.append( trellis.viterbi_s(fo,K,0,-1) ) # no soft outputs needed
# connect first stage
tb.connect (gnd,inter[0])
tb.connect (metrics_in,scale)
tb.connect (scale,(siso_in[0],1))
# connect the rest
for it in range(IT):
if it < IT-1:
tb.connect (metrics_in,(siso_in[it+1],1))
tb.connect (siso_in[it],deinter[it],(siso_out[it],1))
tb.connect (gnd,(siso_out[it],0))
tb.connect (siso_out[it],inter[it+1])
tb.connect (inter[it],(siso_in[it],0))
else:
tb.connect (siso_in[it],deinter[it],siso_out[it])
tb.connect (inter[it],(siso_in[it],0))
return (metrics_in,siso_out[IT-1])
def run_test (fo,fi,interleaver,Kb,bitspersymbol,K,dimensionality,tot_constellation,Es,N0,IT,seed):
tb = gr.top_block ()
# TX
src = gr.lfsr_32k_source_s()
src_head = gr.head (gr.sizeof_short,Kb/16) # packet size in shorts
    s2fsmi = gr.packed_to_unpacked_ss(bitspersymbol,gr.GR_MSB_FIRST) # unpack shorts to symbols compatible with the outer FSM input cardinality
enc_out = trellis.encoder_ss(fo,0) # initial state = 0
inter = trellis.permutation(interleaver.K(),interleaver.INTER(),1,gr.sizeof_short)
enc_in = trellis.encoder_ss(fi,0) # initial state = 0
    # essentially here we implement the combination of modulation and channel as a memoryless modulation (the memory induced by the channel is hidden in the inner FSM)
mod = gr.chunks_to_symbols_sf(tot_constellation,dimensionality)
# CHANNEL
add = gr.add_ff()
noise = gr.noise_source_f(gr.GR_GAUSSIAN,math.sqrt(N0/2),seed)
# RX
(head,tail) = make_rx(tb,fo,fi,dimensionality,tot_constellation,K,interleaver,IT,Es,N0,trellis.TRELLIS_MIN_SUM)
fsmi2s = gr.unpacked_to_packed_ss(bitspersymbol,gr.GR_MSB_FIRST) # pack FSM input symbols to shorts
dst = gr.check_lfsr_32k_s();
tb.connect (src,src_head,s2fsmi,enc_out,inter,enc_in,mod)
tb.connect (mod,(add,0))
tb.connect (noise,(add,1))
tb.connect (add,head)
tb.connect (tail,fsmi2s,dst)
tb.run()
ntotal = dst.ntotal ()
nright = dst.nright ()
runlength = dst.runlength ()
#print ntotal,nright,runlength
return (ntotal,ntotal-nright)
def main(args):
nargs = len (args)
if nargs == 3:
fname_out=args[0]
esn0_db=float(args[1])
rep=int(args[2])
else:
sys.stderr.write ('usage: test_turbo_equalization.py fsm_name_out Es/No_db repetitions\n')
sys.exit (1)
# system parameters
Kb=64*16 # packet size in bits (multiple of 16)
    modulation = fsm_utils.pam4 # see fsm_utils.py for available predefined modulations
    channel = fsm_utils.c_channel # see fsm_utils.py for available predefined test channels
fo=trellis.fsm(fname_out) # get the outer FSM specification from a file
fi=trellis.fsm(len(modulation[1]),len(channel)) # generate the FSM automatically
if fo.O() != fi.I():
sys.stderr.write ('Incompatible cardinality between outer and inner FSM.\n')
sys.exit (1)
bitspersymbol = int(round(math.log(fo.I())/math.log(2))) # bits per FSM input symbol
K=Kb/bitspersymbol # packet size in trellis steps
print 'size = ',K
interleaver=trellis.interleaver(K,666) # construct a random interleaver
tot_channel = fsm_utils.make_isi_lookup(modulation,channel,True) # generate the lookup table (normalize energy to 1)
dimensionality = tot_channel[0]
tot_constellation = tot_channel[1]
if len(tot_constellation)/dimensionality != fi.O():
sys.stderr.write ('Incompatible FSM output cardinality and lookup table size.\n')
sys.exit (1)
N0=pow(10.0,-esn0_db/10.0); # noise variance
IT = 3 # number of turbo iterations
tot_s=0 # total number of transmitted shorts
terr_s=0 # total number of shorts in error
terr_p=0 # total number of packets in error
for i in range(rep):
(s,e)=run_test(fo,fi,interleaver,Kb,bitspersymbol,K,dimensionality,tot_constellation,1,N0,IT,-long(666+i)) # run experiment with different seed to get different noise realizations
print s
tot_s=tot_s+s
terr_s=terr_s+e
terr_p=terr_p+(terr_s!=0)
if ((i+1)%10==0) : # display progress
print i+1,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
# estimate of the (short or bit) error rate
print rep,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
if __name__ == '__main__':
main (sys.argv[1:])
| gpl-3.0 | -7,955,564,478,400,741,000 | 37.908451 | 187 | 0.65629 | false |
rubikloud/gpdb | gpMgmt/bin/gppylib/datetimeutils.py | 50 | 19603 | #!/usr/bin/env python
# $Id: $
# $Change: $
# $DateTime: $
# $Author: $
"""
Date and time input conversion
Module contents:
---- common
class DatetimeValueError - subclass of ValueError
---- datetime or date
datetime_syntax_msg - a string briefly describing the input format
str_to_datetime() - Interprets a string as a date and (optional) time.
scan_datetime() - Consumes a date and time from a string.
---- timedelta
signed_duration_syntax_msg - a string briefly describing the input format
unsigned_duration_syntax_msg - a string briefly describing the input format
str_to_duration() - Interprets a string as a duration or length of time.
scan_duration() - Consumes a duration from a string.
Examples:
from lib.datetimeutils import str_to_datetime
import datetime
dt = str_to_datetime('2008-07-13 14:15:16.1')
dt = str_to_datetime('20080713 141516.123456')
dt = str_to_datetime(' 20080713T1415 ')
d = str_to_datetime('2008-7-4')
from lib.datetimeutils import str_to_duration
import timedelta
td = str_to_duration('1') # one hour
td = str_to_duration(' 1:2:3 ') # one hour, two minutes and three seconds
td = str_to_duration(':2:3.4') # two minutes and 3.4 seconds
td = str_to_duration('-72') # negative 72 hours (normalized to -3 days)
The datetime syntax is based on ISO 8601 and RFC 3339. See...
http://www.rfc.net/rfc3339.html
http://hydracen.com/dx/iso8601.htm
http://www.cl.cam.ac.uk/~mgk25/iso-time.html
In the docstrings, the examples preceded by '>>>' can be executed and verified
automatically by the 'doctest' package of the Python standard library.
"""
from datetime import date, datetime, timedelta
import re
#--------------------------------- common ---------------------------------
class DatetimeValueError (ValueError):
"""
Error on conversion from string to date, datetime, or timedelta.
DatetimeValueError fields:
description
pos
endpos
badness
"""
def __init__(self, description, string, pos=None, endpos=None):
# Save enough info in the exception object so that a caller's
# exception handler can create a differently formatted message
# in case they don't like the format we provide.
global _spacepat
if endpos is None:
endpos = len(string or '')
if pos and string:
# avoid copying all of string[pos:endpos], in case it is big
p = _spacepat.match(string, pos, endpos).end()
if endpos > p+30:
badness = string[p:p+27].rstrip(' \t') + '...'
else:
badness = string[p:endpos].rstrip(' \t')
else:
badness = string
self.description, self.pos, self.endpos, self.badness = description, pos, endpos, badness
if badness:
description = '"%s" ... %s' % (badness, description)
ValueError.__init__(self, description)
#-------------------------------- datetime --------------------------------
datetime_syntax_msg = ('Specify date and time as "YYYY-MM-DD[ HH:MM[:SS[.S]]" '
'or "YYYYMMDD[ HHMM[SS[.S]]]". If both date and time '
'are given, a space or letter "T" must separate them.')
def str_to_datetime(string, pos=0, endpos=None):
"""
Interprets string[pos:endpos] as a date and (optional) time.
Returns a datetime.datetime object if string[pos:endpos] contains a valid
date and time. Returns a datetime.date object for a date with no time.
Raises DatetimeValueError if string does not contain a valid date, or
contains an invalid time or anything else other than whitespace.
Examples...
# delimited format
>>> str_to_datetime('2008-7-13')
datetime.date(2008, 7, 13)
>>> str_to_datetime('2008-7-13 2:15')
datetime.datetime(2008, 7, 13, 2, 15)
>>> str_to_datetime(' 2008-07-13 14:15:16.123456789 ')
datetime.datetime(2008, 7, 13, 14, 15, 16, 123456)
# numeric format
>>> str_to_datetime(' 20080713 ')
datetime.date(2008, 7, 13)
>>> str_to_datetime('20080713 141516.123')
datetime.datetime(2008, 7, 13, 14, 15, 16, 123000)
# slicing
>>> str_to_datetime('9200807139', 1, 9)
datetime.date(2008, 7, 13)
# 'T' can separate date and time
>>> str_to_datetime('2008-7-3t2:15')
datetime.datetime(2008, 7, 3, 2, 15)
>>> str_to_datetime(' 20080713T1415 ')
datetime.datetime(2008, 7, 13, 14, 15)
Errors... (for more, see scan_datetime() below)
>>> str_to_datetime('')
Traceback (most recent call last):
DatetimeValueError: Specify date and time as "YYYY-MM-DD[ HH:MM[:SS[.S]]" or "YYYYMMDD[ HHMM[SS[.S]]]". If both date and time are given, a space or letter "T" must separate them.
>>> str_to_datetime('nogood')
Traceback (most recent call last):
DatetimeValueError: "nogood" ... Specify date and time as "YYYY-MM-DD[ HH:MM[:SS[.S]]" or "YYYYMMDD[ HHMM[SS[.S]]]". If both date and time are given, a space or letter "T" must separate them.
>>> str_to_datetime(' 20080713 T1415 ') # can't have spaces around 'T'
Traceback (most recent call last):
DatetimeValueError: " 20080713 T1415 " ... date is followed by unrecognized "T1415"
>>> str_to_datetime('2008-7-13 2:15 &')
Traceback (most recent call last):
DatetimeValueError: "2008-7-13 2:15 &" ... date and time are followed by unrecognized "&"
>>> str_to_datetime('2008-7-13 abcdefghijklmnop')
Traceback (most recent call last):
DatetimeValueError: "2008-7-13 abcdefghijklmnop" ... date is followed by unrecognized "abcdefghi"
"""
global datetime_syntax_msg
if endpos is None:
endpos = len(string)
value, nextpos = scan_datetime(string, pos, endpos)
if value is None:
# string is empty or doesn't conform to the date syntax we require
raise DatetimeValueError(datetime_syntax_msg, string, pos, endpos)
elif nextpos < endpos and not string[nextpos:endpos].isspace():
# got valid date or datetime, but there is something more after it
if hasattr(value, 'hour'):
msg = 'date and time are followed by unrecognized "%s"'
else:
msg = 'date is followed by unrecognized "%s"'
msg %= string[nextpos:min(nextpos+10,endpos)].strip()
raise DatetimeValueError(msg, string, pos, endpos)
return value
def scan_datetime(string, pos=0, endpos=None):
"""
Consumes an initial substring of the slice string[pos:endpos] which
represents a date and (optional) time. Leading whitespace is ignored.
Raises DatetimeValueError if the text syntactically resembles a date or
datetime but fails semantic checks (e.g. field out of range, such as the
day in '2008-02-30').
Otherwise returns a tuple (value, nextpos) in which 'value' is either a
datetime.datetime object, a datetime.date object, or None; and 'nextpos'
is the index of the next character of string (pos <= nextpos <= endpos).
(None, pos) is returned if the beginning of the string (or slice) does
not conform to either the delimited date format YYYY-[M]M-[D]D or the
numeric date format YYYYMMDD.
(dt, nextpos) in which dt is a datetime.datetime object, is returned if
a valid date and time are found. At least one space or tab, or else the
single letter 'T' or 't' with no whitespace, must separate date and time.
Time must be delimited using colons [H]H:MM:[SS[.fraction]] if the
date is delimited, or must be numeric HHMM[SS[.fraction]] if the date is
numeric. The fraction may be any number of digits (at most 6 are kept).
(d, nextpos) in which d is a datetime.date object, is returned if a
valid date is found but the subsequent characters do not syntactically
resemble a time in the required format.
Examples... (for other syntax examples, see str_to_datetime() above)
>>> scan_datetime('2008-6-30')
(datetime.date(2008, 6, 30), 9)
>>> scan_datetime(' 2008-07-13 14:15:16 ')
(datetime.datetime(2008, 7, 13, 14, 15, 16), 20)
>>> scan_datetime(' nogood ')
(None, 0)
Errors...
>>> scan_datetime('2008-06-31')
Traceback (most recent call last):
DatetimeValueError: "2008-06-31" ... day is out of range for month
>>> scan_datetime('2008-07-13 24:00')
Traceback (most recent call last):
DatetimeValueError: "2008-07-13 24:00" ... hour must be in 0..23
>>> scan_datetime('2008-07-13 14:15:1 ')
Traceback (most recent call last):
DatetimeValueError: "2008-07-13 14:15:1 " ... second should have 2 digits
>>> scan_datetime('2008-07-13 14:5:16 ')
Traceback (most recent call last):
DatetimeValueError: "2008-07-13 14:5:16 " ... minute should have 2 digits
>>> scan_datetime('20080713 14151 ')
Traceback (most recent call last):
DatetimeValueError: "20080713 14151 " ... time should have 4 or 6 digits (HHMM or HHMMSS)
>>> scan_datetime('20080713 1.234')
Traceback (most recent call last):
DatetimeValueError: "20080713 1.234" ... time should have 6 digits before decimal point (HHMMSS.sss)
"""
global _datepat, _colontimepat, _numerictimepat
if endpos is None:
endpos = len(string)
datematch = _datepat.match(string, pos, endpos)
if datematch is None:
return None, pos
try:
nextpos = datematch.end()
yyyy, m, d, mm, dd = datematch.groups()
if m:
# delimited format
if len(d) <> 2 and len(d) <> 1:
raise ValueError, 'day should have 1 or 2 digits'
year, month, day = int(yyyy), int(m), int(d)
timepat = _colontimepat
else:
# numeric format
if len(dd) > 2:
raise ValueError, 'date should have 8 digits (YYYYMMDD)'
year, month, day = int(yyyy), int(mm), int(dd)
timepat = _numerictimepat
timematch = timepat.match(string, nextpos, endpos)
if timematch is None:
return date(year, month, day), nextpos
nextpos = timematch.end()
if m:
# delimited format
hh, mm, ss, frac = timematch.groups()
if len(hh) > 2:
raise ValueError, 'hour should have 1 or 2 digits'
if len(mm) <> 2:
raise ValueError, 'minute should have 2 digits'
if ss is not None and len(ss) <> 2:
raise ValueError, 'second should have 2 digits'
else:
# numeric format
hhmmss, frac = timematch.groups()
if len(hhmmss) == 6:
hh, mm, ss = hhmmss[:2], hhmmss[2:4], hhmmss[4:]
elif frac:
raise ValueError, 'time should have 6 digits before decimal point (HHMMSS.sss)'
elif len(hhmmss) == 4:
hh, mm, ss = hhmmss[:2], hhmmss[2:], None
else:
raise ValueError, 'time should have 4 or 6 digits (HHMM or HHMMSS)'
if frac:
microsecond = int((frac + '000000')[1:7])
dt = datetime(year, month, day, int(hh), int(mm), int(ss), microsecond)
elif ss:
dt = datetime(year, month, day, int(hh), int(mm), int(ss))
else:
dt = datetime(year, month, day, int(hh), int(mm))
return dt, nextpos
except ValueError, e:
# Nonsensical date or time (e.g. field out of range, such as month > 12)
raise DatetimeValueError(str(e), string, pos, nextpos)
#------------------------------- timedelta -------------------------------
signed_duration_syntax_msg = 'Specify duration as [hours][:minutes[:seconds[.fraction]]]'
unsigned_duration_syntax_msg = 'Specify duration as [+|-][hours][:minutes[:seconds[.fraction]]]'
def str_to_duration(string, pos=0, endpos=None, signed=True):
"""
Interprets string[pos:endpos] as a duration or length of time in the form:
[+|-][hours][:minutes[:seconds[.fraction]]]
If string[pos:endpos] contains a valid duration, it is converted to a
datetime.timedelta object, which is returned.
Raises DatetimeValueError if string[pos:endpos] does not contain a
valid duration; or if the duration includes a '+' or '-' sign and the
caller specifies signed=False; or if the duration is preceded or
followed by anything but whitespace.
Examples...
>>> str_to_duration('48')
datetime.timedelta(2)
>>> str_to_duration(' :120 ')
datetime.timedelta(0, 7200)
>>> str_to_duration(':0:72')
datetime.timedelta(0, 72)
>>> str_to_duration(':1:30')
datetime.timedelta(0, 90)
>>> str_to_duration('1:2:3.123456789')
datetime.timedelta(0, 3723, 123456)
# slicing
>>> str_to_duration('9249', 1, 3)
datetime.timedelta(1)
# duration can be negative if signed=True
>>> str_to_duration('-1')
datetime.timedelta(-1, 82800)
>>> str_to_duration('-:1')
datetime.timedelta(-1, 86340)
Errors... (for more, see scan_duration() below)
>>> str_to_duration('')
Traceback (most recent call last):
DatetimeValueError: Specify duration as [hours][:minutes[:seconds[.fraction]]]
>>> str_to_duration(':')
Traceback (most recent call last):
DatetimeValueError: ":" ... Specify duration as [hours][:minutes[:seconds[.fraction]]]
>>> str_to_duration('1:2: 3')
Traceback (most recent call last):
DatetimeValueError: "1:2: 3" ... duration is followed by unrecognized ": 3"
"""
    global signed_duration_syntax_msg, unsigned_duration_syntax_msg
if endpos is None:
endpos = len(string)
value, nextpos = scan_duration(string, pos, endpos, signed)
if value is None:
# string is empty or doesn't conform to the syntax we require
if signed:
raise DatetimeValueError(signed_duration_syntax_msg, string, pos, endpos)
else:
raise DatetimeValueError(unsigned_duration_syntax_msg, string, pos, endpos)
elif nextpos < endpos and not string[nextpos:endpos].isspace():
# got valid duration, but there is something more after it
msg = ('duration is followed by unrecognized "%s"'
% string[nextpos:min(nextpos+10,endpos)].strip())
raise DatetimeValueError(msg, string, pos, endpos)
return value
def scan_duration(string, pos=0, endpos=None, signed=True):
"""
Consumes an initial substring of the slice string[pos:endpos]
representing a duration of time. Leading whitespace is ignored.
If 'signed' is True, a '+' or '-' sign may precede the duration.
Note that a negative duration stored in a datetime.timedelta object
is normalized so that only the 'days' field is negative; for example,
'-:0:0.000001' is represented as datetime.timedelta(days=-1,
seconds=86399, microseconds=999999).
(None, pos) is returned if the input does not conform to the
[+|-][hours][:minutes[:seconds[.fraction]]] format.
If a valid duration is found, a tuple (td, nextpos) is returned,
where 'td' is a datetime.timedelta object, and 'nextpos' is the
index of the next character of the string (pos <= nextpos <= endpos).
Raises DatetimeValueError if the text syntactically resembles a
duration but fails semantic checks (e.g. field out of range, such
as minutes > 59 in '100:60:00'); or if 'signed' is False and a
'+' or '-' sign is found.
Examples... (for other syntax examples, see str_to_duration() above)
>>> scan_duration('1:2:3.4')
(datetime.timedelta(0, 3723, 400000), 7)
>>> scan_duration('')
(None, 0)
>>> scan_duration('bad')
(None, 0)
Errors...
>>> scan_duration('100:200:300')
Traceback (most recent call last):
DatetimeValueError: "100:200:300" ... minutes should be in 0..59
>>> scan_duration('-:1', signed=False)
Traceback (most recent call last):
DatetimeValueError: "-:1" ... duration should be unsigned
"""
global _durationpat
if endpos is None:
endpos = len(string)
match = _durationpat.match(string, pos, endpos)
sign, h, m, s, f = match.groups()
if not h and not m:
return None, pos
try:
hours = minutes = seconds = microseconds = 0
if h:
hours = int(h)
if m:
minutes = int(m)
if s:
seconds = int(s)
if f and len(f) > 1:
microseconds = int((f + '000000')[1:7])
if hours > 0 and minutes > 59:
raise ValueError('minutes should be in 0..59')
minutes += hours * 60
if minutes > 0 and seconds > 59:
raise ValueError('seconds should be in 0..59')
seconds += minutes * 60
td = timedelta(seconds=seconds, microseconds=microseconds)
if sign:
if not signed:
raise ValueError('duration should be unsigned')
if sign == '-':
td = -td
return td, match.end()
except OverflowError, e:
# Nonsensical duration (e.g. field out of range)
raise DatetimeValueError(str(e), string, pos, match.end())
except ValueError, e:
# Nonsensical duration (e.g. field out of range)
raise DatetimeValueError(str(e), string, pos, match.end())
#-------------------------------- private --------------------------------
_datepat = (r'\s*' # skip any leading whitespace
r'(\d\d\d\d)' # yyyy \1
r'(?:' # followed by either
r'(?:-(\d\d?)-(\d*))|' # -m[m]-d[d] or \2 \3
r'(?:(\d\d)(\d\d+))' # mmdd \4 \5
r')') # note dd absorbs excess digits
_colontimepat = (r'(?:\s+|[Tt])' # whitespace or 'T'
r'(\d+)' # [h]h \1
r':(\d+)' # :mm \2
r'(?::(\d*)(\.\d*)?)?') # [:ss[.frac]] \3 \4
# hh, mm, ss absorb excess digits
_numerictimepat = (r'(?:\s+|[Tt])' # whitespace or 'T'
r'(\d+)' # hhmmss \1
r'(\.\d*)?') # [.frac] \2
# hhmmss absorbs excess digits
_durationpat = (r'\s*' # skip any leading whitespace
r'([+-])?' # [+|-] \1
r'(\d*)' # [hours] \2
r'(?:' # [
r':(\d+)' # :minutes \3
r'(?::(\d+)(\.\d*)?)?' # [:seconds[.frac]] \4 \5
r')?') # ]
_spacepat = r'\s*' # whitespace
_datepat = re.compile(_datepat)
_colontimepat = re.compile(_colontimepat)
_numerictimepat = re.compile(_numerictimepat)
_durationpat = re.compile(_durationpat)
_spacepat = re.compile(_spacepat)
# If invoked as a script, execute the examples in each function's docstring
# and verify the expected output. Produces no output if verification is
# successful, unless -v is specified. Use -v option for verbose test output.
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| apache-2.0 | 6,031,873,824,161,372,000 | 40.096436 | 196 | 0.585268 | false |
konsP/synnefo | snf-tools/synnefo_tools/burnin/logger.py | 8 | 15129 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This is the logging class for burnin
It supports logging both for the stdout/stderr as well as file logging at the
same time.
The stdout/stderr logger supports verbose levels and colors but the file
logging doesn't (we use the info verbose level for our file logger).
Our loggers have primitive support for handling parallel execution (even though
burnin doesn't support it yet). To do so the stdout/stderr logger prepends the
name of the test under execution to every line it prints. On the other hand the
file logger waits to lock the file, then reads it, prints the message to the
corresponding line and closes the file.
"""
import os
import sys
import os.path
import logging
import datetime
from synnefo_tools.burnin import filelocker
# --------------------------------------------------------------------
# Constant variables
LOCK_EXT = ".lock"
SECTION_SEPARATOR = \
"-- -------------------------------------------------------------------"
SECTION_PREFIX = "-- "
SECTION_RUNNED = "Tests Run"
SECTION_RESULTS = "Results"
SECTION_NEW = "__ADD_NEW_SECTION__"
SECTION_PASSED = " * Passed:"
SECTION_FAILED = " * Failed:"
# Ignore `paramiko' logger
logging.getLogger('paramiko').addHandler(logging.NullHandler())
# --------------------------------------------------------------------
# Helper functions
def _cyan(msg):
"""Bold High Intensity Cyan color"""
return "\x1b[1;96m" + str(msg) + "\x1b[0m"
def _yellow(msg):
"""Yellow color"""
return "\x1b[33m" + str(msg) + "\x1b[0m"
def _red(msg):
"""Yellow color"""
return "\x1b[31m" + str(msg) + "\x1b[0m"
def _magenta(msg):
"""Magenta color"""
return "\x1b[35m" + str(msg) + "\x1b[0m"
def _green(msg):
"""Green color"""
return "\x1b[32m" + str(msg) + "\x1b[0m"
def _format_message(msg, *args):
"""Format the message using the args"""
if args:
return (msg % args) + "\n"
else:
return msg + "\n"
def _list_to_string(lst, append=""):
"""Convert a list of strings to string
Append the value given in L{append} in front of all lines
(except of the first line).
"""
if isinstance(lst, list):
return append.join(lst).rstrip('\n')
else:
return lst.rstrip('\n')
# --------------------------------------
def _locate_sections(contents):
"""Locate the sections inside the logging file"""
i = 0
res = []
for cnt in contents:
if SECTION_SEPARATOR in cnt:
res.append(i+1)
i += 1
return res
def _locate_input(contents, section):
"""Locate position to insert text
    Given a section, locate the next position to insert text inside that
    section.
"""
sect_locs = _locate_sections(contents)
if section == SECTION_NEW:
# We want to add a new section
# Just return the position of SECTION_RESULTS
for obj in sect_locs:
if SECTION_RESULTS in contents[obj]:
return obj - 1
else:
# We will add our message in this location
for (index, obj) in enumerate(sect_locs):
if section in contents[obj]:
return sect_locs[index + 1] - 3
# We didn't find our section??
sys.stderr.write("Section %s could not be found in logging file\n"
% section)
sys.exit("Error in logger._locate_input")
def _add_testsuite_results(contents, section, testsuite):
"""Add the given testsuite to results
Well we know that SECTION_FAILED is the last line and SECTION_PASSED is the
line before, so we are going to cheat here and use this information.
"""
if section == SECTION_PASSED:
line = contents[-2].rstrip()
if line.endswith(":"):
new_line = line + " " + testsuite + "\n"
else:
new_line = line + ", " + testsuite + "\n"
contents[-2] = new_line
elif section == SECTION_FAILED:
line = contents[-1].rstrip()
if line.endswith(":"):
new_line = line.rstrip() + " " + testsuite + "\n"
else:
new_line = line.rstrip() + ", " + testsuite + "\n"
contents[-1] = new_line
else:
sys.stderr.write("Unknown section %s in _add_testsuite_results\n"
% section)
sys.exit("Error in logger._add_testsuite_results")
return contents
def _write_log_file(file_location, section, message):
"""Write something to our log file
    For this we have to get the lock, read and parse the file, add the new
    message and re-write the file.
"""
# Get the lock
file_lock = os.path.splitext(file_location)[0] + LOCK_EXT
with filelocker.lock(file_lock, filelocker.LOCK_EX):
with open(file_location, "r+") as log_file:
contents = log_file.readlines()
if section == SECTION_PASSED or section == SECTION_FAILED:
# Add testsuite to results
new_contents = \
_add_testsuite_results(contents, section, message)
else:
# Add message to its line
input_loc = _locate_input(contents, section)
new_contents = \
contents[:input_loc] + [message] + contents[input_loc:]
log_file.seek(0)
log_file.write("".join(new_contents))
# --------------------------------------------------------------------
# The Log class
class Log(object):
"""Burnin logger
"""
# ----------------------------------
# pylint: disable=too-many-arguments
def __init__(self, output_dir, verbose=1, use_colors=True,
in_parallel=False, log_level=0, curr_time=None):
"""Initialize our loggers
The file to be used by our file logger will be created inside
the L{output_dir} with name the current timestamp.
@type output_dir: string
@param output_dir: the directory to save the output file
@type verbose: int
@param verbose: the verbose level to use for stdout/stderr logger
0: verbose at minimum level (only which test we are running now)
1: verbose at info level (information about our running test)
2: verbose at debug level
@type use_colors: boolean
@param use_colors: use colors for out stdout/stderr logger
@type in_parallel: boolean
@param in_parallel: this signifies that burnin is running in parallel
@type log_level: int
@param log_level: logging level
0: log to console and file
1: log to file only and output the results to console
2: don't log
@type curr_time: datetime.datetime()
@param curr_time: The current time (used as burnin's run id)
"""
self.verbose = verbose
self.use_colors = use_colors
self.in_parallel = in_parallel
self.log_level = log_level
assert output_dir
if curr_time is None:
curr_time = datetime.datetime.now()
timestamp = datetime.datetime.strftime(
curr_time, "%Y%m%d%H%M%S (%a %b %d %Y %H:%M)")
file_name = timestamp + ".log"
self.file_location = os.path.join(output_dir, file_name)
self._write_to_stdout(None, "Starting burnin with id %s\n" % timestamp)
# Create the logging file
self._create_logging_file(timestamp, output_dir)
def _create_logging_file(self, timestamp, output_dir):
"""Create the logging file"""
if self.log_level > 1:
return
# Create file for logging
output_dir = os.path.expanduser(output_dir)
if not os.path.exists(output_dir):
self.debug(None, "Creating directory %s", output_dir)
try:
os.makedirs(output_dir)
except OSError as err:
msg = ("Failed to create folder \"%s\" with error: %s\n"
% (output_dir, err))
sys.stderr.write(msg)
sys.exit("Failed to create log folder")
self.debug(None, "Using \"%s\" file for logging", self.file_location)
with open(self.file_location, 'w') as out_file:
out_file.write(SECTION_SEPARATOR + "\n")
out_file.write("%s%s with id %s:\n\n\n\n" %
(SECTION_PREFIX, SECTION_RUNNED, timestamp))
out_file.write(SECTION_SEPARATOR + "\n")
out_file.write("%s%s:\n\n" % (SECTION_PREFIX, SECTION_RESULTS))
out_file.write(SECTION_PASSED + "\n" + SECTION_FAILED + "\n")
def __del__(self):
"""Delete the Log object"""
self.print_logfile_to_stdout()
# Remove the lock file
if hasattr(self, "file_location"):
file_lock = os.path.splitext(self.file_location)[0] + LOCK_EXT
try:
os.remove(file_lock)
except OSError:
self.debug(None, "Couldn't delete lock file")
def print_logfile_to_stdout(self):
"""Print the contents of our log file to stdout"""
if self.log_level == 1:
with open(self.file_location, 'r') as fin:
sys.stdout.write(fin.read())
# ----------------------------------
# Logging methods
def debug(self, section, msg, *args):
"""Debug messages (verbose 2)
We show debug messages only to stdout. The message will be formatted
using the args.
"""
msg = " (DD) " + _list_to_string(msg, append=" ")
if self.verbose >= 2:
colored_msg = self._color_message(None, msg, *args)
self._write_to_stdout(section, colored_msg)
def log(self, section, msg, *args):
"""Normal messages (verbose 0)"""
assert section, "Section can not be empty"
msg = _list_to_string(msg)
colored_msg = self._color_message(None, msg, *args)
self._write_to_stdout(section, colored_msg)
plain_msg = _format_message(msg, *args)
self._write_to_file(section, plain_msg)
def info(self, section, msg, *args):
"""Info messages (verbose 1)
Prepare message and write it to file logger and stdout logger
"""
assert section, "Section can not be empty"
msg = " " + _list_to_string(msg, " ")
if self.verbose >= 1:
colored_msg = self._color_message(None, msg, *args)
self._write_to_stdout(section, colored_msg)
plain_msg = _format_message(msg, *args)
self._write_to_file(section, plain_msg)
def warning(self, section, msg, *args):
"""Warning messages"""
assert section, "Section can not be empty"
msg = " (WW) " + _list_to_string(msg, " ")
colored_msg = self._color_message(_yellow, msg, *args)
self._write_to_stderr(section, colored_msg)
plain_msg = _format_message(msg, *args)
self._write_to_file(section, plain_msg)
def error(self, section, msg, *args):
"""Error messages"""
assert section, "Section can not be empty"
msg = " (EE) " + _list_to_string(msg, " ")
colored_msg = self._color_message(_red, msg, *args)
self._write_to_stderr(section, colored_msg)
plain_msg = _format_message(msg, *args)
self._write_to_file(section, plain_msg)
def _write_to_stdout(self, section, msg):
"""Write to stdout"""
if self.log_level > 0:
return
if section is not None and self.in_parallel:
sys.stdout.write(section + ": " + msg)
else:
sys.stdout.write(msg)
def _write_to_stderr(self, section, msg):
"""Write to stderr"""
if self.log_level > 0:
return
if section is not None and self.in_parallel:
sys.stderr.write(section + ": " + msg)
else:
sys.stderr.write(msg)
def _write_to_file(self, section, msg):
"""Write to file"""
if self.log_level > 1:
return
_write_log_file(self.file_location, section, msg)
# ----------------------------------
# Handle testsuites
def testsuite_start(self, testsuite):
"""Start a new testsuite
Add a new section in the logging file
"""
assert testsuite, "Testsuite name can not be emtpy"
# Add a new section in the logging file
test_runned = " * " + testsuite + "\n"
self._write_to_file(SECTION_RUNNED, test_runned)
new_section_entry = \
SECTION_SEPARATOR + "\n" + SECTION_PREFIX + testsuite + "\n\n\n\n"
self._write_to_file(SECTION_NEW, new_section_entry)
# Add new section to the stdout
msg = "Starting testsuite %s" % testsuite
colored_msg = self._color_message(_magenta, msg)
self._write_to_stdout(None, colored_msg)
def testsuite_success(self, testsuite):
"""A testsuite has successfully finished
Update Results
"""
assert testsuite, "Testsuite name can not be emtpy"
# Add our testsuite to Results
self._write_to_file(SECTION_PASSED, testsuite)
# Add success to stdout
msg = "Testsuite %s passed" % testsuite
colored_msg = self._color_message(_green, msg)
self._write_to_stdout(None, colored_msg)
def testsuite_failure(self, testsuite):
"""A testsuite has failed
Update Results
"""
assert testsuite, "Testsuite name can not be emtpy"
# Add our testsuite to Results
self._write_to_file(SECTION_FAILED, testsuite)
        # Add failure to stdout
msg = "Testsuite %s failed" % testsuite
colored_msg = self._color_message(_red, msg)
self._write_to_stdout(None, colored_msg)
# ----------------------------------
# Colors
def _color_message(self, color_fun, msg, *args):
"""Color a message before printing it
The color_fun parameter is used when we want the whole message to be
colored.
"""
if self.use_colors:
if callable(color_fun):
if args:
return color_fun((msg % args)) + "\n"
else:
return color_fun(msg) + "\n"
else:
args = tuple([_cyan(arg) for arg in args])
return _format_message(msg, *args)
else:
return _format_message(msg, *args)
| gpl-3.0 | -2,835,812,318,677,861,000 | 31.960784 | 79 | 0.572146 | false |
abrt/gnome-abrt | src/gnome_abrt/errors.py | 1 | 1218 | ## Copyright (C) 2012 ABRT team <[email protected]>
## Copyright (C) 2001-2005 Red Hat, Inc.
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
class GnomeAbrtError(Exception):
pass
class InvalidProblem(GnomeAbrtError):
def __init__(self, problem_id, message=None):
super().__init__(message)
self.problem_id = problem_id
class UnavailableSource(GnomeAbrtError):
def __init__(self, source, temporary, message=None):
super().__init__(message)
self.source = source
self.temporary = temporary
| gpl-3.0 | -2,410,095,344,725,157,400 | 35.909091 | 79 | 0.716749 | false |
formiano/enigma2-4.4 | lib/python/Screens/VfdSkinSelector.py | 13 | 6028 | from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Plugins.Plugin import PluginDescriptor
import Components.config
from Components.Label import Label
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from os import path, walk
from enigma import eEnv
from skin import *
import os
class VFDSkinSelector(Screen):
skin = """
<screen name="VFD Skin-Selector" position="center,center" size="700,400" title="VFD Skin-Selector" transparent="0">
<eLabel text="Select skin:" position="50,30" size="250,26" zPosition="1" foregroundColor="#FFE500" font="Regular;22" halign="left" />
<eLabel text="Preview:" position="380,30" size="250,26" zPosition="1" foregroundColor="#FFE500" font="Regular;22" halign="left" />
<widget name="SkinList" render="Listbox" position="50,60" size="270,200" zPosition="1" enableWrapAround="1" scrollbarMode="showOnDemand" />
<widget name="Preview" position="380,65" size="280,210" zPosition="1" backgroundColor="background" transparent="0" alphatest="on" />
<eLabel text="Select your skin and press OK to activate the selected skin" position="0,307" halign="center" size="700,26" zPosition="1" foregroundColor="#FFE500" font="Regular;22" />
<ePixmap name="red" position="50,350" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap name="green" position="220,350" zPosition="1" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap name="blue" position="520,350" zPosition="1" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget name="key_red" position="50,350" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_green" position="220,350" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_blue" position="520,350" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
skinlist = []
root = eEnv.resolve("/usr/share/enigma2/display/")
def __init__(self, session, args = None):
Screen.__init__(self, session)
self.list = []
self.previewPath = ""
self.actual = None
path.walk(self.root, self.find, "")
self["key_red"] = StaticText(_("Close"))
self["introduction"] = StaticText(_("Press OK to activate the selected skin."))
self.skinlist.sort()
self["SkinList"] = MenuList(self.list)
self["Preview"] = Pixmap()
self["actions"] = NumberActionMap(["WizardActions", "OkCancelActions", "ColorActions"],
{
"ok": self.ok,
"back": self.close,
"red": self.close,
"green": self.ok,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right,
"blue": self.info,
}, -1)
self["key_red"] = Label(_("Cancel"))
self["key_green"] = Label(_("OK"))
self["key_blue"] = Label(_("Info"))
self.fill()
self.onLayoutFinish.append(self.layoutFinished)
def fill(self):
i = 0
self.filesArray = sorted(filter(lambda x: x.endswith('.xml'), os.listdir(self.root)))
config.skin.display_skin = ConfigSelection(choices = self.filesArray)
while i < len(self.filesArray):
self.list.append((_(self.filesArray[i].split('.')[0]), "chose"))
i = i + 1
else:
pass
idx = 0
def layoutFinished(self):
tmp = "config.skin.display_skin.value"
tmp = eval(tmp)
idx = 0
i = 0
while i < len(self.list):
if tmp.split('.')[0] in self.list[i][0]:
idx = i
break
else:
pass
i += 1
self["SkinList"].moveToIndex(idx)
self.loadPreview()
def up(self):
self["SkinList"].up()
self["SkinList"].l.getCurrentSelection()
self.loadPreview()
def down(self):
self["SkinList"].down()
self["SkinList"].l.getCurrentSelection()
self.loadPreview()
def left(self):
self["SkinList"].pageUp()
self["SkinList"].l.getCurrentSelection()
self.loadPreview()
def right(self):
self["SkinList"].pageDown()
self["SkinList"].l.getCurrentSelection()
self.loadPreview()
def info(self):
aboutbox = self.session.open(MessageBox,_("\nVFD Skin-Selector\nby satinfo & henrylicious (thank you for support)\n\nPlugin to select skin for VFD-Display\n\n - for GigaBlue UE and GigaBlue Quad\n - for VU+ Ultimo and VU+ Duo2"), MessageBox.TYPE_INFO)
aboutbox.setTitle(_("About..."))
def find(self, arg, dirname, names):
for x in names:
if x.startswith("skinvfd") and x.endswith(".xml"):
if dirname <> self.root:
subdir = dirname[19:]
skinname = x
skinname = subdir + "/" + skinname
self.list.append(skinname)
else:
skinname = x
self.list.append(skinname)
def ok(self):
skinfile = self["SkinList"].getCurrent()[0] + ".xml"
addSkin(skinfile, SCOPE_CONFIG)
config.skin.display_skin.value = skinfile
config.skin.display_skin.save()
print "Selected Value", config.skin.display_skin.value
restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply new skin.\nDo you want to Restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
def loadPreview(self):
pngpath = self["SkinList"].l.getCurrentSelection()[0] + "_prev.png"
try:
pngpath = self.root + pngpath
except AttributeError:
pass
if not os.path.exists(pngpath):
pngpath = "/usr/share/enigma2/display/noprev.png"
if self.previewPath != pngpath:
self.previewPath = pngpath
self["Preview"].instance.setPixmapFromFile(self.previewPath)
Screen.hide(self)
Screen.show(self)
def restartGUI(self, answer):
if answer is True:
self.session.open(TryQuitMainloop, 3)
| gpl-2.0 | -1,301,362,934,798,818,600 | 37.394904 | 253 | 0.694592 | false |
himanshu-setia/keystone | keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py | 8 | 2938 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
    # You can specify primary keys when creating tables; however, adding
    # auto-increment integer primary keys to existing tables is not supported
    # in a cross-engine compatible way. Thus, the approach is to:
# (1) create a new revocation_event table with an int pkey,
# (2) migrate data from the old table to the new table,
# (3) delete the old revocation_event table
# (4) rename the new revocation_event table
revocation_table = sql.Table('revocation_event', meta, autoload=True)
revocation_table_new = sql.Table(
'revocation_event_new',
meta,
sql.Column('id', sql.Integer, primary_key=True),
sql.Column('domain_id', sql.String(64)),
sql.Column('project_id', sql.String(64)),
sql.Column('user_id', sql.String(64)),
sql.Column('role_id', sql.String(64)),
sql.Column('trust_id', sql.String(64)),
sql.Column('consumer_id', sql.String(64)),
sql.Column('access_token_id', sql.String(64)),
sql.Column('issued_before', sql.DateTime(), nullable=False),
sql.Column('expires_at', sql.DateTime()),
sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False),
sql.Column('audit_id', sql.String(32), nullable=True),
sql.Column('audit_chain_id', sql.String(32), nullable=True))
revocation_table_new.create(migrate_engine, checkfirst=True)
revocation_table_new.insert().from_select(['domain_id',
'project_id',
'user_id',
'role_id',
'trust_id',
'consumer_id',
'access_token_id',
'issued_before',
'expires_at',
'revoked_at',
'audit_id',
'audit_chain_id'],
revocation_table.select())
revocation_table.drop()
revocation_table_new.rename('revocation_event')
| apache-2.0 | 363,206,148,695,778,300 | 46.387097 | 77 | 0.552076 | false |
BondAnthony/ansible | test/lib/ansible_test/_data/sanity/compile/compile.py | 34 | 1185 | #!/usr/bin/env python
"""Python syntax checker with lint friendly output."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import warnings
with warnings.catch_warnings():
# The parser module is deprecated as of Python 3.9.
# This implementation will need to be updated to use another solution.
# Until then, disable the deprecation warnings to prevent test failures.
warnings.simplefilter('ignore', DeprecationWarning)
import parser
import sys
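# Typical invocation (illustrative): paths are taken from the command line, or from
# stdin when no arguments are given, and each problem is reported on one line in the
# lint-friendly "path:line:column: SyntaxError: message" form. "lib/example.py" is a
# made-up path:
#
#   python compile.py lib/example.py
#   find lib -name '*.py' | python compile.py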
def main():
status = 0
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'rb') as source_fd:
if sys.version_info[0] == 3:
source = source_fd.read().decode('utf-8')
else:
source = source_fd.read()
try:
parser.suite(source)
except SyntaxError:
ex = sys.exc_info()[1]
status = 1
message = ex.text.splitlines()[0].strip()
sys.stdout.write("%s:%d:%d: SyntaxError: %s\n" % (path, ex.lineno, ex.offset, message))
sys.stdout.flush()
sys.exit(status)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,646,037,689,439,263,000 | 27.902439 | 99 | 0.599156 | false |
andreypopp/rrouter | docs/conf.py | 1 | 8228 | # -*- coding: utf-8 -*-
#
# rrouter documentation build configuration file, created by
# sphinx-quickstart on Wed May 21 02:47:06 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinxcontrib.jsx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rrouter'
copyright = u'2014, Andrey Popp'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rrouterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'rrouter.tex', u'rrouter Documentation',
u'Andrey Popp', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rrouter', u'rrouter Documentation',
[u'Andrey Popp'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'rrouter', u'rrouter Documentation',
u'Andrey Popp', 'rrouter', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 1,727,973,927,432,875,000 | 30.76834 | 79 | 0.708192 | false |
teemulehtinen/a-plus | edit_course/operations/clone.py | 3 | 2013 | from django.db import transaction
def clone_learning_objects(category_map, module, objects, parent):
"""
Clones learning objects recursively.
"""
for lobject in list(a.as_leaf_class() for a in objects):
children = list(lobject.children.all())
# Save as new learning object.
lobject.id = None
lobject.modelwithinheritance_ptr_id = None
if hasattr(lobject, "learningobject_ptr_id"):
lobject.learningobject_ptr_id = None
if hasattr(lobject, "baseexercise_ptr_id"):
lobject.baseexercise_ptr_id = None
lobject.category = category_map[lobject.category.id]
lobject.course_module = module
lobject.parent = parent
lobject.save()
clone_learning_objects(category_map, module, children, lobject)
@transaction.atomic
def clone(instance, url):
"""
Clones the course instance and returns the new saved instance.
"""
assistants = list(instance.assistants.all())
usertags = list(instance.usertags.all())
categories = list(instance.categories.all())
modules = list(instance.course_modules.all())
# Save as new course instance.
instance.id = None
instance.visible_to_students = False
instance.url = url
instance.save()
instance.assistants.add(*assistants)
for usertag in usertags:
usertag.id = None
usertag.course_instance = instance
usertag.save()
category_map = {}
for category in categories:
old_id = category.id
# Save as new category.
category.id = None
category.course_instance = instance
category.save()
category_map[old_id] = category
for module in modules:
objects = list(module.learning_objects.filter(parent__isnull=True))
# Save as new module.
module.id = None
module.course_instance = instance
module.save()
clone_learning_objects(category_map, module, objects, None)
return instance
| gpl-3.0 | 386,986,877,870,882,300 | 28.173913 | 75 | 0.645306 | false |