repo_name
stringlengths 5
100
| path
stringlengths 4
299
| copies
stringclasses 990
values | size
stringlengths 4
7
| content
stringlengths 666
1.03M
| license
stringclasses 15
values | hash
int64 -9,223,351,895,964,839,000
9,223,297,778B
| line_mean
float64 3.17
100
| line_max
int64 7
1k
| alpha_frac
float64 0.25
0.98
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
gams/influxdump | influxdump/bin/influxdump.py | 1 | 6035 | # -*- coding: utf-8 -*-
import argparse
import getpass
import json
import sys
from influxdump.data import dump_data, load_file, load_folder
from influxdump.db import get_client
from influxdump.exceptions import TypecastError
CHUNKSIZE = 50000
def get_args():
parser = argparse.ArgumentParser(description='influxDB data backup tool')
parser.add_argument('-c', '--chunksize',
help='query chunk size, default to {}'.format(CHUNKSIZE),
type=int, default=CHUNKSIZE)
parser.add_argument('-d', '--database', help='database', required=True,
type=str)
parser.add_argument('-e', '--end', default='', type=str,
help="""
Exclude all results after the specified timestamp (RFC3339 format).
""")
parser.add_argument('-F', '--folder', default=None,
help="""
destination folder for fragmented dump, if this flag is not used
then dump on stdout
""")
parser.add_argument('-H', '--host', help='server host',
default="localhost", type=str)
parser.add_argument('-i', '--input', default=None,
help="data/metadata input file, will force action to 'load'")
parser.add_argument('-L', '--legacy', action="store_true",
help='influxdb legacy client (<=0.8)')
parser.add_argument('-m', '--measurements', help='measurement pattern')
parser.add_argument('-n', '--dry-run', help='do not really do anything',
action="store_true")
parser.add_argument('-p', '--port', help='server port', default=8086,
type=int)
parser.add_argument('-r', '--retry', default=0, type=int,
help="""
Retry a dump query in case of problem, 0 to disable, defaults to 0
""")
parser.add_argument('-s', '--start', default='', type=str,
help="""
Include all points starting with the specified timestamp (RFC3339
format).
If used without --start, all data will be backed up starting from
1970-01-01T00:00:00Z
""")
parser.add_argument('-t', '--typecast',
help="""
Enable casting field types based on file, meta or auto discovery
if possible. When used with 'dump', will add casting infor in meta.
When used with 'load', will try to find casting info. If casting is
enabled but no casting info can be found, the program will exit.
""", action="store_true")
parser.add_argument('--castfile',
help="""
File containing casting definitions, will supersede any other type
cast definition
""", type=str, default='')
parser.add_argument('-u', '--user', help='username', default='', type=str)
parser.add_argument('-v', '--verbose', help='make the script verbose',
action="store_true")
parser.add_argument('-w', '--password', help='password', default='',
type=str)
parser.add_argument('-W', '--pwdprompt', help='password prompt',
action="store_true")
parser.add_argument('action', metavar="action", nargs="?", default='dump',
help="""
action, can be 'dump' or 'load', default to 'dump'. If action is
'load', one input file (--input) or a folder with data to load has
to be provided
""", choices=["load", "dump"])
args = parser.parse_args()
if args.pwdprompt is True:
pwd = getpass.getpass()
else:
pwd = args.password
if args.end != "" and args.start == "":
args.start = "1970-01-01T00:00:00Z"
if args.castfile != '':
with open(args.castfile, 'r') as fd:
cast = json.load(fd)
else:
cast = {}
if args.action == "load" \
and args.input is None and args.folder is None:
sys.stderr.write("Action is load, missing input file or folder\n\n")
parser.print_help()
sys.exit(1)
return {
"chunksize": args.chunksize,
"db": args.database,
"end": args.end,
"folder": args.folder,
"host": args.host,
"input": args.input,
"legacy": args.legacy,
"measurements": args.measurements,
"dryrun": args.dry_run,
"port": args.port,
"retry": args.retry,
"start": args.start,
"user": args.user,
"verbose": args.verbose,
"pwd": pwd,
"action": args.action,
"typecast": args.typecast,
"cast": cast,
}
def dump(args, client):
dump_data(
client,
args["measurements"],
args["folder"],
dryrun=args["dryrun"],
chunk_size=args["chunksize"],
start=args["start"],
end=args["end"],
retry=args["retry"],
typecast=args["typecast"],
cast=args["cast"],
verbose=args["verbose"]
)
def load(args, client):
if args["input"] is not None:
load_file(
client,
args["input"],
typecast=args["typecast"],
cast=args["cast"],
verbose=args["verbose"]
)
else:
load_folder(
client,
args["folder"],
pattern=args["measurements"],
typecast=args["typecast"],
cast=args["cast"],
verbose=args["verbose"]
)
def main():
args = get_args()
client = get_client(
host=args["host"],
port=args["port"],
user=args["user"],
pwd=args["pwd"],
db=args["db"],
legacy=args["legacy"],
)
if args["action"] == "load" or args["input"] is not None:
load(args, client)
else:
dump(args, client)
if __name__ == "__main__":
try:
main()
except TypecastError as e:
sys.stderr.write("""Error trying to guess field types for casting,
influxdb < 1.0 did not provide key types when queried.
""")
sys.exit(1)
| apache-2.0 | 4,460,932,903,549,420,500 | 31.978142 | 79 | 0.548467 | false |
Boussadia/weboob | modules/adecco/pages.py | 2 | 4199 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BasePage
from weboob.tools.misc import html2text
from .job import AdeccoJobAdvert
import datetime
import re
__all__ = ['SearchPage', 'AdvertPage']
MONTHS = [u'janvier', u'février', u'mars', u'avril', u'mai', u'juin', u'juillet', u'août', u'septembre', u'octobre', u'novembre', u'décembre']
class SearchPage(BasePage):
def iter_job_adverts(self):
re_id = re.compile('http://www.adecco.fr/trouver-un-emploi/Pages/Details-de-l-Offre/(.*?)/(.*?).aspx\?IOF=(.*?)$', re.DOTALL)
divs = self.document.getroot().xpath("//div[@class='resultContain right']") + self.document.getroot().xpath("//div[@class='resultContain left']")
for div in divs:
a = self.parser.select(div, 'div/a', 1, method='xpath').attrib['href']
if re_id.match(a):
_id = u'%s/%s/%s' % (re_id.search(a).group(1), re_id.search(a).group(2), re_id.search(a).group(3))
advert = AdeccoJobAdvert(_id)
date = u'%s' % self.parser.select(div, "div/span[@class='offreDatePublication']", 1, method='xpath').text
m = re.match('(\d{2})\s(.*?)\s(\d{4})', date)
if m:
dd = int(m.group(1))
mm = MONTHS.index(m.group(2)) + 1
yyyy = int(m.group(3))
advert.publication_date = datetime.date(yyyy, mm, dd)
advert.title = u'%s' % self.parser.select(div, "div/h3/a", 1, method='xpath').text_content()
advert.place = u'%s' % self.parser.select(div, "div/h3/span[@class='offreLocalisation']", 1, method='xpath').text
yield advert
class AdvertPage(BasePage):
def get_job_advert(self, url, advert):
re_id = re.compile('http://www.adecco.fr/trouver-un-emploi/Pages/Details-de-l-Offre/(.*?)/(.*?).aspx\?IOF=(.*?)$', re.DOTALL)
if advert is None:
_id = u'%s/%s/%s' % (re_id.search(url).group(1), re_id.search(url).group(2), re_id.search(url).group(3))
advert = AdeccoJobAdvert(_id)
advert.contract_type = re_id.search(url).group(1)
div = self.document.getroot().xpath("//div[@class='contain_MoreResults']")[0]
date = u'%s' % self.parser.select(div, "div[@class='dateResult']", 1, method='xpath').text.strip()
m = re.match('(\d{2})\s(.*?)\s(\d{4})', date)
if m:
dd = int(m.group(1))
mm = MONTHS.index(m.group(2)) + 1
yyyy = int(m.group(3))
advert.publication_date = datetime.date(yyyy, mm, dd)
title = self.parser.select(div, "h1", 1, method='xpath').text_content().strip()
town = self.parser.select(div, "h1/span/span[@class='town']", 1, method='xpath').text_content()
page_title = self.parser.select(div, "h1/span[@class='pageTitle']", 1, method='xpath').text_content()
advert.title = u'%s' % title.replace(town, '').replace(page_title, '')
spans = self.document.getroot().xpath("//div[@class='jobGreyContain']/table/tr/td/span[@class='value']")
advert.job_name = u'%s' % spans[0].text
advert.place = u'%s' % spans[1].text
advert.pay = u'%s' % spans[2].text
advert.contract_type = u'%s' % spans[3].text
advert.url = url
description = self.document.getroot().xpath("//div[@class='descriptionContainer']/p")[0]
advert.description = html2text(self.parser.tostring(description))
return advert
| agpl-3.0 | -2,828,853,580,874,409,500 | 45.622222 | 153 | 0.603194 | false |
xiangel/hue | desktop/core/src/desktop/migrations/0015_auto__add_unique_documentpermission_doc_perms.py | 20 | 9800 | # -*- coding: utf-8 -*-
import logging
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, transaction
from desktop.models import DocumentPermission
class Migration(SchemaMigration):
def forwards(self, orm):
# If there are duplicated document permissions, we'll have an error
# when we try to create this index. So to protect against that, we
# should delete those documents before we create the index.
duplicated_records = DocumentPermission.objects \
.values('doc_id', 'perms') \
.annotate(id_count=models.Count('id')) \
.filter(id_count__gt=1)
# Delete all but the first document.
for record in duplicated_records:
docs = DocumentPermission.objects \
.values_list('id', flat=True) \
.filter(
doc_id=record['doc_id'],
perms=record['perms'],
)[1:]
docs = list(docs)
logging.warn('Deleting permissions %s' % docs)
DocumentPermission.objects.filter(id__in=docs).delete()
# Adding unique constraint on 'DocumentPermission', fields ['doc', 'perms']
db.create_unique(u'desktop_documentpermission', ['doc_id', 'perms'])
def backwards(self, orm):
# Removing unique constraint on 'DocumentPermission', fields ['doc', 'perms']
db.delete_unique(u'desktop_documentpermission', ['doc_id', 'perms'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'desktop.document': {
'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'Document'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'extra': ('django.db.models.fields.TextField', [], {'default': "''"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'doc_owner'", 'to': u"orm['auth.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['desktop.DocumentTag']", 'db_index': 'True', 'symmetrical': 'False'}),
'version': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
u'desktop.document2': {
'Meta': {'object_name': 'Document2'},
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'dependencies': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'dependencies_rel_+'", 'db_index': 'True', 'to': u"orm['desktop.Document2']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'extra': ('django.db.models.fields.TextField', [], {'default': "''"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_history': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'doc2_owner'", 'to': u"orm['auth.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tags_rel_+'", 'db_index': 'True', 'to': u"orm['desktop.Document2']"}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'a0047d19-d898-42e2-a174-6d332f236662'", 'max_length': '36', 'db_index': 'True'}),
'version': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'desktop.documentpermission': {
'Meta': {'unique_together': "(('doc', 'perms'),)", 'object_name': 'DocumentPermission'},
'doc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['desktop.Document']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'to': u"orm['auth.Group']", 'db_table': "'documentpermission_groups'", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'perms': ('django.db.models.fields.CharField', [], {'default': "'read'", 'max_length': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'to': u"orm['auth.User']", 'db_table': "'documentpermission_users'", 'symmetrical': 'False'})
},
u'desktop.documenttag': {
'Meta': {'unique_together': "(('owner', 'tag'),)", 'object_name': 'DocumentTag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'tag': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'desktop.settings': {
'Meta': {'object_name': 'Settings'},
'collect_usage': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tours_and_tutorials': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'})
},
u'desktop.userpreferences': {
'Meta': {'object_name': 'UserPreferences'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {'max_length': '4096'})
}
}
complete_apps = ['desktop']
| apache-2.0 | 4,881,376,183,573,856,000 | 68.503546 | 195 | 0.559082 | false |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_a15_appropriations.py | 1 | 2167 | from dataactcore.models.stagingModels import Appropriation
from dataactcore.models.domainModels import SF133
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'a15_appropriations'
def test_column_headers(database):
expected_subset = {'uniqueid_TAS', 'row_number', 'unobligated_balance_cpe', 'expected_value_GTAS SF133 Line 2490',
'difference'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Tests that SF 133 amount sum for line 2490 matches Appropriation unobligated_balance_cpe
for the specified fiscal year and period
"""
tas = 'tas_one_line'
sf = SF133(line=2490, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys',
main_account_code='000', sub_account_code='000')
ap = Appropriation(job_id=1, row_number=1, tas=tas, unobligated_balance_cpe=1)
assert number_of_errors(_FILE, database, models=[sf, ap]) == 0
# Test with split SF133 lines
tas = 'tas_two_lines'
sf_1 = SF133(line=2490, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys',
main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='n')
sf_2 = SF133(line=2490, tas=tas, period=1, fiscal_year=2016, amount=4, agency_identifier='sys',
main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='o')
ap = Appropriation(job_id=1, row_number=1, tas=tas, unobligated_balance_cpe=5)
assert number_of_errors(_FILE, database, models=[sf_1, sf_2, ap]) == 0
def test_failure(database):
""" Tests that SF 133 amount sum for line 2490 does not match Appropriation unobligated_balance_cpe
for the specified fiscal year and period
"""
tas = 'fail_tas'
sf = SF133(line=2490, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys',
main_account_code='000', sub_account_code='000')
ap = Appropriation(job_id=1, row_number=1, tas=tas, unobligated_balance_cpe=0)
assert number_of_errors(_FILE, database, models=[sf, ap]) == 1
| cc0-1.0 | -3,956,086,811,205,402,000 | 42.34 | 118 | 0.677896 | false |
piquadrat/django | tests/admin_views/models.py | 9 | 25125 | import datetime
import os
import tempfile
import uuid
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
class Section(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@property
def name_property(self):
"""
A property that simply returns the name. Used to test #24461
"""
return self.name
class Article(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section, models.CASCADE, null=True, blank=True)
another_section = models.ForeignKey(Section, models.CASCADE, null=True, blank=True, related_name='+')
sub_section = models.ForeignKey(Section, models.SET_NULL, null=True, blank=True, related_name='+')
def __str__(self):
return self.title
def model_year(self):
return self.date.year
model_year.admin_order_field = 'date'
model_year.short_description = ''
def model_year_reversed(self):
return self.date.year
model_year_reversed.admin_order_field = '-date'
model_year_reversed.short_description = ''
class Book(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name='¿Name?')
def __str__(self):
return self.name
class Promo(models.Model):
name = models.CharField(max_length=100, verbose_name='¿Name?')
book = models.ForeignKey(Book, models.CASCADE)
author = models.ForeignKey(User, models.SET_NULL, blank=True, null=True)
def __str__(self):
return self.name
class Chapter(models.Model):
title = models.CharField(max_length=100, verbose_name='¿Title?')
content = models.TextField()
book = models.ForeignKey(Book, models.CASCADE)
def __str__(self):
return self.title
class Meta:
# Use a utf-8 bytestring to ensure it works (see #11710)
verbose_name = '¿Chapter?'
class ChapterXtra1(models.Model):
chap = models.OneToOneField(Chapter, models.CASCADE, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
guest_author = models.ForeignKey(User, models.SET_NULL, blank=True, null=True)
def __str__(self):
return '¿Xtra1: %s' % self.xtra
class ChapterXtra2(models.Model):
chap = models.OneToOneField(Chapter, models.CASCADE, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra2: %s' % self.xtra
class RowLevelChangePermissionModel(models.Model):
name = models.CharField(max_length=100, blank=True)
class CustomArticle(models.Model):
content = models.TextField()
date = models.DateTimeField()
class ModelWithStringPrimaryKey(models.Model):
string_pk = models.CharField(max_length=255, primary_key=True)
def __str__(self):
return self.string_pk
def get_absolute_url(self):
return '/dummy/%s/' % self.string_pk
class Color(models.Model):
value = models.CharField(max_length=10)
warm = models.BooleanField(default=False)
def __str__(self):
return self.value
# we replicate Color to register with another ModelAdmin
class Color2(Color):
class Meta:
proxy = True
class Thing(models.Model):
title = models.CharField(max_length=20)
color = models.ForeignKey(Color, models.CASCADE, limit_choices_to={'warm': True})
pub_date = models.DateField(blank=True, null=True)
def __str__(self):
return self.title
class Actor(models.Model):
name = models.CharField(max_length=50)
age = models.IntegerField()
title = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return self.name
class Inquisition(models.Model):
expected = models.BooleanField(default=False)
leader = models.ForeignKey(Actor, models.CASCADE)
country = models.CharField(max_length=20)
def __str__(self):
return "by %s from %s" % (self.leader, self.country)
class Sketch(models.Model):
title = models.CharField(max_length=100)
inquisition = models.ForeignKey(
Inquisition,
models.CASCADE,
limit_choices_to={
'leader__name': 'Palin',
'leader__age': 27,
'expected': False,
},
)
defendant0 = models.ForeignKey(
Actor,
models.CASCADE,
limit_choices_to={'title__isnull': False},
related_name='as_defendant0',
)
defendant1 = models.ForeignKey(
Actor,
models.CASCADE,
limit_choices_to={'title__isnull': True},
related_name='as_defendant1',
)
def __str__(self):
return self.title
def today_callable_dict():
return {"last_action__gte": datetime.datetime.today()}
def today_callable_q():
return models.Q(last_action__gte=datetime.datetime.today())
class Character(models.Model):
username = models.CharField(max_length=100)
last_action = models.DateTimeField()
def __str__(self):
return self.username
class StumpJoke(models.Model):
variation = models.CharField(max_length=100)
most_recently_fooled = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to=today_callable_dict,
related_name="+",
)
has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+")
def __str__(self):
return self.variation
class Fabric(models.Model):
NG_CHOICES = (
('Textured', (
('x', 'Horizontal'),
('y', 'Vertical'),
)),
('plain', 'Smooth'),
)
surface = models.CharField(max_length=20, choices=NG_CHOICES)
class Person(models.Model):
GENDER_CHOICES = (
(1, "Male"),
(2, "Female"),
)
name = models.CharField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
age = models.IntegerField(default=21)
alive = models.BooleanField(default=True)
def __str__(self):
return self.name
class Persona(models.Model):
"""
A simple persona associated with accounts, to test inlining of related
accounts which inherit from a common accounts class.
"""
name = models.CharField(blank=False, max_length=80)
def __str__(self):
return self.name
class Account(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, models.CASCADE, related_name="accounts")
servicename = 'generic service'
def __str__(self):
return "%s: %s" % (self.servicename, self.username)
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = 'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = 'bar'
class Subscriber(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __str__(self):
return "%s (%s)" % (self.name, self.email)
class ExternalSubscriber(Subscriber):
pass
class OldSubscriber(Subscriber):
pass
class Media(models.Model):
name = models.CharField(max_length=60)
class Podcast(Media):
release_date = models.DateField()
class Meta:
ordering = ('release_date',) # overridden in PodcastAdmin
class Vodcast(Media):
media = models.OneToOneField(Media, models.CASCADE, primary_key=True, parent_link=True)
released = models.BooleanField(default=False)
class Parent(models.Model):
name = models.CharField(max_length=128)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE, editable=False)
name = models.CharField(max_length=30, blank=True)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
class EmptyModel(models.Model):
def __str__(self):
return "Primary key = %s" % self.id
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class Gallery(models.Model):
name = models.CharField(max_length=100)
class Picture(models.Model):
name = models.CharField(max_length=100)
image = models.FileField(storage=temp_storage, upload_to='test_upload')
gallery = models.ForeignKey(Gallery, models.CASCADE, related_name="pictures")
class Language(models.Model):
iso = models.CharField(max_length=5, primary_key=True)
name = models.CharField(max_length=50)
english_name = models.CharField(max_length=50)
shortlist = models.BooleanField(default=False)
class Meta:
ordering = ('iso',)
# a base class for Recommender and Recommendation
class Title(models.Model):
pass
class TitleTranslation(models.Model):
title = models.ForeignKey(Title, models.CASCADE)
text = models.CharField(max_length=100)
class Recommender(Title):
pass
class Recommendation(Title):
the_recommender = models.ForeignKey(Recommender, models.CASCADE)
class Collector(models.Model):
name = models.CharField(max_length=100)
class Widget(models.Model):
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class DooHickey(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Grommet(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Whatsit(models.Model):
index = models.IntegerField(primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Doodad(models.Model):
name = models.CharField(max_length=100)
class FancyDoodad(Doodad):
owner = models.ForeignKey(Collector, models.CASCADE)
expensive = models.BooleanField(default=True)
class Category(models.Model):
collector = models.ForeignKey(Collector, models.CASCADE)
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
def __str__(self):
return '%s:o%s' % (self.id, self.order)
def link_posted_default():
return datetime.date.today() - datetime.timedelta(days=7)
class Link(models.Model):
posted = models.DateField(default=link_posted_default)
url = models.URLField()
post = models.ForeignKey("Post", models.CASCADE)
readonly_link_content = models.TextField()
class PrePopulatedPost(models.Model):
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
slug = models.SlugField()
class PrePopulatedSubPost(models.Model):
post = models.ForeignKey(PrePopulatedPost, models.CASCADE)
subtitle = models.CharField(max_length=100)
subslug = models.SlugField()
class Post(models.Model):
title = models.CharField(max_length=100, help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)")
content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)")
readonly_content = models.TextField()
posted = models.DateField(
default=datetime.date.today,
help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)"
)
public = models.NullBooleanField()
def awesomeness_level(self):
return "Very awesome."
# Proxy model to test overridden fields attrs on Post model so as not to
# interfere with other tests.
class FieldOverridePost(Post):
class Meta:
proxy = True
class Gadget(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Villain(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class SuperVillain(Villain):
pass
class FunkyTag(models.Model):
"Because we all know there's only one real use case for GFKs."
name = models.CharField(max_length=25)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.name
class Plot(models.Model):
name = models.CharField(max_length=100)
team_leader = models.ForeignKey(Villain, models.CASCADE, related_name='lead_plots')
contact = models.ForeignKey(Villain, models.CASCADE, related_name='contact_plots')
tags = GenericRelation(FunkyTag)
def __str__(self):
return self.name
class PlotDetails(models.Model):
details = models.CharField(max_length=100)
plot = models.OneToOneField(Plot, models.CASCADE, null=True, blank=True)
def __str__(self):
return self.details
class PlotProxy(Plot):
class Meta:
proxy = True
class SecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
villain = models.ForeignKey(Villain, models.CASCADE)
def __str__(self):
return self.location
class SuperSecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
supervillain = models.ForeignKey(SuperVillain, models.CASCADE)
def __str__(self):
return self.location
class Bookmark(models.Model):
name = models.CharField(max_length=60)
tag = GenericRelation(FunkyTag, related_query_name='bookmark')
def __str__(self):
return self.name
class CyclicOne(models.Model):
name = models.CharField(max_length=25)
two = models.ForeignKey('CyclicTwo', models.CASCADE)
def __str__(self):
return self.name
class CyclicTwo(models.Model):
name = models.CharField(max_length=25)
one = models.ForeignKey(CyclicOne, models.CASCADE)
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=20)
def __str__(self):
return self.name
class Pizza(models.Model):
name = models.CharField(max_length=20)
toppings = models.ManyToManyField('Topping', related_name='pizzas')
# Pizza's ModelAdmin has readonly_fields = ['toppings'].
# toppings is editable for this model's admin.
class ReadablePizza(Pizza):
class Meta:
proxy = True
class Album(models.Model):
owner = models.ForeignKey(User, models.SET_NULL, null=True, blank=True)
title = models.CharField(max_length=30)
class Employee(Person):
code = models.CharField(max_length=20)
class WorkHour(models.Model):
datum = models.DateField()
employee = models.ForeignKey(Employee, models.CASCADE)
class Question(models.Model):
question = models.CharField(max_length=20)
posted = models.DateField(default=datetime.date.today)
expires = models.DateTimeField(null=True, blank=True)
related_questions = models.ManyToManyField('self')
def __str__(self):
return self.question
class Answer(models.Model):
question = models.ForeignKey(Question, models.PROTECT)
answer = models.CharField(max_length=20)
def __str__(self):
return self.answer
class Answer2(Answer):
class Meta:
proxy = True
class Reservation(models.Model):
start_date = models.DateTimeField()
price = models.IntegerField()
DRIVER_CHOICES = (
('bill', 'Bill G'),
('steve', 'Steve J'),
)
RESTAURANT_CHOICES = (
('indian', 'A Taste of India'),
('thai', 'Thai Pography'),
('pizza', 'Pizza Mama'),
)
class FoodDelivery(models.Model):
reference = models.CharField(max_length=100)
driver = models.CharField(max_length=100, choices=DRIVER_CHOICES, blank=True)
restaurant = models.CharField(max_length=100, choices=RESTAURANT_CHOICES, blank=True)
class Meta:
unique_together = (("driver", "restaurant"),)
class CoverLetter(models.Model):
author = models.CharField(max_length=30)
date_written = models.DateField(null=True, blank=True)
def __str__(self):
return self.author
class Paper(models.Model):
title = models.CharField(max_length=30)
author = models.CharField(max_length=30, blank=True, null=True)
class ShortMessage(models.Model):
content = models.CharField(max_length=140)
timestamp = models.DateTimeField(null=True, blank=True)
class Telegram(models.Model):
title = models.CharField(max_length=30)
date_sent = models.DateField(null=True, blank=True)
def __str__(self):
return self.title
class Story(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class OtherStory(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class ComplexSortedPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
is_employee = models.NullBooleanField()
class PluggableSearchPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
class PrePopulatedPostLargeSlug(models.Model):
"""
Regression test for #15938: a large max_length for the slugfield must not
be localized in prepopulated_fields_js.html or it might end up breaking
the javascript (ie, using THOUSAND_SEPARATOR ends up with maxLength=1,000)
"""
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
# `db_index=False` because MySQL cannot index large CharField (#21196).
slug = models.SlugField(max_length=1000, db_index=False)
class AdminOrderedField(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedModelMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
def some_order(self):
return self.order
some_order.admin_order_field = 'order'
class AdminOrderedAdminMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedCallable(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class Report(models.Model):
title = models.CharField(max_length=100)
def __str__(self):
return self.title
class MainPrepopulated(models.Model):
name = models.CharField(max_length=100)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(blank=True)
slug2 = models.SlugField(blank=True)
slug3 = models.SlugField(blank=True, allow_unicode=True)
class RelatedPrepopulated(models.Model):
parent = models.ForeignKey(MainPrepopulated, models.CASCADE)
name = models.CharField(max_length=75)
fk = models.ForeignKey('self', models.CASCADE, blank=True, null=True)
m2m = models.ManyToManyField('self', blank=True)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(max_length=50)
slug2 = models.SlugField(max_length=60)
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #16819.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
class UndeletableObject(models.Model):
"""
Model whose show_delete in admin change_view has been disabled
Refs #10057.
"""
name = models.CharField(max_length=255)
class UnchangeableObject(models.Model):
"""
Model whose change_view is disabled in admin
Refs #20640.
"""
class UserMessenger(models.Model):
"""
Dummy class for testing message_user functions on ModelAdmin
"""
class Simple(models.Model):
"""
Simple model with nothing on it for use in testing
"""
class Choice(models.Model):
choice = models.IntegerField(
blank=True, null=True,
choices=((1, 'Yes'), (0, 'No'), (None, 'No opinion')),
)
class ParentWithDependentChildren(models.Model):
"""
Issue #20522
Model where the validation of child foreign-key relationships depends
on validation of the parent
"""
some_required_info = models.PositiveIntegerField()
family_name = models.CharField(max_length=255, blank=False)
class DependentChild(models.Model):
"""
Issue #20522
Model that depends on validation of the parent class for one of its
fields to validate during clean
"""
parent = models.ForeignKey(ParentWithDependentChildren, models.CASCADE)
family_name = models.CharField(max_length=255)
class _Manager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(pk__gt=1)
class FilteredManager(models.Model):
def __str__(self):
return "PK=%d" % self.pk
pk_gt_1 = _Manager()
objects = models.Manager()
class EmptyModelVisible(models.Model):
""" See ticket #11277. """
class EmptyModelHidden(models.Model):
""" See ticket #11277. """
class EmptyModelMixin(models.Model):
""" See ticket #11277. """
class State(models.Model):
name = models.CharField(max_length=100)
class City(models.Model):
state = models.ForeignKey(State, models.CASCADE)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Restaurant(models.Model):
city = models.ForeignKey(City, models.CASCADE)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Worker(models.Model):
work_at = models.ForeignKey(Restaurant, models.CASCADE)
name = models.CharField(max_length=50)
surname = models.CharField(max_length=50)
# Models for #23329
class ReferencedByParent(models.Model):
name = models.CharField(max_length=20, unique=True)
class ParentWithFK(models.Model):
fk = models.ForeignKey(
ReferencedByParent,
models.CASCADE,
to_field='name',
related_name='hidden+',
)
class ChildOfReferer(ParentWithFK):
pass
# Models for #23431
class InlineReferer(models.Model):
pass
class ReferencedByInline(models.Model):
name = models.CharField(max_length=20, unique=True)
class InlineReference(models.Model):
referer = models.ForeignKey(InlineReferer, models.CASCADE)
fk = models.ForeignKey(
ReferencedByInline,
models.CASCADE,
to_field='name',
related_name='hidden+',
)
class Recipe(models.Model):
rname = models.CharField(max_length=20, unique=True)
class Ingredient(models.Model):
iname = models.CharField(max_length=20, unique=True)
recipes = models.ManyToManyField(Recipe, through='RecipeIngredient')
class RecipeIngredient(models.Model):
ingredient = models.ForeignKey(Ingredient, models.CASCADE, to_field='iname')
recipe = models.ForeignKey(Recipe, models.CASCADE, to_field='rname')
# Model for #23839
class NotReferenced(models.Model):
# Don't point any FK at this model.
pass
# Models for #23934
class ExplicitlyProvidedPK(models.Model):
name = models.IntegerField(primary_key=True)
class ImplicitlyGeneratedPK(models.Model):
name = models.IntegerField(unique=True)
# Models for #25622
class ReferencedByGenRel(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class GenRelReference(models.Model):
references = GenericRelation(ReferencedByGenRel)
class ParentWithUUIDPK(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(max_length=100)
def __str__(self):
return str(self.id)
class RelatedWithUUIDPKModel(models.Model):
parent = models.ForeignKey(ParentWithUUIDPK, on_delete=models.SET_NULL, null=True, blank=True)
class Author(models.Model):
pass
class Authorship(models.Model):
book = models.ForeignKey(Book, models.CASCADE)
author = models.ForeignKey(Author, models.CASCADE)
| bsd-3-clause | -4,959,838,720,506,171,000 | 24.787256 | 110 | 0.68136 | false |
SysTheron/adhocracy | src/adhocracy/config/environment.py | 2 | 4933 | """Pylons environment configuration"""
import os
import time
import sys
import traceback
from mako.lookup import TemplateLookup
from paste.deploy.converters import asbool
from pylons import tmpl_context as c
from pylons.error import handle_mako_error
from pylons.configuration import PylonsConfig
from sqlalchemy import engine_from_config
from sqlalchemy.interfaces import ConnectionProxy
import adhocracy.lib.app_globals as app_globals
import adhocracy.lib.helpers
from adhocracy.config.routing import make_map
from adhocracy.model import init_model
from adhocracy.lib.search import init_search
from adhocracy.lib.democracy import init_democracy
from adhocracy.lib.util import create_site_subdirectory
from adhocracy.lib import init_site
from adhocracy.lib.queue import RQConfig
def load_environment(global_conf, app_conf, with_db=True):
"""Configure the Pylons environment via the ``pylons.config``
object
"""
# Pylons paths
conf_copy = global_conf.copy()
conf_copy.update(app_conf)
site_templates = create_site_subdirectory('templates', app_conf=conf_copy)
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
client_containing = app_conf.get('adhocracy.client_location')
if client_containing:
client_root = os.path.join(client_containing, 'adhocracy_client')
sys.path.insert(0, client_containing)
import adhocracy_client.static
sys.modules['adhocracy.static'] = adhocracy_client.static
else:
client_root = root
import adhocracy.static
paths = dict(root=root,
controllers=os.path.join(root, 'controllers'),
static_files=os.path.join(client_root, 'static'),
templates=[site_templates,
os.path.join(client_root, 'templates')])
# Initialize config with the basic options
config = PylonsConfig()
config.init_app(global_conf, app_conf, package='adhocracy', paths=paths)
config['routes.map'] = make_map(config)
config['pylons.app_globals'] = app_globals.Globals(config)
config['pylons.h'] = adhocracy.lib.helpers
# Create the Mako TemplateLookup, with the default auto-escaping
config['pylons.app_globals'].mako_lookup = TemplateLookup(
directories=paths['templates'],
error_handler=handle_mako_error,
module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
input_encoding='utf-8', default_filters=['escape'],
imports=['from markupsafe import escape'])
config['pylons.strict_tmpl_context'] = False
# Setup the SQLAlchemy database engine
engineOpts = {}
if asbool(config.get('adhocracy.debug.sql', False)):
engineOpts['connectionproxy'] = TimerProxy()
engine = engine_from_config(config, 'sqlalchemy.', **engineOpts)
init_model(engine)
# CONFIGURATION OPTIONS HERE (note: all config options will override
# any Pylons config options)
init_site(config)
if with_db:
init_search()
init_democracy()
RQConfig.setup_from_config(config)
return config
class TimerProxy(ConnectionProxy):
'''
A timing proxy with code borrowed from spline and
pyramid_debugtoolbar. This will work for sqlalchemy 0.6,
but not 0.7. pyramid_debugtoolbar works for 0.7.
'''
def cursor_execute(self, execute, cursor, statement, parameters, context,
executemany):
start_time = time.time()
try:
return execute(cursor, statement, parameters, context)
finally:
duration = time.time() - start_time
# Find who spawned this query. Rewind up the stack until we
# escape from sqlalchemy code -- including this file, which
# contains proxy stuff
caller = '(unknown)'
for frame_file, frame_line, frame_func, frame_code in \
reversed(traceback.extract_stack()):
if __file__.startswith(frame_file) \
or '/sqlalchemy/' in frame_file:
continue
# OK, this is it
caller = "{0}:{1} in {2}".format(
frame_file, frame_line, frame_func)
break
# save interesting information for presentation later
try:
if not c.pdtb_sqla_queries:
c.pdtb_sqla_queries = []
queries = c.pdtb_sqla_queries
query_data = {
'duration': duration,
'statement': statement,
'parameters': parameters,
'context': context,
'caller': caller,
}
queries.append(query_data)
except TypeError:
# happens when sql is emitted before pylons has started
# or outside of a request
pass
| agpl-3.0 | 815,127,858,238,675,700 | 35.007299 | 78 | 0.631462 | false |
albemala/almoviesrenamer | src/ui/main_window_view.py | 1 | 7172 | from PyQt5.QtGui import QWindow
from PyQt5.QtQml import QQmlApplicationEngine
from ui.movie_table_item import MovieTableItem
LOADING_PANEL_VISIBLE_PROPERTY = "loadingPanelVisible"
LOADING_PANEL_MOVIE_TITLE_PROPERTY = "loadingPanelMovieTitle"
MOVIES_TABLE_MODEL_PROPERTY = "moviesTableModel"
MOVIES_TABLE_CURRENT_ROW_PROPERTY = "moviesTableCurrentRow"
MOVIES_TABLE_SELECTION_PROPERTY = "moviesTableSelection"
MOVIE_INFO_PANEL_VISIBLE_PROPERTY = "movieInfoPanelVisible"
MOVIE_ALTERNATIVE_TITLES_MODEL_PROPERTY = "movieAlternativeTitlesModel"
MOVIE_ALTERNATIVE_TITLE_INDEX_PROPERTY = "movieAlternativeTitleIndex"
MOVIE_TITLE_PROPERTY = "movieTitle"
MOVIE_ORIGINAL_TITLE_PROPERTY = "movieOriginalTitle"
MOVIE_YEAR_PROPERTY = "movieYear"
MOVIE_DIRECTORS_PROPERTY = "movieDirectors"
MOVIE_DURATION_PROPERTY = "movieDuration"
MOVIE_LANGUAGE_PROPERTY = "movieLanguage"
MOVIE_SEARCH_PROGRESS_BAR_VISIBLE_PROPERTY = "searchAlternativeMovieProgressBarVisible"
MOVIE_SEARCH_ALTERNATIVE_TITLE_PROPERTY = "searchAlternativeTitle"
MOVIE_SEARCH_ALTERNATIVE_YEAR_PROPERTY = "searchAlternativeYear"
MOVIE_SEARCH_ALTERNATIVE_LANGUAGE_PROPERTY = "searchAlternativeLanguage"
MOVIE_RENAMED_PANEL_VISIBLE_PROPERTY = "movieRenamedPanelVisible"
MOVIE_ERROR_PANEL_VISIBLE_PROPERTY = "movieErrorPanelVisible"
MOVIE_ERROR_PROPERTY = "movieError"
class MainWindowView:
def __init__(self):
self.__movies_table_view_model = []
self.__engine = QQmlApplicationEngine()
self.__engine.load("ui/main_window.qml")
def __get_root_window(self) -> QWindow:
return self.__engine.rootObjects()[0]
def __get_property(self, property_name: str):
return self.__get_root_window().property(property_name)
def __set_property(self, property_name: str, property_value):
return self.__get_root_window().setProperty(property_name, property_value)
def get_movies_table_current_row(self) -> int:
return self.__get_property(MOVIES_TABLE_CURRENT_ROW_PROPERTY)
def get_movies_table_selection(self) -> [int]:
# selection = self.__get_property(MOVIES_TABLE_SELECTION_PROPERTY)
selection = self.__get_root_window().getMoviesTableSelection()
# QJSValue to QVariant
variant = selection.toVariant()
# with a multiple selection, variant is a list of float
indices = []
for i in variant:
# float to int
indices.append(int(i))
return indices
def get_movie_search_alternative_title(self) -> str:
return self.__get_property(MOVIE_SEARCH_ALTERNATIVE_TITLE_PROPERTY)
def get_movie_search_alternative_year(self) -> str:
return self.__get_property(MOVIE_SEARCH_ALTERNATIVE_YEAR_PROPERTY)
def get_movie_search_alternative_language(self) -> str:
return self.__get_property(MOVIE_SEARCH_ALTERNATIVE_LANGUAGE_PROPERTY)
def set_loading_panel_movie_title(self, loading_info: str) -> None:
self.__set_property(LOADING_PANEL_MOVIE_TITLE_PROPERTY, loading_info)
def set_loading_panel_visible(self, visible: bool) -> None:
self.__set_property(LOADING_PANEL_VISIBLE_PROPERTY, visible)
def set_movie_info_panel_visible(self, visible: bool) -> None:
self.__set_property(MOVIE_INFO_PANEL_VISIBLE_PROPERTY, visible)
def set_movie_renamed_panel_visible(self, visible: bool) -> None:
self.__set_property(MOVIE_RENAMED_PANEL_VISIBLE_PROPERTY, visible)
def set_movie_error_panel_visible(self, visible: bool) -> None:
self.__set_property(MOVIE_ERROR_PANEL_VISIBLE_PROPERTY, visible)
def set_movie_search_progress_bar_visible(self, visible: bool) -> None:
self.__set_property(MOVIE_SEARCH_PROGRESS_BAR_VISIBLE_PROPERTY, visible)
def add_movie_table_item(self, original_name: str, new_name: str) -> None:
movie_table_item = MovieTableItem(original_name, new_name)
self.__movies_table_view_model.append(movie_table_item)
# From Qt Documentation:
# Note: There is no way for the view to know that the contents of a QList has changed.
# If the QList changes, it is necessary to reset the model by calling QQmlContext::setContextProperty() again.
self.__set_property(MOVIES_TABLE_MODEL_PROPERTY, self.__movies_table_view_model)
def remove_movie_table_item(self, index: int) -> None:
del self.__movies_table_view_model[index]
self.__set_property(MOVIES_TABLE_MODEL_PROPERTY, self.__movies_table_view_model)
def remove_all_movie_table_items(self) -> None:
del self.__movies_table_view_model[:]
self.__set_property(MOVIES_TABLE_MODEL_PROPERTY, self.__movies_table_view_model)
def set_movie_alternative_titles_model(self, model: []) -> None:
self.__set_property(MOVIE_ALTERNATIVE_TITLES_MODEL_PROPERTY, model)
def set_movie_title(self, movie_title: str) -> None:
self.__set_property(MOVIE_TITLE_PROPERTY, movie_title)
def set_movie_original_title(self, movie_original_title: str) -> None:
self.__set_property(MOVIE_ORIGINAL_TITLE_PROPERTY, movie_original_title)
def set_movie_year(self, movie_year: str) -> None:
self.__set_property(MOVIE_YEAR_PROPERTY, movie_year)
def set_movie_directors(self, movie_directors) -> None:
self.__set_property(MOVIE_DIRECTORS_PROPERTY, movie_directors)
def set_movie_duration(self, movie_duration) -> None:
self.__set_property(MOVIE_DURATION_PROPERTY, movie_duration)
def set_movie_language(self, movie_language) -> None:
self.__set_property(MOVIE_LANGUAGE_PROPERTY, movie_language)
def set_movie_alternative_title_index(self, index: int) -> None:
self.__set_property(MOVIE_ALTERNATIVE_TITLE_INDEX_PROPERTY, index)
def set_movie_error(self, movie_error: str) -> None:
self.__set_property(MOVIE_ERROR_PROPERTY, movie_error)
def get_add_movies_clicked_signal(self):
return self.__get_root_window().addMoviesClicked
def get_add_movies_in_folder_clicked_signal(self):
return self.__get_root_window().addMoviesInFolderClicked
def get_add_movies_in_folder_and_subfolders_clicked_signal(self):
return self.__get_root_window().addMoviesInFolderAndSubfoldersClicked
def get_remove_selected_movies_clicked_signal(self):
return self.__get_root_window().removeSelectedMoviesClicked
def get_remove_all_movies_clicked_signal(self):
return self.__get_root_window().removeAllMoviesClicked
def get_show_renaming_rule_dialog_clicked_signal(self):
return self.__get_root_window().showRenamingRuleDialogClicked
def get_rename_movies_clicked_signal(self):
return self.__get_root_window().renameMoviesClicked
def get_movie_item_selected_signal(self):
return self.__get_root_window().movieSelected
def get_movie_alternative_title_changed_signal(self):
return self.__get_root_window().movieAlternativeTitleChanged
def get_search_movie_clicked_signal(self):
return self.__get_root_window().searchMovieClicked
def get_movies_selection_changed_signal(self):
return self.__get_root_window().moviesSelectionChanged
| gpl-3.0 | 835,110,258,699,989,500 | 41.690476 | 118 | 0.714724 | false |
zbuc/mitmproxy | libmproxy/filt.py | 13 | 10005 | """
The following operators are understood:
~q Request
~s Response
Headers:
Patterns are matched against "name: value" strings. Field names are
all-lowercase.
~a Asset content-type in response. Asset content types are:
text/javascript
application/x-javascript
application/javascript
text/css
image/*
application/x-shockwave-flash
~h rex Header line in either request or response
~hq rex Header in request
~hs rex Header in response
~b rex Expression in the body of either request or response
~bq rex Expression in the body of request
~bq rex Expression in the body of response
~t rex Shortcut for content-type header.
~d rex Request domain
~m rex Method
~u rex URL
~c CODE Response code.
rex Equivalent to ~u rex
"""
from __future__ import absolute_import
import re
import sys
import pyparsing as pp
class _Token:
def dump(self, indent=0, fp=sys.stdout):
print >> fp, "\t" * indent, self.__class__.__name__,
if hasattr(self, "expr"):
print >> fp, "(%s)" % self.expr,
print >> fp
class _Action(_Token):
@classmethod
def make(klass, s, loc, toks):
return klass(*toks[1:])
class FErr(_Action):
code = "e"
help = "Match error"
def __call__(self, f):
return True if f.error else False
class FReq(_Action):
code = "q"
help = "Match request with no response"
def __call__(self, f):
if not f.response:
return True
class FResp(_Action):
code = "s"
help = "Match response"
def __call__(self, f):
return True if f.response else False
class _Rex(_Action):
flags = 0
def __init__(self, expr):
self.expr = expr
try:
self.re = re.compile(self.expr, self.flags)
except:
raise ValueError("Cannot compile expression.")
def _check_content_type(expr, o):
val = o.headers.get("content-type")
if val and re.search(expr, val):
return True
return False
class FAsset(_Action):
code = "a"
help = "Match asset in response: CSS, Javascript, Flash, images."
ASSET_TYPES = [
"text/javascript",
"application/x-javascript",
"application/javascript",
"text/css",
"image/.*",
"application/x-shockwave-flash"
]
def __call__(self, f):
if f.response:
for i in self.ASSET_TYPES:
if _check_content_type(i, f.response):
return True
return False
class FContentType(_Rex):
code = "t"
help = "Content-type header"
def __call__(self, f):
if _check_content_type(self.expr, f.request):
return True
elif f.response and _check_content_type(self.expr, f.response):
return True
return False
class FRequestContentType(_Rex):
code = "tq"
help = "Request Content-Type header"
def __call__(self, f):
return _check_content_type(self.expr, f.request)
class FResponseContentType(_Rex):
code = "ts"
help = "Response Content-Type header"
def __call__(self, f):
if f.response:
return _check_content_type(self.expr, f.response)
return False
class FHead(_Rex):
code = "h"
help = "Header"
flags = re.MULTILINE
def __call__(self, f):
if f.request and self.re.search(str(f.request.headers)):
return True
if f.response and self.re.search(str(f.response.headers)):
return True
return False
class FHeadRequest(_Rex):
code = "hq"
help = "Request header"
flags = re.MULTILINE
def __call__(self, f):
if f.request and self.re.search(str(f.request.headers)):
return True
class FHeadResponse(_Rex):
code = "hs"
help = "Response header"
flags = re.MULTILINE
def __call__(self, f):
if f.response and self.re.search(str(f.response.headers)):
return True
class FBod(_Rex):
code = "b"
help = "Body"
def __call__(self, f):
if f.request and f.request.content:
if self.re.search(f.request.get_decoded_content()):
return True
if f.response and f.response.content:
if self.re.search(f.response.get_decoded_content()):
return True
return False
class FBodRequest(_Rex):
code = "bq"
help = "Request body"
def __call__(self, f):
if f.request and f.request.content:
if self.re.search(f.request.get_decoded_content()):
return True
class FBodResponse(_Rex):
code = "bs"
help = "Response body"
def __call__(self, f):
if f.response and f.response.content:
if self.re.search(f.response.get_decoded_content()):
return True
class FMethod(_Rex):
code = "m"
help = "Method"
flags = re.IGNORECASE
def __call__(self, f):
return bool(self.re.search(f.request.method))
class FDomain(_Rex):
code = "d"
help = "Domain"
flags = re.IGNORECASE
def __call__(self, f):
return bool(self.re.search(f.request.host))
class FUrl(_Rex):
code = "u"
help = "URL"
# FUrl is special, because it can be "naked".
@classmethod
def make(klass, s, loc, toks):
if len(toks) > 1:
toks = toks[1:]
return klass(*toks)
def __call__(self, f):
return self.re.search(f.request.url)
class FSrc(_Rex):
code = "src"
help = "Match source address"
def __call__(self, f):
return f.client_conn.address and self.re.search(repr(f.client_conn.address))
class FDst(_Rex):
code = "dst"
help = "Match destination address"
def __call__(self, f):
return f.server_conn.address and self.re.search(repr(f.server_conn.address))
class _Int(_Action):
def __init__(self, num):
self.num = int(num)
class FCode(_Int):
code = "c"
help = "HTTP response code"
def __call__(self, f):
if f.response and f.response.status_code == self.num:
return True
class FAnd(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
print >> fp, "\t" * indent, self.__class__.__name__
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return all(i(f) for i in self.lst)
class FOr(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
print >> fp, "\t" * indent, self.__class__.__name__
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return any(i(f) for i in self.lst)
class FNot(_Token):
def __init__(self, itm):
self.itm = itm[0]
def dump(self, indent=0, fp=sys.stdout):
print >> fp, "\t" * indent, self.__class__.__name__
self.itm.dump(indent + 1, fp)
def __call__(self, f):
return not self.itm(f)
filt_unary = [
FReq,
FResp,
FAsset,
FErr
]
filt_rex = [
FHeadRequest,
FHeadResponse,
FHead,
FBodRequest,
FBodResponse,
FBod,
FMethod,
FDomain,
FUrl,
FRequestContentType,
FResponseContentType,
FContentType,
FSrc,
FDst,
]
filt_int = [
FCode
]
def _make():
# Order is important - multi-char expressions need to come before narrow
# ones.
parts = []
for klass in filt_unary:
f = pp.Literal("~%s" % klass.code) + pp.WordEnd()
f.setParseAction(klass.make)
parts.append(f)
simplerex = "".join(c for c in pp.printables if c not in "()~'\"")
rex = pp.Word(simplerex) |\
pp.QuotedString("\"", escChar='\\') |\
pp.QuotedString("'", escChar='\\')
for klass in filt_rex:
f = pp.Literal("~%s" % klass.code) + pp.WordEnd() + rex.copy()
f.setParseAction(klass.make)
parts.append(f)
for klass in filt_int:
f = pp.Literal("~%s" % klass.code) + pp.WordEnd() + pp.Word(pp.nums)
f.setParseAction(klass.make)
parts.append(f)
# A naked rex is a URL rex:
f = rex.copy()
f.setParseAction(FUrl.make)
parts.append(f)
atom = pp.MatchFirst(parts)
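    # Atoms compose with the operators defined below; "!" binds tightest, then "&", then "|".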
expr = pp.operatorPrecedence(atom,
[(pp.Literal("!").suppress(),
1,
pp.opAssoc.RIGHT,
lambda x: FNot(*x)),
(pp.Literal("&").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FAnd(*x)),
(pp.Literal("|").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FOr(*x)),
])
expr = pp.OneOrMore(expr)
return expr.setParseAction(lambda x: FAnd(x) if len(x) != 1 else x)
bnf = _make()
def parse(s):
try:
filt = bnf.parseString(s, parseAll=True)[0]
filt.pattern = s
return filt
except pp.ParseException:
return None
except ValueError:
return None
help = []
for i in filt_unary:
help.append(
("~%s" % i.code, i.help)
)
for i in filt_rex:
help.append(
("~%s regex" % i.code, i.help)
)
for i in filt_int:
help.append(
("~%s int" % i.code, i.help)
)
help.sort()
help.extend(
[
("!", "unary not"),
("&", "and"),
("|", "or"),
("(...)", "grouping"),
]
)
| mit | 1,328,500,808,804,640,300 | 22.708531 | 84 | 0.522939 | false |
drayanaindra/shoop | shoop/default_tax/module.py | 4 | 2653 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.utils.translation import ugettext_lazy as _
from shoop.core import taxing
from shoop.core.pricing import TaxfulPrice, TaxlessPrice
from shoop.core.taxing._context import TaxingContext
from shoop.core.taxing.utils import stacked_value_added_taxes
from shoop.default_tax.models import TaxRule
from shoop.utils.iterables import first
class DefaultTaxModule(taxing.TaxModule):
identifier = "default_tax"
name = _("Default Taxation")
def determine_product_tax(self, context, product):
"""
:type context: shoop.core.contexts.PriceTaxContext
:type product: shoop.core.models.Product
"""
price = product.get_price(context)
return _calculate_taxes(
price,
taxing_context=context.taxing_context,
tax_class=product.tax_class,
)
def get_line_taxes(self, source_line):
"""
:type source_line: shoop.core.order_creator.SourceLine
:rtype: Iterable[LineTax]
"""
taxing_context = TaxingContext(
customer_tax_group=_resolve(source_line, 'source.customer.tax_group'),
location=_resolve(source_line, 'source.billing_address'),
)
return _calculate_taxes(
source_line.total_price,
taxing_context=taxing_context,
tax_class=source_line.get_tax_class(),
).taxes
def _calculate_taxes(price, taxing_context, tax_class):
customer_tax_group = taxing_context.customer_tax_group
# Check tax exempt
# TODO: Should this be done in some better way?
if customer_tax_group and customer_tax_group.identifier == 'tax_exempt':
return taxing.TaxedPrice(
TaxfulPrice(price.amount), TaxlessPrice(price.amount), []
)
tax_rules = TaxRule.objects.filter(enabled=True, tax_classes=tax_class)
if customer_tax_group:
tax_rules = tax_rules.filter(customer_tax_groups=customer_tax_group)
tax_rules = tax_rules.order_by("-priority") # TODO: Do the Right Thing with priority
taxes = [tax_rule for tax_rule in tax_rules if tax_rule.matches(taxing_context)]
tax_rule = first(taxes) # TODO: Do something better than just using the first tax!
tax = getattr(tax_rule, "tax", None)
return stacked_value_added_taxes(price, [tax] if tax else [])
def _resolve(obj, path):
for name in path.split('.'):
obj = getattr(obj, name, None)
return obj
| agpl-3.0 | 397,524,856,882,960,450 | 36.366197 | 89 | 0.671315 | false |
alonisser/Open-Knesset | user/tests.py | 6 | 8945 | import datetime
import json
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from actstream import action, follow, unfollow
from mks.models import Member, Knesset
from laws.models import Bill
from committees.models import Committee
from agendas.models import Agenda
class TestProfile(TestCase):
def setUp(self):
self.knesset = Knesset.objects.create(
number=1,
start_date=datetime.date.today() - datetime.timedelta(10))
self.jacob = User.objects.create_user('jacob', '[email protected]',
'JKM')
self.adrian = User.objects.create_user('adrian', '[email protected]',
'adrian')
profile = self.adrian.profiles.get()
profile.public_profile = False
profile.save()
def testPublicProfile(self):
res = self.client.get(reverse('public-profile',
kwargs={'pk': self.jacob.id}))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res,
'user/public_profile.html')
self.assertEqual(res.context['viewed_user'], self.jacob)
res = self.client.get(reverse('public-profile',
kwargs={'pk': self.adrian.id}))
self.assertEqual(res.status_code, 200)
self.assertFalse('"details"' in res.content) # seems like profile is
# public, even though it should not be
def testProfileList(self):
res = self.client.get(reverse('profile-list'))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res,'user/profile_list.html')
self.assertEqual(len(res.context['object_list']), 1)
def testSignup(self):
res = self.client.post(reverse('register'), {'username': 'john',
'password1': '123', 'password2': '123',
'email': '[email protected]', 'email_notification': 'D'},
follow = True)
self.assertEqual(res.redirect_chain, [('http://testserver/users/edit-profile/', 302)])
new = User.objects.get(username='john')
new_profile = new.profiles.get()
self.assertEqual(new_profile.email_notification, 'D')
def test_no_double_signup(self):
"Don't allow new registration with an exiting email"
res = self.client.post(
reverse('register'), {
'username': 'first_jack',
'password1': '123', 'password2': '123',
'email': '[email protected]',
'email_notification': 'D'
},
follow=True)
self.assertEqual(res.redirect_chain, [('http://testserver/users/edit-profile/', 302)])
res = self.client.post(
reverse('register'), {
'username': 'double_jack',
'password1': '123', 'password2': '123',
'email': '[email protected]',
'email_notification': 'D'
},
follow=True)
# Now try to create another user with some email
self.assertRegexpMatches(res.content, 'error_\d+_id_email')
def tearDown(self):
self.jacob.delete()
self.adrian.delete()
self.knesset.delete()
class TestFollowing(TestCase):
def setUp(self):
self.knesset = Knesset.objects.create(
number=1,
start_date=datetime.date.today() - datetime.timedelta(10))
self.jacob = User.objects.create_user('jacob', '[email protected]',
'JKM')
self.david = Member.objects.create(name='david', start_date=datetime.date(2010,1,1))
self.yosef = Member.objects.create(name='yosef', start_date=datetime.date(2010,1,1))
self.moshe = Member.objects.create(name='moshe', start_date=datetime.date(2010,1,1))
self.agenda_1 = Agenda.objects.create(name='agenda_1')
self.committee_1 = Committee.objects.create(name='c1')
self.meeting_1 = self.committee_1.meetings.create(date=datetime.datetime.now(),
protocol_text='m1')
self.meeting_1.create_protocol_parts()
action.send(self.jacob, verb='farted', target=self.david)
action.send(self.jacob, verb='hit', target=self.yosef)
action.send(self.jacob, verb='hit', target=self.moshe)
self.bill_1 = Bill.objects.create(stage='1', title='bill 1', popular_name="The Bill")
def testUnfollowMeeting(self):
follow(self.jacob, self.meeting_1)
p = self.jacob.profiles.get()
self.assertEquals(len(p.meetings), 1)
loggedin = self.client.login(username='jacob', password='JKM')
self.assertTrue(loggedin)
response = self.client.post(reverse('user-follow-unfollow'),
{'what': 'meeting',
'id': self.meeting_1.id,
'verb':'unfollow'})
self.assertEquals(len(p.members), 0)
def test_following_members(self):
"""Test the following and unfollowing members using the
generic follow method.
"""
p = self.jacob.profiles.get()
loggedin = self.client.login(username='jacob', password='JKM')
self.assertTrue(loggedin)
response = self.client.post(reverse('user-follow-unfollow'),
{'id': self.david.id,
'what': 'member',
'verb': 'follow'})
self.assertEquals(response.status_code, 200)
self.assertEquals(p.members[0], self.david)
response = self.client.post(reverse('user-follow-unfollow'),
{'id': self.yosef.id,
'what': 'member',
'verb': 'follow'})
self.assertEquals(len(p.members), 2)
response = self.client.post(reverse('user-follow-unfollow'),
{'id': self.david.id,
'what':'member',
'verb':'unfollow'})
self.assertEquals(len(p.members), 1)
self.assertEquals(p.members[0], self.yosef)
response = self.client.post(reverse('user-follow-unfollow'),
{'id': self.yosef.id,
'what': 'member',
'verb': 'unfollow'})
self.assertEquals(len(p.members), 0)
self.client.logout()
def test_following_bills(self):
"""Test the following and unfollowing a bill using the
generic follow method.
"""
p = self.jacob.profiles.get()
loggedin = self.client.login(username='jacob', password='JKM')
self.assertTrue(loggedin)
response = self.client.post(reverse('user-follow-unfollow'),
{'id': self.bill_1.id,
'what': 'bill',
'verb': 'follow'})
self.assertEquals(response.status_code, 200)
self.assertEquals(p.bills[0], self.bill_1)
response = self.client.post(reverse('user-follow-unfollow'),
{'id': self.bill_1.id,
'what': 'bill',
'verb': 'unfollow'})
self.assertEquals(len(p.bills), 0)
self.client.logout()
def test_is_following(self):
"""Test the is-following query"""
p = self.jacob.profiles.get()
loggedin = self.client.login(username='jacob', password='JKM')
self.assertTrue(loggedin)
follow(self.jacob, self.bill_1)
response = self.client.get(reverse('user-is-following'),
{'id': self.bill_1.id,
'what': 'bill'})
self.assertEquals(response.status_code, 200)
res_obj = json.loads(response.content)
self.assertTrue(res_obj['watched'])
unfollow(self.jacob, self.bill_1)
response = self.client.get(reverse('user-is-following'),
{'id': self.bill_1.id,
'what': 'bill'})
self.assertEquals(response.status_code, 200)
res_obj = json.loads(response.content)
self.assertFalse(res_obj['watched'])
self.client.logout()
def tearDown(self):
self.jacob.delete()
self.david.delete()
self.yosef.delete()
self.moshe.delete()
self.bill_1.delete()
self.agenda_1.delete()
self.committee_1.delete()
self.meeting_1.delete()
self.knesset.delete()
| bsd-3-clause | 8,114,460,337,536,239,000 | 42.42233 | 94 | 0.537731 | false |
SinishaDjukic/Meshwork | BuildSystem/PlatformIO/platform/cosa/builder/frameworks/cosa.py | 1 | 5092 | """
PlatformIO Framework for Cosa
-----------------------------
This PlatformIO framework for Cosa is developed and maintained by Sinisha Djukic as part of the Meshwork project.
NOTE: This framework provides UNOFFICIAL support for Cosa and is not in any way associated with the Cosa project or its author Mikael Patel. In case of questions, ideas, feature and change requests please visit:
https://github.com/SinishaDjukic/Meshwork
Cosa
----
Cosa is an object-oriented platform for Arduino. It replaces the Arduino and Wiring library with a large set of integrated classes that support the full range of AVR/ATmega/ATtiny internal hardware modules; all pin modes, Digital, and Analog Pins, External and Pin Change Interrupts, Analog Comparator, PWM, Watchdog, Timer0/Timer2 (RTT), Timer1 (Servo/Tone/VWI), Input Capture, UART, USI, SPI, TWI and EEPROM. Cosa supports several programming paradigms including Multi-Tasking, Event Driven Programming and UML Capsules/Actors. Cosa contains over 200 classes and nearly as many example sketches to get started.
For more information:
https://github.com/mikaelpatel/Cosa/
"""
import os
import string
from os import listdir, walk
from os.path import isdir, isfile, join
from SCons.Script import DefaultEnvironment
from platformio import util
env = DefaultEnvironment()
platform = env.PioPlatform()
BOARD_OPTS = env.BoardConfig()
BOARD_BUILDOPTS = BOARD_OPTS.get("build", {})
BOARD_CORELIBDIRNAME = "cosa"
BOARD_VARIANTLIBDIRNAME = string.replace(str(BOARD_BUILDOPTS.get("variant")), "/", os.sep)
# IMPORTANT: PlatformIO packages dir is expected to have the following folder:
# - framework-cosa (symlink to <Cosa project root>)
FRAMEWORK_DIR = platform.get_package_dir("framework-cosa")
TOOLCHAIN_DIR = platform.get_package_dir("toolchain-atmelavr")
PLATFORMFW_DIR = FRAMEWORK_DIR
lib_dirs = env.get("LIBSOURCE_DIRS")
project_lib_dir = util.get_projectlib_dir()
for _, subdirs, _ in walk(project_lib_dir):
# print "Adding project libraries:"
for dir in subdirs:
lib_dirs.append(join(project_lib_dir, dir))
# print join(project_lib_dir, dir)
break
# Cosa
PLATFORMFW_LIBRARIES_DIR = join(PLATFORMFW_DIR, "libraries")
lib_dirs.append(PLATFORMFW_LIBRARIES_DIR)
lib_dirs.append(join(PLATFORMFW_DIR, "cores", BOARD_CORELIBDIRNAME))
lib_dirs.append(join(PLATFORMFW_DIR, "variants", BOARD_VARIANTLIBDIRNAME))
for _, subdirs, _ in walk(PLATFORMFW_LIBRARIES_DIR):
# print "Adding Cosa libraries:"
for dir in subdirs:
lib_dirs.append(join(PLATFORMFW_LIBRARIES_DIR, dir))
# print join(PLATFORMFW_LIBRARIES_DIR, dir)
break
# AVR
lib_dirs.append(join(TOOLCHAIN_DIR, "avr", "include"))
env.Replace(PLATFORMFW_DIR=PLATFORMFW_DIR)
env.Replace(LIBSOURCE_DIRS=lib_dirs)
print "LIBSOURCE_DIRS"
print lib_dirs
#
# Base
#
#ARDUINO_VERSION = int(
# open(join(env.subst("$PLATFORMFW_DIR"),
# "platform.txt")).read().replace(".", "").strip())
#TODO: Temporarily hardcoded
ARDUINO_VERSION = 1610
# usb flags
ARDUINO_USBDEFINES = []
if "usb_product" in BOARD_BUILDOPTS:
ARDUINO_USBDEFINES = [
"USB_VID=${BOARD_OPTIONS['build']['vid']}",
"USB_PID=${BOARD_OPTIONS['build']['pid']}",
"USB_MANUFACTURER=${BOARD_OPTIONS['build']['manufacturer']}",
'USB_PRODUCT=\\"%s\\"' % (env.subst(
"${BOARD_OPTIONS['build']['usb_product']}").replace('"', ""))
]
ARDUINO_USBDEFINES += ["ARDUINO=%d" % ARDUINO_VERSION]
#
# Compiler flags
#
env.Append(
CPPDEFINES=ARDUINO_USBDEFINES
)
env.Replace(
CFLAGS=[
#"-Wall", "-Wextra",
"-std=gnu11", "-c", "-g", "-Os", "-ffunction-sections", "-fdata-sections", "-MMD", "-flto"
]
)
env.Replace(
CCFLAGS=[
#"-Wall", "-Wextra",
# "-c", "-g", "-Os", "-ffunction-sections", "-fdata-sections", "-MMD", "-flto"
"-g", # include debugging info (so errors include line numbers)
"-Os", # optimize for size
#"-Wall", # show warnings
"-ffunction-sections", # place each function in its own section
"-fdata-sections",
"-mmcu=$BOARD_MCU"
]
)
#compiler.c.flags=-c -g -Os -Wall -ffunction-sections -fdata-sections -MMD -mmcu={build.mcu} -DF_CPU={build.f_cpu} -DARDUINO={runtime.ide.version}
#compiler.cpp.flags=-c -g -Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -MMD -mmcu={build.mcu} -DF_CPU={build.f_cpu} -DARDUINO={runtime.ide.version}
env.Replace(
CPPFLAGS=[
#"-Wall", "-Wextra",
"-c", "-g", "-Os", "-fno-exceptions", "-ffunction-sections", "-fdata-sections", "-MMD", "-Woverloaded-virtual", "-flto", "-std=gnu++11", "-felide-constructors", "-fno-implement-inlines", "-fno-rtti", "-fno-threadsafe-statics", "-mcall-prologues"
]
)
#
# Target: Build Core Library
#
libs = []
env.Append(
CPPPATH=[lib_dirs]
)
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduinoVariant"),
join(PLATFORMFW_DIR, "variants", BOARD_VARIANTLIBDIRNAME)
)
)
envsafe = env.Clone()
libs.append(envsafe.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduino"),
join("$PLATFORMFW_DIR", "cores", BOARD_CORELIBDIRNAME)
)
)
env.Append(LIBS=libs)
| lgpl-2.1 | 5,114,894,481,302,135,000 | 30.627329 | 612 | 0.693637 | false |
theQRL/QRL | src/qrl/core/VoteStats.py | 1 | 11441 | from pyqrllib.pyqrllib import bin2hstr, QRLHelper
from qrl.generated import qrl_pb2
from qrl.core.misc import logger
from qrl.core.StateContainer import StateContainer
from qrl.core.PaginatedData import PaginatedData
from qrl.core.txs.multisig.MultiSigVote import MultiSigVote
from qrl.core.State import State
class VoteStats:
def __init__(self, protobuf_block=None):
self._data = protobuf_block
if protobuf_block is None:
self._data = qrl_pb2.VoteStats()
@property
def pbdata(self):
return self._data
def is_active(self, current_block_number) -> bool:
return not self.executed and current_block_number <= self.expiry_block_number
@property
def multi_sig_address(self):
return self._data.multi_sig_address
@property
def expiry_block_number(self):
return self._data.expiry_block_number
@property
def shared_key(self):
return self._data.shared_key
@property
def signatories(self):
return self._data.signatories
@property
def tx_hashes(self):
return self._data.tx_hashes
@property
def unvotes(self):
return self._data.unvotes
@property
def total_weight(self):
return self._data.total_weight
@property
def executed(self):
return self._data.executed
def update_total_weight(self, value, subtract):
if subtract:
self._data.total_weight -= value
else:
self._data.total_weight += value
def get_address_index(self, address: bytes):
for i in range(len(self.signatories)):
if address == self.signatories[i]:
return i
return -1
def get_unvote_by_address(self, address) -> [bool, int]:
i = self.get_address_index(address)
if i != -1:
return self.unvotes[i], i
return False, -1
def get_vote_tx_hash_by_signatory_address(self, address):
i = self.get_address_index(address)
return self.tx_hashes[i]
def apply_vote_stats(self,
tx: MultiSigVote,
weight: int,
state_container: StateContainer) -> bool:
if state_container.block_number > self.expiry_block_number:
return False
i = self.get_address_index(tx.addr_from)
if i == -1:
return False
if tx.unvote == self.unvotes[i]:
return False
self._data.tx_hashes[i] = tx.txhash
if tx.unvote:
self._data.total_weight -= weight
else:
self._data.total_weight += weight
self._data.unvotes[i] = tx.unvote
multi_sig_spend = state_container.multi_sig_spend_txs[self.shared_key]
threshold = state_container.addresses_state[self.multi_sig_address].threshold
# TODO: return bool response of apply function
self.apply(state_container,
multi_sig_spend,
state_container.addresses_state,
state_container.paginated_tx_hash,
state_container.block_number,
threshold)
return True
def revert_vote_stats(self,
tx: MultiSigVote,
weight: int,
state_container: StateContainer) -> bool:
if state_container.block_number > self.expiry_block_number:
return False
i = self.get_address_index(tx.addr_from)
if i == -1:
return False
if tx.unvote != self.unvotes[i]:
return False
if self._data.tx_hashes[i] != tx.txhash:
return False
multi_sig_spend = state_container.multi_sig_spend_txs[self.shared_key]
threshold = state_container.addresses_state[self.multi_sig_address].threshold
self.revert(state_container,
multi_sig_spend,
state_container.addresses_state,
state_container.paginated_tx_hash,
state_container.block_number,
threshold)
self._data.tx_hashes[i] = tx.prev_tx_hash
if tx.unvote:
self._data.total_weight += weight
else:
self._data.total_weight -= weight
self._data.unvotes[i] = not tx.unvote
return True
@staticmethod
def create(multi_sig_address: bytes,
shared_key: bytes,
signatories: bytes,
expiry_block_number: int):
vote_stats = VoteStats()
vote_stats._data.multi_sig_address = multi_sig_address
vote_stats._data.shared_key = shared_key
vote_stats._data.expiry_block_number = expiry_block_number
for signatory in signatories:
vote_stats._data.signatories.append(signatory)
vote_stats._data.tx_hashes.append(b'')
vote_stats._data.unvotes.append(True)
return vote_stats
def apply(self,
state_container,
multi_sig_spend,
addresses_state: dict,
paginated_tx_hash: PaginatedData,
current_block_number: int,
threshold: int) -> bool:
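        # Executes the multi sig spend once the accumulated vote weight reaches the
        # threshold, provided the shared key has not expired and the multi sig
        # address holds sufficient funds; marks this vote stats object as executed.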
# TODO: return False if executed
if self.executed:
return True
if self.total_weight < threshold:
return False
if current_block_number > self.expiry_block_number:
return False
if multi_sig_spend.total_amount > addresses_state[self.multi_sig_address].balance:
logger.info("[VoteStats] Insufficient funds to execute Multi Sig Spend")
logger.info("Multi Sig Spend Amount: %s, Funds Available: %s",
multi_sig_spend.total_amount,
addresses_state[self.multi_sig_address].balance)
logger.info("Multi Sig Spend txn hash: %s", bin2hstr(multi_sig_spend.txhash))
logger.info("Multi Sig Address: %s", bin2hstr(multi_sig_spend.multi_sig_address))
return False
addresses_state[self.multi_sig_address].update_balance(state_container,
multi_sig_spend.total_amount,
subtract=True)
addr_from_pk = bytes(QRLHelper.getAddress(multi_sig_spend.PK))
for index in range(0, len(multi_sig_spend.addrs_to)):
addr_to = multi_sig_spend.addrs_to[index]
address_state = addresses_state[addr_to]
if addr_to not in (multi_sig_spend.addr_from, addr_from_pk):
paginated_tx_hash.insert(address_state, multi_sig_spend.txhash)
address_state.update_balance(state_container, multi_sig_spend.amounts[index])
self._data.executed = True
return True
def revert(self,
state_container,
multi_sig_spend,
addresses_state: dict,
paginated_tx_hash: PaginatedData,
current_block_number: int,
threshold: int) -> bool:
if not self.executed:
return True
if self.total_weight < threshold:
return False
if current_block_number > self.expiry_block_number:
return False
addresses_state[self.multi_sig_address].update_balance(state_container, multi_sig_spend.total_amount)
addr_from_pk = bytes(QRLHelper.getAddress(multi_sig_spend.PK))
for index in range(0, len(multi_sig_spend.addrs_to)):
addr_to = multi_sig_spend.addrs_to[index]
address_state = addresses_state[addr_to]
if addr_to not in (multi_sig_spend.addr_from, addr_from_pk):
paginated_tx_hash.remove(address_state, multi_sig_spend.txhash)
address_state.update_balance(state_container, multi_sig_spend.amounts[index], subtract=True)
self._data.executed = False
return True
def serialize(self):
return self._data.SerializeToString()
@staticmethod
def deserialize(data):
pbdata = qrl_pb2.VoteStats()
pbdata.ParseFromString(bytes(data))
return VoteStats(pbdata)
def put_state(self, state: State, batch):
try:
state._db.put_raw(b'shared_key_' + self.shared_key, self.serialize(), batch)
except Exception as e:
raise Exception("[put_state] Exception in VoteStats %s", e)
@staticmethod
def delete_state(state: State, shared_key: bytes, batch):
try:
state._db.delete(b'shared_key_' + shared_key, batch)
except Exception as e:
raise Exception("[delete_state] Exception in VoteStats %s", e)
@staticmethod
def get_state(state: State, shared_key):
try:
data = state._db.get_raw(b'shared_key_' + shared_key)
return VoteStats.deserialize(data)
except KeyError:
logger.debug('[get_state] VoteStats %s not found', bin2hstr(shared_key).encode())
except Exception as e:
logger.error('[get_state] %s', e)
return None
# @staticmethod
# def apply_and_put(state: State,
# state_container: StateContainer):
# for key in state_container.votes_stats:
# vote_stats = state_container.votes_stats[key]
# multi_sig_spend = state_container.multi_sig_spend_txs[vote_stats.shared_key]
# threshold = state_container.addresses_state[vote_stats.multi_sig_address].threshold
#
# vote_stats.apply(state_container,
# multi_sig_spend,
# state_container.addresses_state,
# state_container.paginated_tx_hash,
# state_container.block_number,
# threshold)
# vote_stats.put_state(state, state_container.batch)
#
# return True
#
# @staticmethod
# def revert_and_put(state: State,
# state_container: StateContainer):
# for key in state_container.votes_stats:
# vote_stats = state_container.votes_stats[key]
# multi_sig_spend = state_container.multi_sig_spend_txs[vote_stats.shared_key]
# threshold = state_container.addresses_state[vote_stats.multi_sig_address].threshold
#
# vote_stats.revert(state_container,
# multi_sig_spend,
# state_container.addresses_state,
# state_container.paginated_tx_hash,
# state_container.block_number,
# threshold)
# vote_stats.put_state(state, state_container.batch)
#
# return True
@staticmethod
def put_all(state: State,
state_container: StateContainer):
for key in state_container.votes_stats:
vote_stats = state_container.votes_stats[key]
vote_stats.put_state(state, state_container.batch)
return True
@staticmethod
def revert_all(state: State,
state_container: StateContainer):
for key in state_container.votes_stats:
vote_stats = state_container.votes_stats[key]
vote_stats.put_state(state, state_container.batch)
return True
| mit | -743,684,906,503,366,000 | 33.775076 | 109 | 0.577397 | false |
taktik/account-invoicing | __unported__/account_invoice_zero/__openerp__.py | 3 | 1960 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Account Invoice Zero',
'version': '1.0',
'author': "Camptocamp,Odoo Community Association (OCA)",
'maintainer': 'Camptocamp',
'license': 'AGPL-3',
'category': 'Accounting & Finance',
'depends': ['account',
],
'description': """
Account Invoice Zero
====================
Invoices with an amount of 0 are automatically set as paid.
When an invoice has an amount of 0, OpenERP still generates a
receivable/payable move line with a 0 balance. The invoice stays
open even if there is nothing to pay. The user has 2 ways to set the
invoice as paid: create a payment of 0 and reconcile the line with the
payment or reconcile the receivable/payable move line with itself.
This module takes the latter approach and will directly set the invoice
as paid once it is opened.
""",
'website': 'http://www.camptocamp.com',
'data': [],
'test': ['test/account_invoice_zero_paid.yml',
'test/account_invoice_no_zero_open.yml',
],
'installable': False,
'auto_install': False,
}
| agpl-3.0 | -991,330,407,363,221,000 | 36.692308 | 78 | 0.640816 | false |
drolland/Denis-GEGL | tests/python/test-gegl-node.py | 7 | 4465 | #!/usr/bin/env python
""" This file is part of GEGL
*
* GEGL is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* GEGL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GEGL; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2011 Jon Nordby <[email protected]>
"""
import unittest
from gi.repository import Gegl
invert_crop_xml = """<?xml version='1.0' encoding='UTF-8'?>
<gegl>
<node operation='gegl:invert-linear'>
</node>
<node operation='gegl:crop'>
<params>
<param name='x'>0</param>
<param name='y'>0</param>
<param name='width'>0</param>
<param name='height'>0</param>
</params>
</node>
</gegl>
"""
class TestGeglNodes(unittest.TestCase):
def test_exists(self):
Gegl.Node
def test_new(self):
graph = Gegl.Node.new()
self.assertEqual(type(graph), Gegl.Node)
def test_node_properties(self):
graph = Gegl.Node()
node = graph.create_child("gegl:nop")
self.assertEqual("gegl:nop", node.get_property("operation"))
node.set_property("operation", "gegl:translate")
self.assertEqual("gegl:translate", node.get_property("operation"))
default_x = node.get_property("x")
default_sampler = node.get_property("sampler")
self.assertIsNotNone(default_x)
self.assertIsNotNone(default_sampler)
node.set_property("x", 10)
self.assertEqual(node.get_property("x"), 10)
node.set_property("x", -10)
self.assertEqual(node.get_property("x"), -10)
node.set_property("sampler", Gegl.SamplerType.NEAREST)
self.assertEqual(node.get_property("sampler"), Gegl.SamplerType.NEAREST)
node.set_property("sampler", "linear")
self.assertEqual(node.get_property("sampler"), Gegl.SamplerType.LINEAR)
node.set_property("operation", "gegl:nop")
self.assertEqual("gegl:nop", node.get_property("operation"))
node.set_property("operation", "gegl:translate")
self.assertEqual("gegl:translate", node.get_property("operation"))
self.assertEqual(node.get_property("x"), default_x)
self.assertEqual(node.get_property("sampler"), default_sampler)
def test_create_graph(self):
graph = Gegl.Node()
color_node = graph.create_child("gegl:color")
crop_node = graph.create_child("gegl:crop")
self.assertEqual(color_node.get_operation(), "gegl:color")
self.assertEqual(crop_node.get_operation(), "gegl:crop")
crop_rect = Gegl.Rectangle.new(10, 20, 5, 15)
crop_node.set_property("x", crop_rect.x)
crop_node.set_property("y", crop_rect.y)
crop_node.set_property("width", crop_rect.width)
crop_node.set_property("height", crop_rect.height)
color_node.connect_to("output", crop_node, "input")
self.assertTrue(crop_rect.equal(crop_node.get_bounding_box()))
trans_node = graph.create_child("gegl:translate")
crop_node.connect_to("output", trans_node, "input")
self.assertTrue(crop_rect.equal(trans_node.get_bounding_box()))
trans_node.set_property("x", 10)
self.assertFalse(crop_rect.equal(trans_node.get_bounding_box()))
trans_rect = crop_rect.dup()
trans_rect.x += 10
self.assertTrue(trans_rect.equal(trans_node.get_bounding_box()))
class TestGeglXml(unittest.TestCase):
def test_load_xml(self):
graph = Gegl.Node.new_from_xml(invert_crop_xml, "")
children = graph.get_children()
self.assertEqual(len(children), 2)
self.assertEqual(children[0].get_operation(), "gegl:crop")
self.assertEqual(children[1].get_operation(), "gegl:invert-linear")
def test_load_save_roundtrip(self):
graph = Gegl.Node.new_from_xml(invert_crop_xml, "")
output = graph.to_xml("")
self.assertEqual(output, invert_crop_xml)
if __name__ == '__main__':
Gegl.init(None);
unittest.main()
Gegl.exit()
| lgpl-3.0 | -6,764,916,475,776,900,000 | 31.122302 | 80 | 0.645465 | false |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/axes_props.py | 6 | 1238 | #!/usr/bin/env python
"""
You can control the axis tick and grid properties
"""
from pylab import *
t = arange(0.0, 2.0, 0.01)
s = sin(2*pi*t)
plot(t, s)
grid(True)
# MATLAB style
xticklines = getp(gca(), 'xticklines')
yticklines = getp(gca(), 'yticklines')
xgridlines = getp(gca(), 'xgridlines')
ygridlines = getp(gca(), 'ygridlines')
xticklabels = getp(gca(), 'xticklabels')
yticklabels = getp(gca(), 'yticklabels')
setp(xticklines, 'linewidth', 3)
setp(yticklines, 'linewidth', 3)
setp(xgridlines, 'linestyle', '-')
setp(ygridlines, 'linestyle', '-')
setp(yticklabels, 'color', 'r', fontsize='medium')
setp(xticklabels, 'color', 'r', fontsize='medium')
show()
"""
# the same script, python style
from pylab import *
t = arange(0.0, 2.0, 0.01)
s = sin(2*pi*t)
ax = subplot(111)
ax.plot(t, s)
ax.grid(True)
ticklines = ax.get_xticklines()
ticklines.extend( ax.get_yticklines() )
gridlines = ax.get_xgridlines()
gridlines.extend( ax.get_ygridlines() )
ticklabels = ax.get_xticklabels()
ticklabels.extend( ax.get_yticklabels() )
for line in ticklines:
line.set_linewidth(3)
for line in gridlines:
line.set_linestyle('-')
for label in ticklabels:
label.set_color('r')
label.set_fontsize('medium')
show()
"""
| gpl-2.0 | -8,360,840,264,019,771,000 | 18.967742 | 50 | 0.676898 | false |
alien4cloud/alien4cloud-cloudify3-provider | alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/lamp/wrapper/Wordpress_PHP/wordpressConnectToPHPPHP/tosca.interfaces.relationship.Configure/pre_configure_source/_a4c_pre_configure_source.py | 1 | 15586 |
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name
    # Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
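    # Illustrative shape of such a mapping property (hypothetical names/values):
    #   '_a4c_att_ip_address': {'parameters': ['TARGET', 'cloudify.relationships.contained_in', 'ip']}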
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
    # No mapping exists, try to get the attribute directly from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
ctx.logger.info('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
ctx.logger.info('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name
    # Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
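# Environment exposed to the wrapped relationship script: node/instance identifiers
# for both ends of the relationship plus the connection details of the source host.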
env_map = {}
env_map['TARGET_NODE'] = ctx.target.node.id
env_map['TARGET_INSTANCE'] = ctx.target.instance.id
env_map['TARGET_INSTANCES'] = get_instance_list(ctx.target.node.id)
env_map['SOURCE_NODE'] = ctx.source.node.id
env_map['SOURCE_INSTANCE'] = ctx.source.instance.id
env_map['SOURCE_INSTANCES'] = get_instance_list(ctx.source.node.id)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx.source, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx.source)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx.source)
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)')
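    # e.g. a wrapped script printing "EXPECTED_OUTPUT_PORT=8080" produces
    # outputs == {'PORT': '8080'} (illustrative name/value)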
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/Wordpress_PHP/wordpressConnectToPHPPHP/tosca.interfaces.relationship.Configure/pre_configure_source/install_php_module.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.source.instance.runtime_properties['_a4c_OO:tosca.interfaces.relationship.Configure:pre_configure_source:{0}'.format(k)] = v
ctx.source.instance.runtime_properties['wordpress_url'] = r'http://' + get_attribute(ctx.source, 'public_ip_address') + r':' + r'80' + r'/'
ctx.source.instance.update()
ctx.target.instance.update()
| apache-2.0 | -1,615,140,865,971,094,300 | 43.916427 | 232 | 0.661299 | false |
wwj718/edx-platform | cms/djangoapps/contentstore/views/tests/test_item.py | 8 | 101384 | """Tests for items views."""
import json
from datetime import datetime, timedelta
import ddt
from mock import patch, Mock, PropertyMock
from pytz import UTC
from pyquery import PyQuery
from webob import Response
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from contentstore.utils import reverse_usage_url, reverse_course_url
from contentstore.views.component import (
component_handler, get_component_templates
)
from contentstore.views.item import (
create_xblock_info, ALWAYS, VisibilityState, _xblock_type_and_display_name, add_container_page_publishing_info
)
from contentstore.tests.utils import CourseTestCase
from student.tests.factories import UserFactory
from xmodule.capa_module import CapaDescriptor
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_SPLIT_MODULESTORE
from xmodule.modulestore.tests.factories import ItemFactory, LibraryFactory, check_mongo_calls, CourseFactory
from xmodule.x_module import STUDIO_VIEW, STUDENT_VIEW
from xmodule.course_module import DEFAULT_START_DATE
from xblock.exceptions import NoSuchHandlerError
from xblock_django.user_service import DjangoXBlockUserService
from opaque_keys.edx.keys import UsageKey, CourseKey
from opaque_keys.edx.locations import Location
from xmodule.partitions.partitions import Group, UserPartition
class ItemTest(CourseTestCase):
""" Base test class for create, save, and delete """
def setUp(self):
super(ItemTest, self).setUp()
self.course_key = self.course.id
self.usage_key = self.course.location
def get_item_from_modulestore(self, usage_key, verify_is_draft=False):
"""
Get the item referenced by the UsageKey from the modulestore
"""
item = self.store.get_item(usage_key)
if verify_is_draft:
self.assertTrue(getattr(item, 'is_draft', False))
return item
def response_usage_key(self, response):
"""
Get the UsageKey from the response payload and verify that the status_code was 200.
        :param response: the HTTP response returned by the xblock handler
"""
parsed = json.loads(response.content)
self.assertEqual(response.status_code, 200)
key = UsageKey.from_string(parsed['locator'])
if key.course_key.run is None:
key = key.map_into_course(CourseKey.from_string(parsed['courseKey']))
return key
def create_xblock(self, parent_usage_key=None, display_name=None, category=None, boilerplate=None):
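        """
        Create an xblock of the given category under the given parent (defaults to the course root).
        """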
data = {
'parent_locator': unicode(self.usage_key) if parent_usage_key is None else unicode(parent_usage_key),
'category': category
}
if display_name is not None:
data['display_name'] = display_name
if boilerplate is not None:
data['boilerplate'] = boilerplate
return self.client.ajax_post(reverse('contentstore.views.xblock_handler'), json.dumps(data))
def _create_vertical(self, parent_usage_key=None):
"""
Creates a vertical, returning its UsageKey.
"""
resp = self.create_xblock(category='vertical', parent_usage_key=parent_usage_key)
self.assertEqual(resp.status_code, 200)
return self.response_usage_key(resp)
@ddt.ddt
class GetItemTest(ItemTest):
"""Tests for '/xblock' GET url."""
def _get_preview(self, usage_key, data=None):
""" Makes a request to xblock preview handler """
preview_url = reverse_usage_url("xblock_view_handler", usage_key, {'view_name': 'container_preview'})
data = data if data else {}
resp = self.client.get(preview_url, data, HTTP_ACCEPT='application/json')
return resp
def _get_container_preview(self, usage_key, data=None):
"""
Returns the HTML and resources required for the xblock at the specified UsageKey
"""
resp = self._get_preview(usage_key, data)
self.assertEqual(resp.status_code, 200)
resp_content = json.loads(resp.content)
html = resp_content['html']
self.assertTrue(html)
resources = resp_content['resources']
self.assertIsNotNone(resources)
return html, resources
def _get_container_preview_with_error(self, usage_key, expected_code, data=None, content_contains=None):
""" Make request and asserts on response code and response contents """
resp = self._get_preview(usage_key, data)
self.assertEqual(resp.status_code, expected_code)
if content_contains:
self.assertIn(content_contains, resp.content)
return resp
@ddt.data(
(1, 17, 15, 16, 12),
(2, 17, 15, 16, 12),
(3, 17, 15, 16, 12),
)
@ddt.unpack
def test_get_query_count(self, branching_factor, chapter_queries, section_queries, unit_queries, problem_queries):
self.populate_course(branching_factor)
# Retrieve it
with check_mongo_calls(chapter_queries):
self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['chapter'][-1]))
with check_mongo_calls(section_queries):
self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['sequential'][-1]))
with check_mongo_calls(unit_queries):
self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['vertical'][-1]))
with check_mongo_calls(problem_queries):
self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['problem'][-1]))
@ddt.data(
(1, 30),
(2, 32),
(3, 34),
)
@ddt.unpack
def test_container_get_query_count(self, branching_factor, unit_queries,):
self.populate_course(branching_factor)
with check_mongo_calls(unit_queries):
self.client.get(reverse_usage_url('xblock_container_handler', self.populated_usage_keys['vertical'][-1]))
def test_get_vertical(self):
# Add a vertical
resp = self.create_xblock(category='vertical')
usage_key = self.response_usage_key(resp)
# Retrieve it
resp = self.client.get(reverse_usage_url('xblock_handler', usage_key))
self.assertEqual(resp.status_code, 200)
def test_get_empty_container_fragment(self):
root_usage_key = self._create_vertical()
html, __ = self._get_container_preview(root_usage_key)
# XBlock messages are added by the Studio wrapper.
self.assertIn('wrapper-xblock-message', html)
# Make sure that "wrapper-xblock" does not appear by itself (without -message at end).
self.assertNotRegexpMatches(html, r'wrapper-xblock[^-]+')
# Verify that the header and article tags are still added
self.assertIn('<header class="xblock-header xblock-header-vertical">', html)
self.assertIn('<article class="xblock-render">', html)
def test_get_container_fragment(self):
root_usage_key = self._create_vertical()
# Add a problem beneath a child vertical
child_vertical_usage_key = self._create_vertical(parent_usage_key=root_usage_key)
resp = self.create_xblock(parent_usage_key=child_vertical_usage_key, category='problem', boilerplate='multiplechoice.yaml')
self.assertEqual(resp.status_code, 200)
# Get the preview HTML
html, __ = self._get_container_preview(root_usage_key)
# Verify that the Studio nesting wrapper has been added
self.assertIn('level-nesting', html)
self.assertIn('<header class="xblock-header xblock-header-vertical">', html)
self.assertIn('<article class="xblock-render">', html)
# Verify that the Studio element wrapper has been added
self.assertIn('level-element', html)
def test_get_container_nested_container_fragment(self):
"""
Test the case of the container page containing a link to another container page.
"""
# Add a wrapper with child beneath a child vertical
root_usage_key = self._create_vertical()
resp = self.create_xblock(parent_usage_key=root_usage_key, category="wrapper")
self.assertEqual(resp.status_code, 200)
wrapper_usage_key = self.response_usage_key(resp)
resp = self.create_xblock(parent_usage_key=wrapper_usage_key, category='problem', boilerplate='multiplechoice.yaml')
self.assertEqual(resp.status_code, 200)
# Get the preview HTML and verify the View -> link is present.
html, __ = self._get_container_preview(root_usage_key)
self.assertIn('wrapper-xblock', html)
self.assertRegexpMatches(
html,
# The instance of the wrapper class will have an auto-generated ID. Allow any
# characters after wrapper.
r'"/container/{}" class="action-button">\s*<span class="action-button-text">View</span>'.format(
wrapper_usage_key
)
)
def test_split_test(self):
"""
Test that a split_test module renders all of its children in Studio.
"""
root_usage_key = self._create_vertical()
resp = self.create_xblock(category='split_test', parent_usage_key=root_usage_key)
split_test_usage_key = self.response_usage_key(resp)
resp = self.create_xblock(parent_usage_key=split_test_usage_key, category='html', boilerplate='announcement.yaml')
self.assertEqual(resp.status_code, 200)
resp = self.create_xblock(parent_usage_key=split_test_usage_key, category='html', boilerplate='zooming_image.yaml')
self.assertEqual(resp.status_code, 200)
html, __ = self._get_container_preview(split_test_usage_key)
self.assertIn('Announcement', html)
self.assertIn('Zooming', html)
def test_split_test_edited(self):
"""
Test that rename of a group changes display name of child vertical.
"""
self.course.user_partitions = [UserPartition(
0, 'first_partition', 'First Partition',
[Group("0", 'alpha'), Group("1", 'beta')]
)]
self.store.update_item(self.course, self.user.id)
root_usage_key = self._create_vertical()
resp = self.create_xblock(category='split_test', parent_usage_key=root_usage_key)
split_test_usage_key = self.response_usage_key(resp)
self.client.ajax_post(
reverse_usage_url("xblock_handler", split_test_usage_key),
data={'metadata': {'user_partition_id': str(0)}}
)
html, __ = self._get_container_preview(split_test_usage_key)
self.assertIn('alpha', html)
self.assertIn('beta', html)
# Rename groups in group configuration
GROUP_CONFIGURATION_JSON = {
u'id': 0,
u'name': u'first_partition',
u'scheme': u'random',
u'description': u'First Partition',
u'version': UserPartition.VERSION,
u'groups': [
{u'id': 0, u'name': u'New_NAME_A', u'version': 1},
{u'id': 1, u'name': u'New_NAME_B', u'version': 1},
],
}
response = self.client.put(
reverse_course_url('group_configurations_detail_handler', self.course.id, kwargs={'group_configuration_id': 0}),
data=json.dumps(GROUP_CONFIGURATION_JSON),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 201)
html, __ = self._get_container_preview(split_test_usage_key)
self.assertNotIn('alpha', html)
self.assertNotIn('beta', html)
self.assertIn('New_NAME_A', html)
self.assertIn('New_NAME_B', html)
def test_valid_paging(self):
"""
Tests that valid paging parameters are passed along to the underlying block
"""
with patch('contentstore.views.item.get_preview_fragment') as patched_get_preview_fragment:
retval = Mock()
type(retval).content = PropertyMock(return_value="Some content")
type(retval).resources = PropertyMock(return_value=[])
patched_get_preview_fragment.return_value = retval
root_usage_key = self._create_vertical()
_, _ = self._get_container_preview(
root_usage_key,
{'enable_paging': 'true', 'page_number': 0, 'page_size': 2}
)
call_args = patched_get_preview_fragment.call_args[0]
_, _, context = call_args
self.assertIn('paging', context)
self.assertEqual({'page_number': 0, 'page_size': 2}, context['paging'])
@ddt.data([1, 'invalid'], ['invalid', 2])
@ddt.unpack
def test_invalid_paging(self, page_number, page_size):
"""
Tests that invalid paging parameters return a 400 error with a helpful message
"""
root_usage_key = self._create_vertical()
self._get_container_preview_with_error(
root_usage_key,
400,
data={'enable_paging': 'true', 'page_number': page_number, 'page_size': page_size},
content_contains="Couldn't parse paging parameters"
)
def test_get_user_partitions_and_groups(self):
self.course.user_partitions = [
UserPartition(
id=0,
name="Verification user partition",
scheme=UserPartition.get_scheme("verification"),
description="Verification user partition",
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
]
self.store.update_item(self.course, self.user.id)
# Create an item and retrieve it
resp = self.create_xblock(category='vertical')
usage_key = self.response_usage_key(resp)
resp = self.client.get(reverse_usage_url('xblock_handler', usage_key))
self.assertEqual(resp.status_code, 200)
# Check that the partition and group information was returned
result = json.loads(resp.content)
self.assertEqual(result["user_partitions"], [
{
"id": 0,
"name": "Verification user partition",
"scheme": "verification",
"groups": [
{
"id": 0,
"name": "Group A",
"selected": False,
"deleted": False,
},
{
"id": 1,
"name": "Group B",
"selected": False,
"deleted": False,
},
]
}
])
self.assertEqual(result["group_access"], {})
@ddt.ddt
class DeleteItem(ItemTest):
"""Tests for '/xblock' DELETE url."""
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_delete_static_page(self, store):
course = CourseFactory.create(default_store=store)
# Add static tab
resp = self.create_xblock(category='static_tab', parent_usage_key=course.location)
usage_key = self.response_usage_key(resp)
# Now delete it. There was a bug that the delete was failing (static tabs do not exist in draft modulestore).
resp = self.client.delete(reverse_usage_url('xblock_handler', usage_key))
self.assertEqual(resp.status_code, 204)
class TestCreateItem(ItemTest):
"""
Test the create_item handler thoroughly
"""
def test_create_nicely(self):
"""
Try the straightforward use cases
"""
# create a chapter
display_name = 'Nicely created'
resp = self.create_xblock(display_name=display_name, category='chapter')
# get the new item and check its category and display_name
chap_usage_key = self.response_usage_key(resp)
new_obj = self.get_item_from_modulestore(chap_usage_key)
self.assertEqual(new_obj.scope_ids.block_type, 'chapter')
self.assertEqual(new_obj.display_name, display_name)
self.assertEqual(new_obj.location.org, self.course.location.org)
self.assertEqual(new_obj.location.course, self.course.location.course)
# get the course and ensure it now points to this one
course = self.get_item_from_modulestore(self.usage_key)
self.assertIn(chap_usage_key, course.children)
# use default display name
resp = self.create_xblock(parent_usage_key=chap_usage_key, category='vertical')
vert_usage_key = self.response_usage_key(resp)
# create problem w/ boilerplate
template_id = 'multiplechoice.yaml'
resp = self.create_xblock(
parent_usage_key=vert_usage_key,
category='problem',
boilerplate=template_id
)
prob_usage_key = self.response_usage_key(resp)
problem = self.get_item_from_modulestore(prob_usage_key, verify_is_draft=True)
# check against the template
template = CapaDescriptor.get_template(template_id)
self.assertEqual(problem.data, template['data'])
self.assertEqual(problem.display_name, template['metadata']['display_name'])
self.assertEqual(problem.markdown, template['metadata']['markdown'])
def test_create_item_negative(self):
"""
Negative tests for create_item
"""
# non-existent boilerplate: creates a default
resp = self.create_xblock(category='problem', boilerplate='nosuchboilerplate.yaml')
self.assertEqual(resp.status_code, 200)
def test_create_with_future_date(self):
self.assertEqual(self.course.start, datetime(2030, 1, 1, tzinfo=UTC))
resp = self.create_xblock(category='chapter')
usage_key = self.response_usage_key(resp)
obj = self.get_item_from_modulestore(usage_key)
self.assertEqual(obj.start, datetime(2030, 1, 1, tzinfo=UTC))
def test_static_tabs_initialization(self):
"""
Test that static tab display names are not being initialized as None.
"""
# Add a new static tab with no explicit name
resp = self.create_xblock(category='static_tab')
usage_key = self.response_usage_key(resp)
# Check that its name is not None
new_tab = self.get_item_from_modulestore(usage_key)
self.assertEqual(new_tab.display_name, 'Empty')
class TestDuplicateItem(ItemTest):
"""
Test the duplicate method.
"""
def setUp(self):
""" Creates the test course structure and a few components to 'duplicate'. """
super(TestDuplicateItem, self).setUp()
# Create a parent chapter (for testing children of children).
resp = self.create_xblock(parent_usage_key=self.usage_key, category='chapter')
self.chapter_usage_key = self.response_usage_key(resp)
# create a sequential containing a problem and an html component
resp = self.create_xblock(parent_usage_key=self.chapter_usage_key, category='sequential')
self.seq_usage_key = self.response_usage_key(resp)
# create problem and an html component
resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='problem', boilerplate='multiplechoice.yaml')
self.problem_usage_key = self.response_usage_key(resp)
resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='html')
self.html_usage_key = self.response_usage_key(resp)
# Create a second sequential (just for testing children of children)
self.create_xblock(parent_usage_key=self.chapter_usage_key, category='sequential2')
def test_duplicate_equality(self):
"""
Tests that a duplicated xblock is identical to the original,
except for location and display name.
"""
def duplicate_and_verify(source_usage_key, parent_usage_key):
""" Duplicates the source, parenting to supplied parent. Then does equality check. """
usage_key = self._duplicate_item(parent_usage_key, source_usage_key)
self.assertTrue(
check_equality(source_usage_key, usage_key, parent_usage_key),
"Duplicated item differs from original"
)
def check_equality(source_usage_key, duplicate_usage_key, parent_usage_key=None):
"""
Gets source and duplicated items from the modulestore using supplied usage keys.
Then verifies that they represent equivalent items (modulo parents and other
known things that may differ).
"""
original_item = self.get_item_from_modulestore(source_usage_key)
duplicated_item = self.get_item_from_modulestore(duplicate_usage_key)
self.assertNotEqual(
unicode(original_item.location),
unicode(duplicated_item.location),
"Location of duplicate should be different from original"
)
# Parent will only be equal for root of duplicated structure, in the case
# where an item is duplicated in-place.
if parent_usage_key and unicode(original_item.parent) == unicode(parent_usage_key):
self.assertEqual(
unicode(parent_usage_key), unicode(duplicated_item.parent),
"Parent of duplicate should equal parent of source for root xblock when duplicated in-place"
)
else:
self.assertNotEqual(
unicode(original_item.parent), unicode(duplicated_item.parent),
"Parent duplicate should be different from source"
)
# Set the location, display name, and parent to be the same so we can make sure the rest of the
# duplicate is equal.
duplicated_item.location = original_item.location
duplicated_item.display_name = original_item.display_name
duplicated_item.parent = original_item.parent
# Children will also be duplicated, so for the purposes of testing equality, we will set
# the children to the original after recursively checking the children.
if original_item.has_children:
self.assertEqual(
len(original_item.children),
len(duplicated_item.children),
"Duplicated item differs in number of children"
)
for i in xrange(len(original_item.children)):
if not check_equality(original_item.children[i], duplicated_item.children[i]):
return False
duplicated_item.children = original_item.children
return original_item == duplicated_item
duplicate_and_verify(self.problem_usage_key, self.seq_usage_key)
duplicate_and_verify(self.html_usage_key, self.seq_usage_key)
duplicate_and_verify(self.seq_usage_key, self.chapter_usage_key)
duplicate_and_verify(self.chapter_usage_key, self.usage_key)
def test_ordering(self):
"""
Tests that a duplicated xblock appears immediately after its source
(if duplicate and source share the same parent), else at the
end of the children of the parent.
"""
def verify_order(source_usage_key, parent_usage_key, source_position=None):
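""" Duplicates the source into the given parent and verifies the duplicate's position among the children. """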
usage_key = self._duplicate_item(parent_usage_key, source_usage_key)
parent = self.get_item_from_modulestore(parent_usage_key)
children = parent.children
if source_position is None:
self.assertFalse(source_usage_key in children, 'source item not expected in children array')
self.assertEqual(
children[len(children) - 1],
usage_key,
"duplicated item not at end"
)
else:
self.assertEqual(
children[source_position],
source_usage_key,
"source item at wrong position"
)
self.assertEqual(
children[source_position + 1],
usage_key,
"duplicated item not ordered after source item"
)
verify_order(self.problem_usage_key, self.seq_usage_key, 0)
# 2 because the duplicate of the problem is located before the html component.
verify_order(self.html_usage_key, self.seq_usage_key, 2)
verify_order(self.seq_usage_key, self.chapter_usage_key, 0)
# Test duplicating something into a location that is not the parent of the original item.
# Duplicated item should appear at the end.
verify_order(self.html_usage_key, self.usage_key)
def test_display_name(self):
"""
Tests the expected display name for the duplicated xblock.
"""
def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):
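""" Duplicates the source into the given parent and verifies the display name of the duplicate. """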
usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)
duplicated_item = self.get_item_from_modulestore(usage_key)
self.assertEqual(duplicated_item.display_name, expected_name)
return usage_key
# Display name comes from template.
dupe_usage_key = verify_name(self.problem_usage_key, self.seq_usage_key, "Duplicate of 'Multiple Choice'")
# Test dupe of dupe.
verify_name(dupe_usage_key, self.seq_usage_key, "Duplicate of 'Duplicate of 'Multiple Choice''")
# Uses default display_name of 'Text' from HTML component.
verify_name(self.html_usage_key, self.seq_usage_key, "Duplicate of 'Text'")
# The sequence does not have a display_name set, so category is shown.
verify_name(self.seq_usage_key, self.chapter_usage_key, "Duplicate of sequential")
# Now send a custom display name for the duplicate.
verify_name(self.seq_usage_key, self.chapter_usage_key, "customized name", display_name="customized name")
def _duplicate_item(self, parent_usage_key, source_usage_key, display_name=None):
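""" Posts a duplication request to the xblock handler and returns the usage key of the duplicate. """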
data = {
'parent_locator': unicode(parent_usage_key),
'duplicate_source_locator': unicode(source_usage_key)
}
if display_name is not None:
data['display_name'] = display_name
resp = self.client.ajax_post(reverse('contentstore.views.xblock_handler'), json.dumps(data))
return self.response_usage_key(resp)
class TestEditItemSetup(ItemTest):
"""
Setup for xblock update tests.
"""
def setUp(self):
""" Creates the test course structure and a couple problems to 'edit'. """
super(TestEditItemSetup, self).setUp()
# create a chapter
display_name = 'chapter created'
resp = self.create_xblock(display_name=display_name, category='chapter')
chap_usage_key = self.response_usage_key(resp)
# create 2 sequentials
resp = self.create_xblock(parent_usage_key=chap_usage_key, category='sequential')
self.seq_usage_key = self.response_usage_key(resp)
self.seq_update_url = reverse_usage_url("xblock_handler", self.seq_usage_key)
resp = self.create_xblock(parent_usage_key=chap_usage_key, category='sequential')
self.seq2_usage_key = self.response_usage_key(resp)
self.seq2_update_url = reverse_usage_url("xblock_handler", self.seq2_usage_key)
# create problem w/ boilerplate
template_id = 'multiplechoice.yaml'
resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='problem', boilerplate=template_id)
self.problem_usage_key = self.response_usage_key(resp)
self.problem_update_url = reverse_usage_url("xblock_handler", self.problem_usage_key)
self.course_update_url = reverse_usage_url("xblock_handler", self.usage_key)
class TestEditItem(TestEditItemSetup):
"""
Test xblock update.
"""
def test_delete_field(self):
"""
Setting a metadata field to None removes the explicit value and reverts the field to its default
"""
self.client.ajax_post(
self.problem_update_url,
data={'metadata': {'rerandomize': 'onreset'}}
)
problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertEqual(problem.rerandomize, 'onreset')
self.client.ajax_post(
self.problem_update_url,
data={'metadata': {'rerandomize': None}}
)
problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertEqual(problem.rerandomize, 'never')
def test_null_field(self):
"""
Sending a field name in 'nullout' sets that field's value to None
"""
problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertIsNotNone(problem.markdown)
self.client.ajax_post(
self.problem_update_url,
data={'nullout': ['markdown']}
)
problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertIsNone(problem.markdown)
def test_date_fields(self):
"""
Test setting due & start dates on sequential
"""
sequential = self.get_item_from_modulestore(self.seq_usage_key)
self.assertIsNone(sequential.due)
self.client.ajax_post(
self.seq_update_url,
data={'metadata': {'due': '2010-11-22T04:00Z'}}
)
sequential = self.get_item_from_modulestore(self.seq_usage_key)
self.assertEqual(sequential.due, datetime(2010, 11, 22, 4, 0, tzinfo=UTC))
self.client.ajax_post(
self.seq_update_url,
data={'metadata': {'start': '2010-09-12T14:00Z'}}
)
sequential = self.get_item_from_modulestore(self.seq_usage_key)
self.assertEqual(sequential.due, datetime(2010, 11, 22, 4, 0, tzinfo=UTC))
self.assertEqual(sequential.start, datetime(2010, 9, 12, 14, 0, tzinfo=UTC))
def test_delete_child(self):
"""
Test deleting a child.
"""
# Create 2 children of main course.
resp_1 = self.create_xblock(display_name='child 1', category='chapter')
resp_2 = self.create_xblock(display_name='child 2', category='chapter')
chapter1_usage_key = self.response_usage_key(resp_1)
chapter2_usage_key = self.response_usage_key(resp_2)
course = self.get_item_from_modulestore(self.usage_key)
self.assertIn(chapter1_usage_key, course.children)
self.assertIn(chapter2_usage_key, course.children)
# Remove one child from the course.
resp = self.client.delete(reverse_usage_url("xblock_handler", chapter1_usage_key))
self.assertEqual(resp.status_code, 204)
# Verify that the child is removed.
course = self.get_item_from_modulestore(self.usage_key)
self.assertNotIn(chapter1_usage_key, course.children)
self.assertIn(chapter2_usage_key, course.children)
def test_reorder_children(self):
"""
Test reordering children that can be in the draft store.
"""
# Create 2 child units and re-order them. There was a bug about @draft getting added
# to the IDs.
unit_1_resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical')
unit_2_resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical')
unit1_usage_key = self.response_usage_key(unit_1_resp)
unit2_usage_key = self.response_usage_key(unit_2_resp)
# The sequential already has a child defined in the setUp (a problem).
# Children must be on the sequential to reproduce the original bug,
# as it is important that the parent (sequential) NOT be in the draft store.
children = self.get_item_from_modulestore(self.seq_usage_key).children
self.assertEqual(unit1_usage_key, children[1])
self.assertEqual(unit2_usage_key, children[2])
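# Reorder so that the problem stays first and the two units swap places.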
resp = self.client.ajax_post(
self.seq_update_url,
data={'children': [unicode(self.problem_usage_key), unicode(unit2_usage_key), unicode(unit1_usage_key)]}
)
self.assertEqual(resp.status_code, 200)
children = self.get_item_from_modulestore(self.seq_usage_key).children
self.assertEqual(self.problem_usage_key, children[0])
self.assertEqual(unit1_usage_key, children[2])
self.assertEqual(unit2_usage_key, children[1])
def test_move_parented_child(self):
"""
Test moving a child unit from one sequential to another
"""
unit_1_key = self.response_usage_key(
self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical', display_name='unit 1')
)
unit_2_key = self.response_usage_key(
self.create_xblock(parent_usage_key=self.seq2_usage_key, category='vertical', display_name='unit 2')
)
# move unit 1 from sequential1 to sequential2
resp = self.client.ajax_post(
self.seq2_update_url,
data={'children': [unicode(unit_1_key), unicode(unit_2_key)]}
)
self.assertEqual(resp.status_code, 200)
# verify children
self.assertListEqual(
self.get_item_from_modulestore(self.seq2_usage_key).children,
[unit_1_key, unit_2_key],
)
self.assertListEqual(
self.get_item_from_modulestore(self.seq_usage_key).children,
[self.problem_usage_key], # problem child created in setUp
)
def test_move_orphaned_child_error(self):
"""
Test moving an orphan returns an error
"""
unit_1_key = self.store.create_item(self.user.id, self.course_key, 'vertical', 'unit1').location
# adding orphaned unit 1 should return an error
resp = self.client.ajax_post(
self.seq2_update_url,
data={'children': [unicode(unit_1_key)]}
)
self.assertEqual(resp.status_code, 400)
self.assertIn("Invalid data, possibly caused by concurrent authors", resp.content)
# verify children
self.assertListEqual(
self.get_item_from_modulestore(self.seq2_usage_key).children,
[]
)
def test_move_child_creates_orphan_error(self):
"""
Test creating an orphan returns an error
"""
unit_1_key = self.response_usage_key(
self.create_xblock(parent_usage_key=self.seq2_usage_key, category='vertical', display_name='unit 1')
)
unit_2_key = self.response_usage_key(
self.create_xblock(parent_usage_key=self.seq2_usage_key, category='vertical', display_name='unit 2')
)
# remove unit 2 should return an error
resp = self.client.ajax_post(
self.seq2_update_url,
data={'children': [unicode(unit_1_key)]}
)
self.assertEqual(resp.status_code, 400)
self.assertIn("Invalid data, possibly caused by concurrent authors", resp.content)
# verify children
self.assertListEqual(
self.get_item_from_modulestore(self.seq2_usage_key).children,
[unit_1_key, unit_2_key]
)
def _is_location_published(self, location):
"""
Returns whether or not the item with given location has a published version.
"""
return modulestore().has_item(location, revision=ModuleStoreEnum.RevisionOption.published_only)
def _verify_published_with_no_draft(self, location):
"""
Verifies the item with given location has a published version and no draft (unpublished changes).
"""
self.assertTrue(self._is_location_published(location))
self.assertFalse(modulestore().has_changes(modulestore().get_item(location)))
def _verify_published_with_draft(self, location):
"""
Verifies the item with given location has a published version and also a draft version (unpublished changes).
"""
self.assertTrue(self._is_location_published(location))
self.assertTrue(modulestore().has_changes(modulestore().get_item(location)))
def test_make_public(self):
""" Test making a private problem public (publishing it). """
# When the problem is first created, it is only in draft (because of its category).
self.assertFalse(self._is_location_published(self.problem_usage_key))
self.client.ajax_post(
self.problem_update_url,
data={'publish': 'make_public'}
)
self._verify_published_with_no_draft(self.problem_usage_key)
def test_make_draft(self):
""" Test creating a draft version of a public problem. """
self._make_draft_content_different_from_published()
def test_revert_to_published(self):
""" Test reverting draft content to published """
self._make_draft_content_different_from_published()
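# Discard the draft changes; the problem should revert to its published state.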
self.client.ajax_post(
self.problem_update_url,
data={'publish': 'discard_changes'}
)
self._verify_published_with_no_draft(self.problem_usage_key)
published = modulestore().get_item(self.problem_usage_key, revision=ModuleStoreEnum.RevisionOption.published_only)
self.assertIsNone(published.due)
def test_republish(self):
""" Test republishing an item. """
new_display_name = 'New Display Name'
# When the problem is first created, it is only in draft (because of its category).
self.assertFalse(self._is_location_published(self.problem_usage_key))
# Republishing when only in draft will update the draft but not cause a public item to be created.
self.client.ajax_post(
self.problem_update_url,
data={
'publish': 'republish',
'metadata': {
'display_name': new_display_name
}
}
)
self.assertFalse(self._is_location_published(self.problem_usage_key))
draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertEqual(draft.display_name, new_display_name)
# Publish the item
self.client.ajax_post(
self.problem_update_url,
data={'publish': 'make_public'}
)
# Now republishing should update the published version
new_display_name_2 = 'New Display Name 2'
self.client.ajax_post(
self.problem_update_url,
data={
'publish': 'republish',
'metadata': {
'display_name': new_display_name_2
}
}
)
self._verify_published_with_no_draft(self.problem_usage_key)
published = modulestore().get_item(
self.problem_usage_key,
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.assertEqual(published.display_name, new_display_name_2)
def test_direct_only_categories_not_republished(self):
"""Verify that republish is ignored for items in DIRECT_ONLY_CATEGORIES"""
# Create a vertical child with published and unpublished versions.
# If the parent sequential is not re-published, then the child vertical should also not be re-published.
resp = self.create_xblock(parent_usage_key=self.seq_usage_key, display_name='vertical', category='vertical')
vertical_usage_key = self.response_usage_key(resp)
vertical_update_url = reverse_usage_url('xblock_handler', vertical_usage_key)
self.client.ajax_post(vertical_update_url, data={'publish': 'make_public'})
self.client.ajax_post(vertical_update_url, data={'metadata': {'display_name': 'New Display Name'}})
self._verify_published_with_draft(self.seq_usage_key)
self.client.ajax_post(self.seq_update_url, data={'publish': 'republish'})
self._verify_published_with_draft(self.seq_usage_key)
def _make_draft_content_different_from_published(self):
"""
Helper method to create different draft and published versions of a problem.
"""
# Make problem public.
self.client.ajax_post(
self.problem_update_url,
data={'publish': 'make_public'}
)
self._verify_published_with_no_draft(self.problem_usage_key)
published = modulestore().get_item(self.problem_usage_key, revision=ModuleStoreEnum.RevisionOption.published_only)
# Update the draft version and check that published is different.
self.client.ajax_post(
self.problem_update_url,
data={'metadata': {'due': '2077-10-10T04:00Z'}}
)
updated_draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertEqual(updated_draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))
self.assertIsNone(published.due)
# Fetch the published version again to make sure the due date is still unset.
published = modulestore().get_item(published.location, revision=ModuleStoreEnum.RevisionOption.published_only)
self.assertIsNone(published.due)
def test_make_public_with_update(self):
""" Update a problem and make it public at the same time. """
self.client.ajax_post(
self.problem_update_url,
data={
'metadata': {'due': '2077-10-10T04:00Z'},
'publish': 'make_public'
}
)
published = self.get_item_from_modulestore(self.problem_usage_key)
self.assertEqual(published.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))
def test_published_and_draft_contents_with_update(self):
""" Create a draft and publish it then modify the draft and check that published content is not modified """
# Make problem public.
self.client.ajax_post(
self.problem_update_url,
data={'publish': 'make_public'}
)
self._verify_published_with_no_draft(self.problem_usage_key)
published = modulestore().get_item(self.problem_usage_key, revision=ModuleStoreEnum.RevisionOption.published_only)
# Now make a draft
self.client.ajax_post(
self.problem_update_url,
data={
'id': unicode(self.problem_usage_key),
'metadata': {},
'data': "<p>Problem content draft.</p>"
}
)
# Both published and draft content should be different
draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertNotEqual(draft.data, published.data)
# Get problem by 'xblock_handler'
view_url = reverse_usage_url("xblock_view_handler", self.problem_usage_key, {"view_name": STUDENT_VIEW})
resp = self.client.get(view_url, HTTP_ACCEPT='application/json')
self.assertEqual(resp.status_code, 200)
# Activate the editing view
view_url = reverse_usage_url("xblock_view_handler", self.problem_usage_key, {"view_name": STUDIO_VIEW})
resp = self.client.get(view_url, HTTP_ACCEPT='application/json')
self.assertEqual(resp.status_code, 200)
# Both published and draft content should still be different
draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertNotEqual(draft.data, published.data)
# Fetch the published version again to make sure the data is correct.
published = modulestore().get_item(published.location, revision=ModuleStoreEnum.RevisionOption.published_only)
self.assertNotEqual(draft.data, published.data)
def test_publish_states_of_nested_xblocks(self):
""" Test publishing of a unit page containing a nested xblock """
resp = self.create_xblock(parent_usage_key=self.seq_usage_key, display_name='Test Unit', category='vertical')
unit_usage_key = self.response_usage_key(resp)
resp = self.create_xblock(parent_usage_key=unit_usage_key, category='wrapper')
wrapper_usage_key = self.response_usage_key(resp)
resp = self.create_xblock(parent_usage_key=wrapper_usage_key, category='html')
html_usage_key = self.response_usage_key(resp)
# The unit and its children should be private initially
unit_update_url = reverse_usage_url('xblock_handler', unit_usage_key)
self.assertFalse(self._is_location_published(unit_usage_key))
self.assertFalse(self._is_location_published(html_usage_key))
# Make the unit public and verify that the nested html block is also made public
resp = self.client.ajax_post(
unit_update_url,
data={'publish': 'make_public'}
)
self.assertEqual(resp.status_code, 200)
self._verify_published_with_no_draft(unit_usage_key)
self._verify_published_with_no_draft(html_usage_key)
# Make a draft for the unit and verify that the nested html block also has a draft
resp = self.client.ajax_post(
unit_update_url,
data={
'id': unicode(unit_usage_key),
'metadata': {},
}
)
self.assertEqual(resp.status_code, 200)
self._verify_published_with_draft(unit_usage_key)
self._verify_published_with_draft(html_usage_key)
def test_field_value_errors(self):
"""
Test that if the user's input causes a ValueError on an XBlock field,
we provide a friendly error message back to the user.
"""
response = self.create_xblock(parent_usage_key=self.seq_usage_key, category='video')
video_usage_key = self.response_usage_key(response)
update_url = reverse_usage_url('xblock_handler', video_usage_key)
response = self.client.ajax_post(
update_url,
data={
'id': unicode(video_usage_key),
'metadata': {
'saved_video_position': "Not a valid relative time",
},
}
)
self.assertEqual(response.status_code, 400)
parsed = json.loads(response.content)
self.assertIn("error", parsed)
self.assertIn("Incorrect RelativeTime value", parsed["error"]) # See xmodule/fields.py
class TestEditItemSplitMongo(TestEditItemSetup):
"""
Tests for EditItem running on top of the SplitMongoModuleStore.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def test_editing_view_wrappers(self):
"""
Verify that the editing view only generates a single wrapper, no matter how many times it's loaded
Exposes: PLAT-417
"""
view_url = reverse_usage_url("xblock_view_handler", self.problem_usage_key, {"view_name": STUDIO_VIEW})
for __ in xrange(3):
resp = self.client.get(view_url, HTTP_ACCEPT='application/json')
self.assertEqual(resp.status_code, 200)
content = json.loads(resp.content)
self.assertEqual(len(PyQuery(content['html'])('.xblock-{}'.format(STUDIO_VIEW))), 1)
class TestEditSplitModule(ItemTest):
"""
Tests around editing instances of the split_test module.
"""
def setUp(self):
super(TestEditSplitModule, self).setUp()
self.course.user_partitions = [
UserPartition(
0, 'first_partition', 'First Partition',
[Group("0", 'alpha'), Group("1", 'beta')]
),
UserPartition(
1, 'second_partition', 'Second Partition',
[Group("0", 'Group 0'), Group("1", 'Group 1'), Group("2", 'Group 2')]
)
]
self.store.update_item(self.course, self.user.id)
root_usage_key = self._create_vertical()
resp = self.create_xblock(category='split_test', parent_usage_key=root_usage_key)
self.split_test_usage_key = self.response_usage_key(resp)
self.split_test_update_url = reverse_usage_url("xblock_handler", self.split_test_usage_key)
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/dummy-url')
self.request.user = self.user
def _update_partition_id(self, partition_id):
"""
Helper method that sets the user_partition_id to the supplied value.
The updated split_test instance is returned.
"""
self.client.ajax_post(
self.split_test_update_url,
# Even though user_partition_id is Scope.content, it will get saved by the Studio editor as
# metadata. The code in item.py will update the field correctly, even though it is not the
# expected scope.
data={'metadata': {'user_partition_id': str(partition_id)}}
)
# Verify the partition_id was saved.
split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
self.assertEqual(partition_id, split_test.user_partition_id)
return split_test
def _assert_children(self, expected_number):
"""
Verifies the number of children of the split_test instance.
"""
split_test = self.get_item_from_modulestore(self.split_test_usage_key, True)
self.assertEqual(expected_number, len(split_test.children))
return split_test
def test_create_groups(self):
"""
Test that verticals are created for the configuration groups when
a split_test module is edited.
"""
split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
# Initially, no user_partition_id is set, and the split_test has no children.
self.assertEqual(-1, split_test.user_partition_id)
self.assertEqual(0, len(split_test.children))
# Set the user_partition_id to 0.
split_test = self._update_partition_id(0)
# Verify that child verticals have been set to match the groups
self.assertEqual(2, len(split_test.children))
vertical_0 = self.get_item_from_modulestore(split_test.children[0], verify_is_draft=True)
vertical_1 = self.get_item_from_modulestore(split_test.children[1], verify_is_draft=True)
self.assertEqual("vertical", vertical_0.category)
self.assertEqual("vertical", vertical_1.category)
self.assertEqual("Group ID 0", vertical_0.display_name)
self.assertEqual("Group ID 1", vertical_1.display_name)
# Verify that the group_id_to_child mapping is correct.
self.assertEqual(2, len(split_test.group_id_to_child))
self.assertEqual(vertical_0.location, split_test.group_id_to_child['0'])
self.assertEqual(vertical_1.location, split_test.group_id_to_child['1'])
def test_change_user_partition_id(self):
"""
Test what happens when the user_partition_id is changed to a different
group configuration.
"""
# Set to first group configuration.
split_test = self._update_partition_id(0)
self.assertEqual(2, len(split_test.children))
initial_vertical_0_location = split_test.children[0]
initial_vertical_1_location = split_test.children[1]
# Set to second group configuration
split_test = self._update_partition_id(1)
# We don't remove existing children.
self.assertEqual(5, len(split_test.children))
self.assertEqual(initial_vertical_0_location, split_test.children[0])
self.assertEqual(initial_vertical_1_location, split_test.children[1])
vertical_0 = self.get_item_from_modulestore(split_test.children[2], verify_is_draft=True)
vertical_1 = self.get_item_from_modulestore(split_test.children[3], verify_is_draft=True)
vertical_2 = self.get_item_from_modulestore(split_test.children[4], verify_is_draft=True)
# Verify that the group_id_to_child mapping is correct.
self.assertEqual(3, len(split_test.group_id_to_child))
self.assertEqual(vertical_0.location, split_test.group_id_to_child['0'])
self.assertEqual(vertical_1.location, split_test.group_id_to_child['1'])
self.assertEqual(vertical_2.location, split_test.group_id_to_child['2'])
self.assertNotEqual(initial_vertical_0_location, vertical_0.location)
self.assertNotEqual(initial_vertical_1_location, vertical_1.location)
def test_change_same_user_partition_id(self):
"""
Test that nothing happens when the user_partition_id is set to the same value twice.
"""
# Set to first group configuration.
split_test = self._update_partition_id(0)
self.assertEqual(2, len(split_test.children))
initial_group_id_to_child = split_test.group_id_to_child
# Set again to first group configuration.
split_test = self._update_partition_id(0)
self.assertEqual(2, len(split_test.children))
self.assertEqual(initial_group_id_to_child, split_test.group_id_to_child)
def test_change_non_existent_user_partition_id(self):
"""
Test that nothing happens when the user_partition_id is set to a value that doesn't exist.
The user_partition_id will be updated, but children and group_id_to_child map will not change.
"""
# Set to first group configuration.
split_test = self._update_partition_id(0)
self.assertEqual(2, len(split_test.children))
initial_group_id_to_child = split_test.group_id_to_child
# Set to a group configuration that doesn't exist.
split_test = self._update_partition_id(-50)
self.assertEqual(2, len(split_test.children))
self.assertEqual(initial_group_id_to_child, split_test.group_id_to_child)
def test_add_groups(self):
"""
Test the "fix up behavior" when groups are missing (after a group is added to a group configuration).
This test actually belongs over in common, but it relies on a mutable modulestore.
TODO: move tests that can go over to common after the mixed modulestore work is done. # pylint: disable=fixme
"""
# Set to first group configuration.
split_test = self._update_partition_id(0)
# Add a group to the first group configuration.
split_test.user_partitions = [
UserPartition(
0, 'first_partition', 'First Partition',
[Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'pie')]
)
]
self.store.update_item(split_test, self.user.id)
# group_id_to_child and children have not changed yet.
split_test = self._assert_children(2)
group_id_to_child = split_test.group_id_to_child.copy()
self.assertEqual(2, len(group_id_to_child))
# Test environment and Studio use different module systems
# (CachingDescriptorSystem is used in tests, PreviewModuleSystem in Studio).
# CachingDescriptorSystem doesn't have the user service that SplitTestModule
# needs, so we add that service manually here.
split_test.runtime._services['user'] = DjangoXBlockUserService(self.user) # pylint: disable=protected-access
# Call add_missing_groups method to add the missing group.
split_test.add_missing_groups(self.request)
split_test = self._assert_children(3)
self.assertNotEqual(group_id_to_child, split_test.group_id_to_child)
group_id_to_child = split_test.group_id_to_child
self.assertEqual(split_test.children[2], group_id_to_child["2"])
# Call add_missing_groups again -- it should be a no-op.
split_test.add_missing_groups(self.request)
split_test = self._assert_children(3)
self.assertEqual(group_id_to_child, split_test.group_id_to_child)
@ddt.ddt
class TestComponentHandler(TestCase):
def setUp(self):
super(TestComponentHandler, self).setUp()
self.request_factory = RequestFactory()
patcher = patch('contentstore.views.component.modulestore')
self.modulestore = patcher.start()
self.addCleanup(patcher.stop)
# component_handler calls modulestore.get_item to get the descriptor of the requested xBlock.
# Here, we mock the return value of modulestore.get_item so it can be used to mock the handler
# of the xBlock descriptor.
self.descriptor = self.modulestore.return_value.get_item.return_value
self.usage_key_string = unicode(
Location('dummy_org', 'dummy_course', 'dummy_run', 'dummy_category', 'dummy_name')
)
self.user = UserFactory()
self.request = self.request_factory.get('/dummy-url')
self.request.user = self.user
def test_invalid_handler(self):
self.descriptor.handle.side_effect = NoSuchHandlerError
with self.assertRaises(Http404):
component_handler(self.request, self.usage_key_string, 'invalid_handler')
@ddt.data('GET', 'POST', 'PUT', 'DELETE')
def test_request_method(self, method):
def check_handler(handler, request, suffix):
self.assertEqual(request.method, method)
return Response()
self.descriptor.handle = check_handler
# Have to use the right method to create the request to get the HTTP method that we want
req_factory_method = getattr(self.request_factory, method.lower())
request = req_factory_method('/dummy-url')
request.user = self.user
component_handler(request, self.usage_key_string, 'dummy_handler')
@ddt.data(200, 404, 500)
def test_response_code(self, status_code):
def create_response(handler, request, suffix):
return Response(status_code=status_code)
self.descriptor.handle = create_response
self.assertEqual(component_handler(self.request, self.usage_key_string, 'dummy_handler').status_code, status_code)
class TestComponentTemplates(CourseTestCase):
"""
Unit tests for the generation of the component templates for a course.
"""
def setUp(self):
super(TestComponentTemplates, self).setUp()
self.templates = get_component_templates(self.course)
def get_templates_of_type(self, template_type):
"""
Returns the templates for the specified type, or None if none is found.
"""
template_dict = next((template for template in self.templates if template.get('type') == template_type), None)
return template_dict.get('templates') if template_dict else None
def get_template(self, templates, display_name):
"""
Returns the template which has the specified display name.
"""
return next((template for template in templates if template.get('display_name') == display_name), None)
def test_basic_components(self):
"""
Test the handling of the basic component templates.
"""
self.assertIsNotNone(self.get_templates_of_type('discussion'))
self.assertIsNotNone(self.get_templates_of_type('html'))
self.assertIsNotNone(self.get_templates_of_type('problem'))
self.assertIsNotNone(self.get_templates_of_type('video'))
self.assertIsNone(self.get_templates_of_type('advanced'))
def test_advanced_components(self):
"""
Test the handling of advanced component templates.
"""
self.course.advanced_modules.append('word_cloud')
self.templates = get_component_templates(self.course)
advanced_templates = self.get_templates_of_type('advanced')
self.assertEqual(len(advanced_templates), 1)
word_cloud_template = advanced_templates[0]
self.assertEqual(word_cloud_template.get('category'), 'word_cloud')
self.assertEqual(word_cloud_template.get('display_name'), u'Word cloud')
self.assertIsNone(word_cloud_template.get('boilerplate_name', None))
# Verify that non-advanced components are not added twice
self.course.advanced_modules.append('video')
self.course.advanced_modules.append('openassessment')
self.templates = get_component_templates(self.course)
advanced_templates = self.get_templates_of_type('advanced')
self.assertEqual(len(advanced_templates), 1)
only_template = advanced_templates[0]
self.assertNotEqual(only_template.get('category'), 'video')
self.assertNotEqual(only_template.get('category'), 'openassessment')
def test_advanced_problems(self):
"""
Test the handling of advanced problem templates.
"""
problem_templates = self.get_templates_of_type('problem')
circuit_template = self.get_template(problem_templates, u'Circuit Schematic Builder')
self.assertIsNotNone(circuit_template)
self.assertEqual(circuit_template.get('category'), 'problem')
self.assertEqual(circuit_template.get('boilerplate_name'), 'circuitschematic.yaml')
@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["poll", "survey"])
def test_deprecated_no_advance_component_button(self):
"""
Test that there is no `Advanced` button on the unit page when the only
modules in the `Advanced Module List` are deprecated
"""
self.course.advanced_modules.extend(['poll', 'survey'])
templates = get_component_templates(self.course)
button_names = [template['display_name'] for template in templates]
self.assertNotIn('Advanced', button_names)
@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["poll", "survey"])
def test_cannot_create_deprecated_problems(self):
"""
Test that deprecated advanced components are not offered for creation
"""
self.course.advanced_modules.extend(['annotatable', 'poll', 'survey'])
templates = get_component_templates(self.course)
button_names = [template['display_name'] for template in templates]
self.assertIn('Advanced', button_names)
self.assertEqual(len(templates[0]['templates']), 1)
template_display_names = [template['display_name'] for template in templates[0]['templates']]
self.assertEqual(template_display_names, ['Annotation'])
@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', [])
def test_create_non_deprecated_problems(self):
"""
Test that non-deprecated advanced components can be created
"""
self.course.advanced_modules.extend(['annotatable', 'poll', 'survey'])
templates = get_component_templates(self.course)
button_names = [template['display_name'] for template in templates]
self.assertIn('Advanced', button_names)
self.assertEqual(len(templates[0]['templates']), 3)
template_display_names = [template['display_name'] for template in templates[0]['templates']]
self.assertEqual(template_display_names, ['Annotation', 'Poll', 'Survey'])
@ddt.ddt
class TestXBlockInfo(ItemTest):
"""
Unit tests for XBlock's outline handling.
"""
def setUp(self):
super(TestXBlockInfo, self).setUp()
user_id = self.user.id
self.chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Week 1", user_id=user_id
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Lesson 1", user_id=user_id
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Unit 1', user_id=user_id
)
self.video = ItemFactory.create(
parent_location=self.vertical.location, category='video', display_name='My Video', user_id=user_id
)
def test_json_responses(self):
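# Fetch the course outline as JSON and validate the returned xblock info.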
outline_url = reverse_usage_url('xblock_outline_handler', self.usage_key)
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
self.validate_course_xblock_info(json_response, course_outline=True)
@ddt.data(
(ModuleStoreEnum.Type.split, 4, 4),
(ModuleStoreEnum.Type.mongo, 5, 7),
)
@ddt.unpack
def test_xblock_outline_handler_mongo_calls(self, store_type, chapter_queries, chapter_queries_1):
with self.store.default_store(store_type):
course = CourseFactory.create()
chapter = ItemFactory.create(
parent_location=course.location, category='chapter', display_name='Week 1'
)
outline_url = reverse_usage_url('xblock_outline_handler', chapter.location)
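# The outline request should issue the expected number of mongo queries for this store type.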
with check_mongo_calls(chapter_queries):
self.client.get(outline_url, HTTP_ACCEPT='application/json')
sequential = ItemFactory.create(
parent_location=chapter.location, category='sequential', display_name='Sequential 1'
)
ItemFactory.create(
parent_location=sequential.location, category='vertical', display_name='Vertical 1'
)
# The number of mongo calls should stay the same after adding two new children (true for split only).
with check_mongo_calls(chapter_queries_1):
self.client.get(outline_url, HTTP_ACCEPT='application/json')
def test_entrance_exam_chapter_xblock_info(self):
chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Entrance Exam",
user_id=self.user.id, is_entrance_exam=True
)
chapter = modulestore().get_item(chapter.location)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
)
# The entrance exam chapter should not be deletable, draggable, or childAddable.
actions = xblock_info['actions']
self.assertEqual(actions['deletable'], False)
self.assertEqual(actions['draggable'], False)
self.assertEqual(actions['childAddable'], False)
self.assertEqual(xblock_info['display_name'], 'Entrance Exam')
self.assertIsNone(xblock_info.get('is_header_visible', None))
def test_none_entrance_exam_chapter_xblock_info(self):
chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Test Chapter",
user_id=self.user.id
)
chapter = modulestore().get_item(chapter.location)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
)
# chapter should be deletable, draggable and childAddable if not an entrance exam.
actions = xblock_info['actions']
self.assertEqual(actions['deletable'], True)
self.assertEqual(actions['draggable'], True)
self.assertEqual(actions['childAddable'], True)
# The chapter xblock info should not contain the 'is_header_visible' key.
self.assertIsNone(xblock_info.get('is_header_visible', None))
def test_entrance_exam_sequential_xblock_info(self):
chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Entrance Exam",
user_id=self.user.id, is_entrance_exam=True, in_entrance_exam=True
)
subsection = ItemFactory.create(
parent_location=chapter.location, category='sequential', display_name="Subsection - Entrance Exam",
user_id=self.user.id, in_entrance_exam=True
)
subsection = modulestore().get_item(subsection.location)
xblock_info = create_xblock_info(
subsection,
include_child_info=True,
include_children_predicate=ALWAYS
)
# In the case of an entrance exam subsection, the header should be hidden.
self.assertEqual(xblock_info['is_header_visible'], False)
self.assertEqual(xblock_info['display_name'], 'Subsection - Entrance Exam')
def test_none_entrance_exam_sequential_xblock_info(self):
subsection = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Subsection - Exam",
user_id=self.user.id
)
subsection = modulestore().get_item(subsection.location)
xblock_info = create_xblock_info(
subsection,
include_child_info=True,
include_children_predicate=ALWAYS,
parent_xblock=self.chapter
)
# The sequential xblock info should not contain the 'is_header_visible' key.
self.assertIsNone(xblock_info.get('is_header_visible', None))
def test_chapter_xblock_info(self):
chapter = modulestore().get_item(self.chapter.location)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
)
self.validate_chapter_xblock_info(xblock_info)
def test_sequential_xblock_info(self):
sequential = modulestore().get_item(self.sequential.location)
xblock_info = create_xblock_info(
sequential,
include_child_info=True,
include_children_predicate=ALWAYS,
)
self.validate_sequential_xblock_info(xblock_info)
def test_vertical_xblock_info(self):
vertical = modulestore().get_item(self.vertical.location)
xblock_info = create_xblock_info(
vertical,
include_child_info=True,
include_children_predicate=ALWAYS,
include_ancestor_info=True,
user=self.user
)
add_container_page_publishing_info(vertical, xblock_info)
self.validate_vertical_xblock_info(xblock_info)
def test_component_xblock_info(self):
video = modulestore().get_item(self.video.location)
xblock_info = create_xblock_info(
video,
include_child_info=True,
include_children_predicate=ALWAYS
)
self.validate_component_xblock_info(xblock_info)
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_validate_start_date(self, store_type):
"""
Validate that if the start-date year is earlier than 1900, the date is reset to DEFAULT_START_DATE.
"""
with self.store.default_store(store_type):
course = CourseFactory.create()
chapter = ItemFactory.create(
parent_location=course.location, category='chapter', display_name='Week 1'
)
chapter.start = datetime(year=1899, month=1, day=1, tzinfo=UTC)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
include_ancestor_info=True,
user=self.user
)
self.assertEqual(xblock_info['start'], DEFAULT_START_DATE.strftime('%Y-%m-%dT%H:%M:%SZ'))
def validate_course_xblock_info(self, xblock_info, has_child_info=True, course_outline=False):
"""
Validate that the xblock info is correct for the test course.
"""
self.assertEqual(xblock_info['category'], 'course')
self.assertEqual(xblock_info['id'], unicode(self.course.location))
self.assertEqual(xblock_info['display_name'], self.course.display_name)
self.assertTrue(xblock_info['published'])
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info, has_child_info=has_child_info, course_outline=course_outline)
def validate_chapter_xblock_info(self, xblock_info, has_child_info=True):
"""
Validate that the xblock info is correct for the test chapter.
"""
self.assertEqual(xblock_info['category'], 'chapter')
self.assertEqual(xblock_info['id'], unicode(self.chapter.location))
self.assertEqual(xblock_info['display_name'], 'Week 1')
self.assertTrue(xblock_info['published'])
self.assertIsNone(xblock_info.get('edited_by', None))
self.assertEqual(xblock_info['course_graders'], ['Homework', 'Lab', 'Midterm Exam', 'Final Exam'])
self.assertEqual(xblock_info['start'], '2030-01-01T00:00:00Z')
self.assertEqual(xblock_info['graded'], False)
self.assertEqual(xblock_info['due'], None)
self.assertEqual(xblock_info['format'], None)
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info, has_child_info=has_child_info)
def validate_sequential_xblock_info(self, xblock_info, has_child_info=True):
"""
Validate that the xblock info is correct for the test sequential.
"""
self.assertEqual(xblock_info['category'], 'sequential')
self.assertEqual(xblock_info['id'], unicode(self.sequential.location))
self.assertEqual(xblock_info['display_name'], 'Lesson 1')
self.assertTrue(xblock_info['published'])
self.assertIsNone(xblock_info.get('edited_by', None))
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info, has_child_info=has_child_info)
def validate_vertical_xblock_info(self, xblock_info):
"""
Validate that the xblock info is correct for the test vertical.
"""
self.assertEqual(xblock_info['category'], 'vertical')
self.assertEqual(xblock_info['id'], unicode(self.vertical.location))
self.assertEqual(xblock_info['display_name'], 'Unit 1')
self.assertTrue(xblock_info['published'])
self.assertEqual(xblock_info['edited_by'], 'testuser')
# Validate that the correct ancestor info has been included
ancestor_info = xblock_info.get('ancestor_info', None)
self.assertIsNotNone(ancestor_info)
ancestors = ancestor_info['ancestors']
self.assertEqual(len(ancestors), 3)
self.validate_sequential_xblock_info(ancestors[0], has_child_info=True)
self.validate_chapter_xblock_info(ancestors[1], has_child_info=False)
self.validate_course_xblock_info(ancestors[2], has_child_info=False)
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info, has_child_info=True, has_ancestor_info=True)
def validate_component_xblock_info(self, xblock_info):
"""
Validate that the xblock info is correct for the test component.
"""
self.assertEqual(xblock_info['category'], 'video')
self.assertEqual(xblock_info['id'], unicode(self.video.location))
self.assertEqual(xblock_info['display_name'], 'My Video')
self.assertTrue(xblock_info['published'])
self.assertIsNone(xblock_info.get('edited_by', None))
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info)
def validate_xblock_info_consistency(self, xblock_info, has_ancestor_info=False, has_child_info=False,
course_outline=False):
"""
Validate that the xblock info is internally consistent.
"""
self.assertIsNotNone(xblock_info['display_name'])
self.assertIsNotNone(xblock_info['id'])
self.assertIsNotNone(xblock_info['category'])
self.assertTrue(xblock_info['published'])
if has_ancestor_info:
self.assertIsNotNone(xblock_info.get('ancestor_info', None))
ancestors = xblock_info['ancestor_info']['ancestors']
for ancestor in ancestors:
self.validate_xblock_info_consistency(
ancestor,
has_child_info=(ancestor == ancestors[0]), # Only the direct ancestor includes children
course_outline=course_outline
)
else:
self.assertIsNone(xblock_info.get('ancestor_info', None))
if has_child_info:
self.assertIsNotNone(xblock_info.get('child_info', None))
if xblock_info['child_info'].get('children', None):
for child_response in xblock_info['child_info']['children']:
self.validate_xblock_info_consistency(
child_response,
has_child_info=(child_response.get('child_info', None) is not None),
course_outline=course_outline
)
else:
self.assertIsNone(xblock_info.get('child_info', None))
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_SPECIAL_EXAMS': True})
def test_proctored_exam_xblock_info(self):
self.course.enable_proctored_exams = True
self.course.save()
self.store.update_item(self.course, self.user.id)
course = modulestore().get_item(self.course.location)
xblock_info = create_xblock_info(
course,
include_child_info=True,
include_children_predicate=ALWAYS,
)
# Proctored exams should be enabled for the course.
self.assertEqual(xblock_info['enable_proctored_exams'], True)
sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential',
display_name="Test Lesson 1", user_id=self.user.id,
is_proctored_exam=True, is_time_limited=True,
default_time_limit_minutes=100
)
sequential = modulestore().get_item(sequential.location)
xblock_info = create_xblock_info(
sequential,
include_child_info=True,
include_children_predicate=ALWAYS,
)
# exam proctoring should be enabled and time limited.
self.assertEqual(xblock_info['is_proctored_exam'], True)
self.assertEqual(xblock_info['is_time_limited'], True)
self.assertEqual(xblock_info['default_time_limit_minutes'], 100)
class TestLibraryXBlockInfo(ModuleStoreTestCase):
"""
Unit tests for XBlock Info for XBlocks in a content library
"""
def setUp(self):
super(TestLibraryXBlockInfo, self).setUp()
user_id = self.user.id
self.library = LibraryFactory.create()
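# Create a top-level html block and a vertical containing a child html block inside the library.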
self.top_level_html = ItemFactory.create(
parent_location=self.library.location, category='html', user_id=user_id, publish_item=False
)
self.vertical = ItemFactory.create(
parent_location=self.library.location, category='vertical', user_id=user_id, publish_item=False
)
self.child_html = ItemFactory.create(
parent_location=self.vertical.location, category='html', display_name='Test HTML Child Block',
user_id=user_id, publish_item=False
)
def test_lib_xblock_info(self):
html_block = modulestore().get_item(self.top_level_html.location)
xblock_info = create_xblock_info(html_block)
self.validate_component_xblock_info(xblock_info, html_block)
self.assertIsNone(xblock_info.get('child_info', None))
def test_lib_child_xblock_info(self):
html_block = modulestore().get_item(self.child_html.location)
xblock_info = create_xblock_info(html_block, include_ancestor_info=True, include_child_info=True)
self.validate_component_xblock_info(xblock_info, html_block)
self.assertIsNone(xblock_info.get('child_info', None))
ancestors = xblock_info['ancestor_info']['ancestors']
self.assertEqual(len(ancestors), 2)
self.assertEqual(ancestors[0]['category'], 'vertical')
self.assertEqual(ancestors[0]['id'], unicode(self.vertical.location))
self.assertEqual(ancestors[1]['category'], 'library')
def validate_component_xblock_info(self, xblock_info, original_block):
"""
Validate that the xblock info is correct for the test component.
"""
self.assertEqual(xblock_info['category'], original_block.category)
self.assertEqual(xblock_info['id'], unicode(original_block.location))
self.assertEqual(xblock_info['display_name'], original_block.display_name)
self.assertIsNone(xblock_info.get('has_changes', None))
self.assertIsNone(xblock_info.get('published', None))
self.assertIsNone(xblock_info.get('published_on', None))
self.assertIsNone(xblock_info.get('graders', None))
class TestLibraryXBlockCreation(ItemTest):
"""
Tests the adding of XBlocks to Library
"""
def test_add_xblock(self):
"""
Verify we can add an XBlock to a Library.
"""
lib = LibraryFactory.create()
self.create_xblock(parent_usage_key=lib.location, display_name='Test', category="html")
lib = self.store.get_library(lib.location.library_key)
self.assertTrue(lib.children)
xblock_locator = lib.children[0]
self.assertEqual(self.store.get_item(xblock_locator).display_name, 'Test')
def test_no_add_discussion(self):
"""
Verify we cannot add a discussion module to a Library.
"""
lib = LibraryFactory.create()
response = self.create_xblock(parent_usage_key=lib.location, display_name='Test', category='discussion')
self.assertEqual(response.status_code, 400)
lib = self.store.get_library(lib.location.library_key)
self.assertFalse(lib.children)
def test_no_add_advanced(self):
lib = LibraryFactory.create()
lib.advanced_modules = ['lti']
lib.save()
response = self.create_xblock(parent_usage_key=lib.location, display_name='Test', category='lti')
self.assertEqual(response.status_code, 400)
lib = self.store.get_library(lib.location.library_key)
self.assertFalse(lib.children)
class TestXBlockPublishingInfo(ItemTest):
"""
Unit tests for XBlock's outline handling.
"""
FIRST_SUBSECTION_PATH = [0]
FIRST_UNIT_PATH = [0, 0]
SECOND_UNIT_PATH = [0, 1]
def _create_child(self, parent, category, display_name, publish_item=False, staff_only=False):
"""
Creates a child xblock for the given parent.
"""
child = ItemFactory.create(
parent_location=parent.location, category=category, display_name=display_name,
user_id=self.user.id, publish_item=publish_item
)
if staff_only:
self._enable_staff_only(child.location)
# In case the staff_only state was set, return the updated xblock.
return modulestore().get_item(child.location)
def _get_child_xblock_info(self, xblock_info, index):
"""
Returns the child xblock info at the specified index.
"""
children = xblock_info['child_info']['children']
self.assertTrue(len(children) > index)
return children[index]
def _get_xblock_info(self, location):
"""
Returns the xblock info for the specified location.
"""
return create_xblock_info(
modulestore().get_item(location),
include_child_info=True,
include_children_predicate=ALWAYS,
)
def _get_xblock_outline_info(self, location):
"""
        Returns the xblock info for the specified location as needed for the course outline page.
"""
return create_xblock_info(
modulestore().get_item(location),
include_child_info=True,
include_children_predicate=ALWAYS,
course_outline=True
)
def _set_release_date(self, location, start):
"""
Sets the release date for the specified xblock.
"""
xblock = modulestore().get_item(location)
xblock.start = start
self.store.update_item(xblock, self.user.id)
def _enable_staff_only(self, location):
"""
Enables staff only for the specified xblock.
"""
xblock = modulestore().get_item(location)
xblock.visible_to_staff_only = True
self.store.update_item(xblock, self.user.id)
def _set_display_name(self, location, display_name):
"""
Sets the display name for the specified xblock.
"""
xblock = modulestore().get_item(location)
xblock.display_name = display_name
self.store.update_item(xblock, self.user.id)
def _verify_xblock_info_state(self, xblock_info, xblock_info_field, expected_state, path=None, should_equal=True):
"""
Verify the state of an xblock_info field. If no path is provided then the root item will be verified.
If should_equal is True, assert that the current state matches the expected state, otherwise assert that they
do not match.
"""
if path:
direct_child_xblock_info = self._get_child_xblock_info(xblock_info, path[0])
remaining_path = path[1:] if len(path) > 1 else None
self._verify_xblock_info_state(direct_child_xblock_info, xblock_info_field, expected_state, remaining_path, should_equal)
else:
if should_equal:
self.assertEqual(xblock_info[xblock_info_field], expected_state)
else:
self.assertNotEqual(xblock_info[xblock_info_field], expected_state)
def _verify_has_staff_only_message(self, xblock_info, expected_state, path=None):
"""
Verify the staff_only_message field of xblock_info.
"""
self._verify_xblock_info_state(xblock_info, 'staff_only_message', expected_state, path)
def _verify_visibility_state(self, xblock_info, expected_state, path=None, should_equal=True):
"""
Verify the publish state of an item in the xblock_info.
"""
self._verify_xblock_info_state(xblock_info, 'visibility_state', expected_state, path, should_equal)
def _verify_explicit_staff_lock_state(self, xblock_info, expected_state, path=None, should_equal=True):
"""
Verify the explicit staff lock state of an item in the xblock_info.
"""
self._verify_xblock_info_state(xblock_info, 'has_explicit_staff_lock', expected_state, path, should_equal)
def test_empty_chapter(self):
empty_chapter = self._create_child(self.course, 'chapter', "Empty Chapter")
xblock_info = self._get_xblock_info(empty_chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.unscheduled)
def test_empty_sequential(self):
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
self._create_child(chapter, 'sequential', "Empty Sequential")
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.unscheduled)
self._verify_visibility_state(xblock_info, VisibilityState.unscheduled, path=self.FIRST_SUBSECTION_PATH)
def test_published_unit(self):
"""
Tests the visibility state of a published unit with release date in the future.
"""
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
self._set_release_date(chapter.location, datetime.now(UTC) + timedelta(days=1))
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.ready)
self._verify_visibility_state(xblock_info, VisibilityState.ready, path=self.FIRST_SUBSECTION_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.ready, path=self.FIRST_UNIT_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
def test_released_unit(self):
"""
Tests the visibility state of a published unit with release date in the past.
"""
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
self._set_release_date(chapter.location, datetime.now(UTC) - timedelta(days=1))
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.live)
self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_UNIT_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
def test_unpublished_changes(self):
"""
Tests the visibility state of a published unit with draft (unpublished) changes.
"""
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
unit = self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
# Setting the display name creates a draft version of unit.
self._set_display_name(unit.location, 'Updated Unit')
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.needs_attention)
self._verify_visibility_state(xblock_info, VisibilityState.needs_attention, path=self.FIRST_SUBSECTION_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.needs_attention, path=self.FIRST_UNIT_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
def test_partially_released_section(self):
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
released_sequential = self._create_child(chapter, 'sequential', "Released Sequential")
self._create_child(released_sequential, 'vertical', "Released Unit", publish_item=True)
self._create_child(released_sequential, 'vertical', "Staff Only Unit", staff_only=True)
self._set_release_date(chapter.location, datetime.now(UTC) - timedelta(days=1))
published_sequential = self._create_child(chapter, 'sequential', "Published Sequential")
self._create_child(published_sequential, 'vertical', "Published Unit", publish_item=True)
self._create_child(published_sequential, 'vertical', "Staff Only Unit", staff_only=True)
self._set_release_date(published_sequential.location, datetime.now(UTC) + timedelta(days=1))
xblock_info = self._get_xblock_info(chapter.location)
# Verify the state of the released sequential
self._verify_visibility_state(xblock_info, VisibilityState.live, path=[0])
self._verify_visibility_state(xblock_info, VisibilityState.live, path=[0, 0])
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[0, 1])
# Verify the state of the published sequential
self._verify_visibility_state(xblock_info, VisibilityState.ready, path=[1])
self._verify_visibility_state(xblock_info, VisibilityState.ready, path=[1, 0])
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[1, 1])
# Finally verify the state of the chapter
self._verify_visibility_state(xblock_info, VisibilityState.ready)
def test_staff_only_section(self):
"""
Tests that an explicitly staff-locked section and all of its children are visible to staff only.
"""
chapter = self._create_child(self.course, 'chapter', "Test Chapter", staff_only=True)
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
vertical = self._create_child(sequential, 'vertical', "Unit")
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH)
self._verify_explicit_staff_lock_state(xblock_info, True)
self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_SUBSECTION_PATH)
self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_UNIT_PATH)
vertical_info = self._get_xblock_info(vertical.location)
add_container_page_publishing_info(vertical, vertical_info)
self.assertEqual(_xblock_type_and_display_name(chapter), vertical_info["staff_lock_from"])
def test_no_staff_only_section(self):
"""
Tests that a section with a staff-locked subsection and a visible subsection is not staff locked itself.
"""
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
self._create_child(chapter, 'sequential', "Test Visible Sequential")
self._create_child(chapter, 'sequential', "Test Staff Locked Sequential", staff_only=True)
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, should_equal=False)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[0], should_equal=False)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[1])
def test_staff_only_subsection(self):
"""
Tests that an explicitly staff-locked subsection and all of its children are visible to staff only.
In this case the parent section is also visible to staff only because all of its children are staff only.
"""
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential", staff_only=True)
vertical = self._create_child(sequential, 'vertical', "Unit")
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH)
self._verify_explicit_staff_lock_state(xblock_info, False)
self._verify_explicit_staff_lock_state(xblock_info, True, path=self.FIRST_SUBSECTION_PATH)
self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_UNIT_PATH)
vertical_info = self._get_xblock_info(vertical.location)
add_container_page_publishing_info(vertical, vertical_info)
self.assertEqual(_xblock_type_and_display_name(sequential), vertical_info["staff_lock_from"])
def test_no_staff_only_subsection(self):
"""
Tests that a subsection with a staff-locked unit and a visible unit is not staff locked itself.
"""
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
self._create_child(sequential, 'vertical', "Unit")
self._create_child(sequential, 'vertical', "Locked Unit", staff_only=True)
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, self.FIRST_SUBSECTION_PATH, should_equal=False)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, self.FIRST_UNIT_PATH, should_equal=False)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, self.SECOND_UNIT_PATH)
def test_staff_only_unit(self):
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
vertical = self._create_child(sequential, 'vertical', "Unit", staff_only=True)
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH)
self._verify_explicit_staff_lock_state(xblock_info, False)
self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_SUBSECTION_PATH)
self._verify_explicit_staff_lock_state(xblock_info, True, path=self.FIRST_UNIT_PATH)
vertical_info = self._get_xblock_info(vertical.location)
add_container_page_publishing_info(vertical, vertical_info)
self.assertEqual(_xblock_type_and_display_name(vertical), vertical_info["staff_lock_from"])
def test_unscheduled_section_with_live_subsection(self):
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
self._set_release_date(sequential.location, datetime.now(UTC) - timedelta(days=1))
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.needs_attention)
self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_UNIT_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
def test_unreleased_section_with_live_subsection(self):
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
self._set_release_date(chapter.location, datetime.now(UTC) + timedelta(days=1))
self._set_release_date(sequential.location, datetime.now(UTC) - timedelta(days=1))
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.needs_attention)
self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_UNIT_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
def test_locked_section_staff_only_message(self):
"""
Tests that a locked section has a staff only message and its descendants do not.
"""
chapter = self._create_child(self.course, 'chapter', "Test Chapter", staff_only=True)
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
self._create_child(sequential, 'vertical', "Unit")
xblock_info = self._get_xblock_outline_info(chapter.location)
self._verify_has_staff_only_message(xblock_info, True)
self._verify_has_staff_only_message(xblock_info, False, path=self.FIRST_SUBSECTION_PATH)
self._verify_has_staff_only_message(xblock_info, False, path=self.FIRST_UNIT_PATH)
def test_locked_unit_staff_only_message(self):
"""
Tests that a lone locked unit has a staff only message along with its ancestors.
"""
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
self._create_child(sequential, 'vertical', "Unit", staff_only=True)
xblock_info = self._get_xblock_outline_info(chapter.location)
self._verify_has_staff_only_message(xblock_info, True)
self._verify_has_staff_only_message(xblock_info, True, path=self.FIRST_SUBSECTION_PATH)
self._verify_has_staff_only_message(xblock_info, True, path=self.FIRST_UNIT_PATH)
| agpl-3.0 | -9,098,306,685,839,573,000 | 45.871937 | 133 | 0.645723 | false |
memphis-iis/GLUDB | gludb/data.py | 1 | 5085 | """gludb.data - "core" functionality.
If you're unsure what to use, you should look into
gludb.simple. This module is for those needing advanced functionality or
customization
"""
from abc import ABCMeta, abstractmethod
from .config import get_mapping
# pylama:ignore=E501
class DeleteNotSupported(Exception): # NOQA
"""Exception thrown when delete is not supported by the backend."""
pass
# A little magic for using metaclasses with both Python 2 and 3
def _with_metaclass(meta, *bases):
"""Taken from future.utils, who took it from jinja2/_compat.py (BSD license)."""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
class Storable(_with_metaclass(ABCMeta)):
"""Our abstract base class.
It marks subclasses as persistable and storable. Note that the DBObject
    annotation in gludb.simple registers all annotated classes as virtual
    subclasses of Storable, so that you can test them with isinstance(obj, Storable).
"""
# This is field we use for the original version of the object (saved after
# retrieval and before any edits occur)
ORIG_VER_FIELD_NAME = "_prev_version"
@classmethod
@abstractmethod
def get_table_name(self):
"""Return the name of the table/collection/etc where objects should be saved/loaded."""
pass
@abstractmethod
def get_id(self):
"""The instance should return the current key/ID for the instance.
If a 'falsey' value is return, on save one will be created and set via a
call to self.set_id
"""
pass
@abstractmethod
def set_id(self, new_id):
"""The instance should accept a new key/ID. See also get_id."""
pass
@abstractmethod
def to_data(self):
"""The instance should return JSON representation of it's internal state. See also from_data."""
pass
@classmethod
@abstractmethod
def from_data(self):
"""Return a new instance of the subclass populated from the JSON representation."""
pass
@classmethod
def index_names(self):
"""Return an iterable of index names.
Optional method. These names should correspond to the names used in the
dictionary returned by the instance method `indexes` (below).
"""
return None
def indexes(self):
"""Return a dictionary of index name values that can be used in a query.
Optional method. Note that this is not considered required data, so a
backend could ignore indexes if necessary.
"""
return None
def _ensure_table(cls):
get_mapping(cls).ensure_table(cls)
def _post_load(obj):
# Perform all necessary post load operations we want done when reading
# from the database. We return the changed object, but make NO EFFORT
# to keep from mutating the original object.
if obj:
setattr(obj, Storable.ORIG_VER_FIELD_NAME, obj.to_data())
return obj
def _find_one(cls, id):
return _post_load(get_mapping(cls).find_one(cls, id))
def _find_all(cls):
return [
_post_load(obj)
for obj in get_mapping(cls).find_all(cls)
]
def _find_by_index(cls, index_name, value):
return [
_post_load(obj)
for obj in get_mapping(cls).find_by_index(cls, index_name, value)
]
def _save(self):
# Actual save
get_mapping(self.__class__).save(self)
# Now we have a new original version
setattr(self, Storable.ORIG_VER_FIELD_NAME, self.to_data())
def _delete(self):
# Actual delete - and note no version changes
get_mapping(self.__class__).delete(self)
# TODO: we need a function ensure_package_db - it should work mainly like the
# package_add functionality in Backup. Once the class list is created,
# we would loop over every class and call ensure_table
def DatabaseEnabled(cls):
"""Given persistence methods to classes with this annotation.
All this really does is add some functions that forward to the mapped
database class.
"""
if not issubclass(cls, Storable):
raise ValueError(
"%s is not a subclass of gludb.datab.Storage" % repr(cls)
)
cls.ensure_table = classmethod(_ensure_table)
cls.find_one = classmethod(_find_one)
cls.find_all = classmethod(_find_all)
cls.find_by_index = classmethod(_find_by_index)
cls.save = _save
cls.delete = _delete
return cls
def orig_version(obj):
"""Return the original version of an object.
Original version is defined as what was read from the database before any
user edits. If there isn't a previous version (for instance, newly created
objects don't have a previous version), then None is returned. Mainly useful
for testing.
"""
return getattr(obj, Storable.ORIG_VER_FIELD_NAME, None)
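# Usage sketch (illustrative only, not part of gludb): a minimal hand-written
# Storable subclass satisfying the contract above. The class name and fields
# below are hypothetical; in practice most code uses gludb.simple.DBObject,
# which generates this boilerplate automatically.
#
#   import json
#
#   @DatabaseEnabled
#   class Note(Storable):
#       def __init__(self, id='', text=''):
#           self.id, self.text = id, text
#
#       @classmethod
#       def get_table_name(cls):
#           return "Notes"
#
#       def get_id(self):
#           return self.id
#
#       def set_id(self, new_id):
#           self.id = new_id
#
#       def to_data(self):
#           return json.dumps({'id': self.id, 'text': self.text})
#
#       @classmethod
#       def from_data(cls, data):
#           return cls(**json.loads(data))
#
# Once a backend mapping is configured via gludb.config, Note.ensure_table(),
# note.save(), Note.find_one(id) and Note.find_all() become available through
# the forwarding functions defined above.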
| apache-2.0 | -4,694,618,238,678,025,000 | 28.224138 | 104 | 0.65939 | false |
Moliholy/cvmfs | add-ons/tools/get_referenced_hashes.py | 11 | 1880 | #!/usr/bin/env python
import sys
import cvmfs
def usage():
print sys.argv[0] + " <local repo name | remote repo url> [root catalog]"
print "This script walks the catalogs and generates a list of all referenced content hashes."
# get referenced hashes from a single catalog (files, chunks, nested catalogs)
def get_hashes_for_catalog(catalog):
    print >> sys.stderr, "Processing", catalog.hash, catalog
query = " SELECT DISTINCT \
lower(hex(hash)) \
FROM catalog \
WHERE hash != 0 \
UNION \
SELECT DISTINCT \
lower(hex(hash)) || 'P' \
FROM chunks \
WHERE hash != 0 \
UNION \
SELECT DISTINCT \
sha1 || 'C' \
FROM nested_catalogs;"
return { res[0] for res in catalog.run_sql(query) }
def get_hashes_for_catalog_tree(repo, root_catalog):
hashes = { root_catalog.hash + "C" }
for catalog in repo.catalogs(root_catalog):
hashes = hashes | get_hashes_for_catalog(catalog)
if not catalog.has_nested():
repo.close_catalog(catalog)
return hashes
def get_hashes_for_revision(repo, root_hash = None):
root_catalog = repo.retrieve_catalog(root_hash) if root_hash else repo.retrieve_root_catalog()
return get_hashes_for_catalog_tree(repo, root_catalog)
# check input values
if len(sys.argv) != 2 and len(sys.argv) != 3:
usage()
sys.exit(1)
# get input parameters
repo_identifier = sys.argv[1]
root_catalog_hash = sys.argv[2] if len(sys.argv) == 3 else None
repo = cvmfs.open_repository(repo_identifier)
hashes = get_hashes_for_revision(repo, root_catalog_hash)
print '\n'.join(hashes)
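# Example invocation (the repository URL below is hypothetical; any local repo
# name or remote repo URL accepted by cvmfs.open_repository works):
#   python get_referenced_hashes.py http://localhost/cvmfs/example.repo.io > referenced_hashes.txt
# The optional second argument pins the walk to a specific root catalog hash
# instead of starting from the repository's current root catalog.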
| bsd-3-clause | -7,449,870,017,551,299,000 | 34.471698 | 98 | 0.570213 | false |
taosx/trilulilu-downloader | trilulilu-downloader.py | 1 | 5941 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Python Trilulilu Downloader
Support for Video and Audio
Support for online view
Author: sharkyz of rstforums.com
'''
import re
from requests_futures.sessions import FuturesSession
from concurrent.futures import ThreadPoolExecutor
from bs4 import BeautifulSoup
import time
import lxml
import sys #, os
url = 'http://www.trilulilu.ro/pisica-asta-chiar-isi-doreste-sa-vorbeasca'
class commands(object):
def __init__(self, httpadress):
self.httpadress = httpadress
#@profile # Used for profiling the app line by line // Ignore
def main_function(self): # Acess, Find, Rewrite, Download
session = FuturesSession(executor=ThreadPoolExecutor(max_workers=16))
page = session.get(self.httpadress)
page_result = page.result()
page_result.encoding = 'utf-8'
soup = BeautifulSoup(page_result.text, 'lxml')
locatescript = soup.find(text=re.compile('swfobject.embedSWF'))
keys = re.findall(r'"([^,]*?)":', locatescript)
values = re.findall(r'(?<=:)(?:"(.*?)"|\d+)', locatescript)
vovu = dict(zip(keys, values))
video_test = {'videosrc_0':'http://fs{servers}.trilulilu.ro/stream.php?type=video&'
'source=site&hash={hashs}&username={userids}&key={keys}'
'&format=flv-vp6&sig=&exp='.format(servers=vovu['server'],
hashs=vovu['hash'],
userids=vovu['userid'],
keys=vovu['key']),
'videosrc_1':'http://fs{servers}.trilulilu.ro/stream.php?type=video&'
'source=site&hash={hashs}&username={userids}&key={keys}'
'&format=mp4-360p&sig=&exp='.format(servers=vovu['server'],
hashs=vovu['hash'],
userids=vovu['userid'],
keys=vovu['key'])}
# Name the file
page_title = soup.title.string # Title of trilulilu page
        title_chooser = page_title.split(' - ') # Split the title on ' - ' into a list of its parts
# Search for the right file to download & Download it
for link in video_test:
respond = session.get(video_test[link], stream=True)
file_size = int(respond.result().headers.get('Content-Length', 0))
if file_size > 1048576:
# Check if the link was the mp4 or the flv format and choose name
if 'mp4' in video_test[link]:
local_name_file = '{} - {}.mp4'.format(title_chooser[0],title_chooser[1])
elif 'flv' in video_test[link]:
local_name_file = '{} - {}.flv'.format(title_chooser[0],title_chooser[1])
                else:
                    print('Download stopped, not recognizable format!')
                    continue  # local_name_file was never set; skip this link
with open(local_name_file, 'wb') as f:
dl = 0
count = 0
start_time_local = time.mktime(time.localtime())
# Writing file
for chunk in respond.result().iter_content(chunk_size=32 * 1024):
if chunk:
count += 1
dl += len(chunk)
f.write(chunk)
end_time_local = time.mktime(time.localtime())
f.flush()
#Configuring Progressbar
if end_time_local > start_time_local:
dl_speed = round((dl / (end_time_local - start_time_local)) / 1000, 2)
sys.stdout.flush()
#print('Downloading now...\nFile:{}\nSize:{}M'.format(local_name_file, round(file_size / 1000/ 1000, 2)))
percent_text = round(dl * 100 / file_size)
percent_text4 = round(percent_text/4)
#print(percent_text4)
#teleies1='.' * x
#asterisks1='*' * int(i/2)
i = round(percent_text4)
x = 12 - i
z = 25 - i
def asterisks0():
if percent_text <= 50:
return '#' * int(i)
else:
return '#' * 12
def teleies0():
if percent_text < 10:
return '-' * int(x + 1)
elif percent_text <= 50:
return '-' * int(x)
else:
return ''
def asterisks1():
if percent_text > 50:
str_asterisk1 = '#' * (i - 12)
return '#' * (i - 12)
else:
return ''
def teleies1():
if percent_text > 50:
return '-' * z
else:
return '-' * 12
# Progressbar printing
sys.stdout.write('[{}{}{}%{}{}] [ Speed: {}Kbps ] \r'.format(asterisks0(),teleies0(),percent_text,asterisks1(),teleies1(),dl_speed))
sys.stdout.flush()
start = commands(url).main_function()
start
| gpl-3.0 | 6,029,159,800,586,936,000 | 41.134752 | 168 | 0.429557 | false |
bricesarver/pseudo-it | mask.py | 1 | 1327 | import sys
import subprocess
#as a stand-alone: [python3+] mask.py prefix iterations soft; soft is a 1 (true) or 0 (false)
def main():
subprocess.check_call('''grep "\./\." {}.allcalls.filtered.vcf | awk '{{OFS="\t"; if ($0 !~ /\#/); print $1, $2-1, $2}}' | bedtools merge -i - > nocalls.combined.bed'''.format(prefix), shell=True)
subprocess.check_call('''grep "allcallfilter" {}.allcalls.filtered.vcf | awk '{{OFS="\t"; if ($0 !~ /\#/); print $1, $2-1, $2}}' | bedtools merge -i - > filtered.combined.bed'''.format(prefix), shell=True)
subprocess.check_call("cat nocalls.combined.bed filtered.combined.bed > both.bed", shell=True)
subprocess.check_call("bedtools sort -i both.bed | bedtools merge -i - > all_positions_to_mask.bed", shell=True)
if soft == 1:
subprocess.check_call("bedtools maskfasta -fi {}.gatk.iteration{}.consensus.FINAL.fa -fo {}.masked.fa -bed all_positions_to_mask.bed -soft".format(prefix, iterations, prefix), shell=True)
else:
subprocess.check_call("bedtools maskfasta -fi {}.gatk.iteration{}.consensus.FINAL.fa -fo {}.masked.fa -bed all_positions_to_mask.bed".format(prefix, iterations, prefix), shell=True)
if __name__=="__main__":
prefix = sys.argv[1]
iterations = sys.argv[2]
    soft = int(sys.argv[3])  # cast to int so the comparison in main() works ("1" == 1 is False)
print(prefix, iterations, soft)
main() | mit | -458,010,261,812,999,360 | 62.238095 | 209 | 0.657121 | false |
harmy/kbengine | kbe/res/scripts/common/Lib/test/test_inspect.py | 3 | 41988 | import re
import sys
import types
import unittest
import inspect
import linecache
import datetime
import collections
import os
import shutil
from os.path import normcase
from test.support import run_unittest, TESTFN, DirsOnSysPath
from test import inspect_fodder as mod
from test import inspect_fodder2 as mod2
# C module for test_findsource_binary
import unicodedata
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, isgenerator, isgeneratorfunction, getmembers,
# getdoc, getfile, getmodule, getsourcefile, getcomments, getsource,
# getclasstree, getargspec, getargvalues, formatargspec, formatargvalues,
# currentframe, stack, trace, isdatadescriptor
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
modfile = mod.__file__
if modfile.endswith(('c', 'o')):
modfile = modfile[:-1]
# Normalize file names: on Windows, the case of file names of compiled
# modules depends on the path used to start the python executable.
modfile = normcase(modfile)
def revise(filename, *args):
return (normcase(filename),) + args
import builtins
try:
1/0
except:
tb = sys.exc_info()[2]
git = mod.StupidGit()
class IsTestBase(unittest.TestCase):
predicates = set([inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback,
inspect.isgenerator, inspect.isgeneratorfunction])
def istest(self, predicate, exp):
obj = eval(exp)
self.assertTrue(predicate(obj), '%s(%s)' % (predicate.__name__, exp))
for other in self.predicates - set([predicate]):
if predicate == inspect.isgeneratorfunction and\
other == inspect.isfunction:
continue
self.assertFalse(other(obj), 'not %s(%s)' % (other.__name__, exp))
def generator_function_example(self):
for i in range(2):
yield i
class TestPredicates(IsTestBase):
def test_sixteen(self):
count = len([x for x in dir(inspect) if x.startswith('is')])
        # This test is here to remind you to update Doc/library/inspect.rst,
        # which claims there are 16 such functions.
expected = 16
err_msg = "There are %d (not %d) is* functions" % (count, expected)
self.assertEqual(count, expected, err_msg)
def test_excluding_predicates(self):
self.istest(inspect.isbuiltin, 'sys.exit')
self.istest(inspect.isbuiltin, '[].append')
self.istest(inspect.iscode, 'mod.spam.__code__')
self.istest(inspect.isframe, 'tb.tb_frame')
self.istest(inspect.isfunction, 'mod.spam')
self.istest(inspect.isfunction, 'mod.StupidGit.abuse')
self.istest(inspect.ismethod, 'git.argue')
self.istest(inspect.ismodule, 'mod')
self.istest(inspect.istraceback, 'tb')
self.istest(inspect.isdatadescriptor, 'collections.defaultdict.default_factory')
self.istest(inspect.isgenerator, '(x for x in range(2))')
self.istest(inspect.isgeneratorfunction, 'generator_function_example')
if hasattr(types, 'GetSetDescriptorType'):
self.istest(inspect.isgetsetdescriptor,
'type(tb.tb_frame).f_locals')
else:
self.assertFalse(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
if hasattr(types, 'MemberDescriptorType'):
self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
else:
self.assertFalse(inspect.ismemberdescriptor(datetime.timedelta.days))
def test_isroutine(self):
self.assertTrue(inspect.isroutine(mod.spam))
self.assertTrue(inspect.isroutine([].count))
def test_isclass(self):
self.istest(inspect.isclass, 'mod.StupidGit')
self.assertTrue(inspect.isclass(list))
class CustomGetattr(object):
def __getattr__(self, attr):
return None
self.assertFalse(inspect.isclass(CustomGetattr()))
def test_get_slot_members(self):
class C(object):
__slots__ = ("a", "b")
x = C()
x.a = 42
members = dict(inspect.getmembers(x))
self.assertIn('a', members)
self.assertNotIn('b', members)
def test_isabstract(self):
from abc import ABCMeta, abstractmethod
class AbstractClassExample(metaclass=ABCMeta):
@abstractmethod
def foo(self):
pass
class ClassExample(AbstractClassExample):
def foo(self):
pass
a = ClassExample()
# Test general behaviour.
self.assertTrue(inspect.isabstract(AbstractClassExample))
self.assertFalse(inspect.isabstract(ClassExample))
self.assertFalse(inspect.isabstract(a))
self.assertFalse(inspect.isabstract(int))
self.assertFalse(inspect.isabstract(5))
class TestInterpreterStack(IsTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
git.abuse(7, 8, 9)
def test_abuse_done(self):
self.istest(inspect.istraceback, 'git.ex[2]')
self.istest(inspect.isframe, 'mod.fr')
def test_stack(self):
self.assertTrue(len(mod.st) >= 5)
self.assertEqual(revise(*mod.st[0][1:]),
(modfile, 16, 'eggs', [' st = inspect.stack()\n'], 0))
self.assertEqual(revise(*mod.st[1][1:]),
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(revise(*mod.st[2][1:]),
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(revise(*mod.st[3][1:]),
(modfile, 39, 'abuse', [' self.argue(a, b, c)\n'], 0))
def test_trace(self):
self.assertEqual(len(git.tr), 3)
self.assertEqual(revise(*git.tr[0][1:]),
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(revise(*git.tr[1][1:]),
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(revise(*git.tr[2][1:]),
(modfile, 18, 'eggs', [' q = y / 0\n'], 0))
def test_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
self.assertEqual(args, ['x', 'y'])
self.assertEqual(varargs, None)
self.assertEqual(varkw, None)
self.assertEqual(locals, {'x': 11, 'p': 11, 'y': 14})
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(x=11, y=14)')
def test_previous_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
self.assertEqual(args, ['a', 'b', 'c', 'd', 'e', 'f'])
self.assertEqual(varargs, 'g')
self.assertEqual(varkw, 'h')
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(a=7, b=8, c=9, d=3, e=4, f=5, *g=(), **h={})')
class GetSourceBase(unittest.TestCase):
# Subclasses must override.
fodderModule = None
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
with open(inspect.getsourcefile(self.fodderModule)) as fp:
self.source = fp.read()
def sourcerange(self, top, bottom):
lines = self.source.split("\n")
return "\n".join(lines[top-1:bottom]) + "\n"
def assertSourceEqual(self, obj, top, bottom):
self.assertEqual(inspect.getsource(obj),
self.sourcerange(top, bottom))
class TestRetrievingSourceCode(GetSourceBase):
fodderModule = mod
def test_getclasses(self):
classes = inspect.getmembers(mod, inspect.isclass)
self.assertEqual(classes,
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit)])
tree = inspect.getclasstree([cls[1] for cls in classes], 1)
self.assertEqual(tree,
[(object, ()),
[(mod.ParrotDroppings, (object,)),
(mod.StupidGit, (object,)),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
]
])
def test_getfunctions(self):
functions = inspect.getmembers(mod, inspect.isfunction)
self.assertEqual(functions, [('eggs', mod.eggs),
('spam', mod.spam)])
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_getdoc(self):
self.assertEqual(inspect.getdoc(mod), 'A module docstring.')
self.assertEqual(inspect.getdoc(mod.StupidGit),
'A longer,\n\nindented\n\ndocstring.')
self.assertEqual(inspect.getdoc(git.abuse),
'Another\n\ndocstring\n\ncontaining\n\ntabs')
def test_cleandoc(self):
self.assertEqual(inspect.cleandoc('An\n indented\n docstring.'),
'An\nindented\ndocstring.')
def test_getcomments(self):
self.assertEqual(inspect.getcomments(mod), '# line 1\n')
self.assertEqual(inspect.getcomments(mod.StupidGit), '# line 20\n')
def test_getmodule(self):
# Check actual module
self.assertEqual(inspect.getmodule(mod), mod)
# Check class (uses __module__ attribute)
self.assertEqual(inspect.getmodule(mod.StupidGit), mod)
# Check a method (no __module__ attribute, falls back to filename)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Do it again (check the caching isn't broken)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Check a builtin
self.assertEqual(inspect.getmodule(str), sys.modules["builtins"])
# Check filename override
self.assertEqual(inspect.getmodule(None, modfile), mod)
def test_getsource(self):
self.assertSourceEqual(git.abuse, 29, 39)
self.assertSourceEqual(mod.StupidGit, 21, 46)
def test_getsourcefile(self):
self.assertEqual(normcase(inspect.getsourcefile(mod.spam)), modfile)
self.assertEqual(normcase(inspect.getsourcefile(git.abuse)), modfile)
fn = "_non_existing_filename_used_for_sourcefile_test.py"
co = compile("None", fn, "exec")
self.assertEqual(inspect.getsourcefile(co), None)
linecache.cache[co.co_filename] = (1, None, "None", co.co_filename)
self.assertEqual(normcase(inspect.getsourcefile(co)), fn)
def test_getfile(self):
self.assertEqual(inspect.getfile(mod.StupidGit), mod.__file__)
def test_getmodule_recursion(self):
from types import ModuleType
name = '__inspect_dummy'
m = sys.modules[name] = ModuleType(name)
m.__file__ = "<string>" # hopefully not a real filename...
m.__loader__ = "dummy" # pretend the filename is understood by a loader
exec("def x(): pass", m.__dict__)
self.assertEqual(inspect.getsourcefile(m.x.__code__), '<string>')
del sys.modules[name]
inspect.getmodule(compile('a=10','','single'))
def test_proceed_with_fake_filename(self):
'''doctest monkeypatches linecache to enable inspection'''
fn, source = '<test>', 'def x(): pass\n'
getlines = linecache.getlines
def monkey(filename, module_globals=None):
if filename == fn:
return source.splitlines(True)
else:
return getlines(filename, module_globals)
linecache.getlines = monkey
try:
ns = {}
exec(compile(source, fn, 'single'), ns)
inspect.getsource(ns["x"])
finally:
linecache.getlines = getlines
class TestDecorators(GetSourceBase):
fodderModule = mod2
def test_wrapped_decorator(self):
self.assertSourceEqual(mod2.wrapped, 14, 17)
def test_replacing_decorator(self):
self.assertSourceEqual(mod2.gone, 9, 10)
class TestOneliners(GetSourceBase):
fodderModule = mod2
def test_oneline_lambda(self):
# Test inspect.getsource with a one-line lambda function.
self.assertSourceEqual(mod2.oll, 25, 25)
def test_threeline_lambda(self):
# Test inspect.getsource with a three-line lambda function,
# where the second and third lines are _not_ indented.
self.assertSourceEqual(mod2.tll, 28, 30)
def test_twoline_indented_lambda(self):
# Test inspect.getsource with a two-line lambda function,
# where the second line _is_ indented.
self.assertSourceEqual(mod2.tlli, 33, 34)
def test_onelinefunc(self):
# Test inspect.getsource with a regular one-line function.
self.assertSourceEqual(mod2.onelinefunc, 37, 37)
def test_manyargs(self):
# Test inspect.getsource with a regular function where
# the arguments are on two lines and _not_ indented and
# the body on the second line with the last arguments.
self.assertSourceEqual(mod2.manyargs, 40, 41)
def test_twolinefunc(self):
# Test inspect.getsource with a regular function where
# the body is on two lines, following the argument list and
# continued on the next line by a \\.
self.assertSourceEqual(mod2.twolinefunc, 44, 45)
def test_lambda_in_list(self):
# Test inspect.getsource with a one-line lambda function
# defined in a list, indented.
self.assertSourceEqual(mod2.a[1], 49, 49)
def test_anonymous(self):
# Test inspect.getsource with a lambda function defined
# as argument to another function.
self.assertSourceEqual(mod2.anonymous, 55, 55)
class TestBuggyCases(GetSourceBase):
fodderModule = mod2
def test_with_comment(self):
self.assertSourceEqual(mod2.with_comment, 58, 59)
def test_multiline_sig(self):
self.assertSourceEqual(mod2.multiline_sig[0], 63, 64)
def test_nested_class(self):
self.assertSourceEqual(mod2.func69().func71, 71, 72)
def test_one_liner_followed_by_non_name(self):
self.assertSourceEqual(mod2.func77, 77, 77)
def test_one_liner_dedent_non_name(self):
self.assertSourceEqual(mod2.cls82.func83, 83, 83)
def test_with_comment_instead_of_docstring(self):
self.assertSourceEqual(mod2.func88, 88, 90)
def test_method_in_dynamic_class(self):
self.assertSourceEqual(mod2.method_in_dynamic_class, 95, 97)
@unittest.skipIf(
not hasattr(unicodedata, '__file__') or
unicodedata.__file__[-4:] in (".pyc", ".pyo"),
"unicodedata is not an external binary module")
def test_findsource_binary(self):
self.assertRaises(IOError, inspect.getsource, unicodedata)
self.assertRaises(IOError, inspect.findsource, unicodedata)
def test_findsource_code_in_linecache(self):
lines = ["x=1"]
co = compile(lines[0], "_dynamically_created_file", "exec")
self.assertRaises(IOError, inspect.findsource, co)
self.assertRaises(IOError, inspect.getsource, co)
linecache.cache[co.co_filename] = (1, None, lines, co.co_filename)
self.assertEqual(inspect.findsource(co), (lines,0))
self.assertEqual(inspect.getsource(co), lines[0])
class TestNoEOL(GetSourceBase):
def __init__(self, *args, **kwargs):
self.tempdir = TESTFN + '_dir'
os.mkdir(self.tempdir)
with open(os.path.join(self.tempdir,
'inspect_fodder3%spy' % os.extsep), 'w') as f:
f.write("class X:\n pass # No EOL")
with DirsOnSysPath(self.tempdir):
import inspect_fodder3 as mod3
self.fodderModule = mod3
GetSourceBase.__init__(self, *args, **kwargs)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_class(self):
self.assertSourceEqual(self.fodderModule.X, 1, 2)
# Helper for testing classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
class TestClassesAndFunctions(unittest.TestCase):
def test_newstyle_mro(self):
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def assertArgSpecEquals(self, routine, args_e, varargs_e=None,
varkw_e=None, defaults_e=None, formatted=None):
args, varargs, varkw, defaults = inspect.getargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults),
formatted)
def assertFullArgSpecEquals(self, routine, args_e, varargs_e=None,
varkw_e=None, defaults_e=None,
kwonlyargs_e=[], kwonlydefaults_e=None,
ann_e={}, formatted=None):
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
inspect.getfullargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
self.assertEqual(kwonlyargs, kwonlyargs_e)
self.assertEqual(kwonlydefaults, kwonlydefaults_e)
self.assertEqual(ann, ann_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, ann),
formatted)
def test_getargspec(self):
self.assertArgSpecEquals(mod.eggs, ['x', 'y'], formatted='(x, y)')
self.assertArgSpecEquals(mod.spam,
['a', 'b', 'c', 'd', 'e', 'f'],
'g', 'h', (3, 4, 5),
'(a, b, c, d=3, e=4, f=5, *g, **h)')
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.keyworded, [])
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.annotated, [])
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.keyword_only_arg, [])
def test_getfullargspec(self):
self.assertFullArgSpecEquals(mod2.keyworded, [], varargs_e='arg1',
kwonlyargs_e=['arg2'],
kwonlydefaults_e={'arg2':1},
formatted='(*arg1, arg2=1)')
self.assertFullArgSpecEquals(mod2.annotated, ['arg1'],
ann_e={'arg1' : list},
formatted='(arg1: list)')
self.assertFullArgSpecEquals(mod2.keyword_only_arg, [],
kwonlyargs_e=['arg'],
formatted='(*, arg)')
def test_getargspec_method(self):
class A(object):
def m(self):
pass
self.assertArgSpecEquals(A.m, ['self'])
def test_classify_newstyle(self):
class A(object):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', A), attrs,
'missing plain method: %r' % attrs)
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', C), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', D), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
class TestGetcallargsFunctions(unittest.TestCase):
def assertEqualCallArgs(self, func, call_params_string, locs=None):
locs = dict(locs or {}, func=func)
r1 = eval('func(%s)' % call_params_string, None, locs)
r2 = eval('inspect.getcallargs(func, %s)' % call_params_string, None,
locs)
self.assertEqual(r1, r2)
def assertEqualException(self, func, call_param_string, locs=None):
locs = dict(locs or {}, func=func)
try:
eval('func(%s)' % call_param_string, None, locs)
except Exception as e:
ex1 = e
else:
self.fail('Exception not raised')
try:
eval('inspect.getcallargs(func, %s)' % call_param_string, None,
locs)
except Exception as e:
ex2 = e
else:
self.fail('Exception not raised')
self.assertIs(type(ex1), type(ex2))
self.assertEqual(str(ex1), str(ex2))
del ex1, ex2
def makeCallable(self, signature):
"""Create a function that returns its locals()"""
code = "lambda %s: locals()"
return eval(code % signature)
def test_plain(self):
f = self.makeCallable('a, b=1')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, 'b=3, a=2')
self.assertEqualCallArgs(f, '2, b=3')
# expand *iterable / **mapping
self.assertEqualCallArgs(f, '*(2,)')
self.assertEqualCallArgs(f, '*[2]')
self.assertEqualCallArgs(f, '*(2, 3)')
self.assertEqualCallArgs(f, '*[2, 3]')
self.assertEqualCallArgs(f, '**{"a":2}')
self.assertEqualCallArgs(f, 'b=3, **{"a":2}')
self.assertEqualCallArgs(f, '2, **{"b":3}')
self.assertEqualCallArgs(f, '**{"b":3, "a":2}')
# expand UserList / UserDict
self.assertEqualCallArgs(f, '*collections.UserList([2])')
self.assertEqualCallArgs(f, '*collections.UserList([2, 3])')
self.assertEqualCallArgs(f, '**collections.UserDict(a=2)')
self.assertEqualCallArgs(f, '2, **collections.UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **collections.UserDict(a=3)')
def test_varargs(self):
f = self.makeCallable('a, b=1, *c')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, '2, 3, 4')
self.assertEqualCallArgs(f, '*(2,3,4)')
self.assertEqualCallArgs(f, '2, *[3,4]')
self.assertEqualCallArgs(f, '2, 3, *collections.UserList([4])')
def test_varkw(self):
f = self.makeCallable('a, b=1, **c')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, '2, b=3, c=4')
self.assertEqualCallArgs(f, 'b=3, a=2, c=4')
self.assertEqualCallArgs(f, 'c=4, **{"a":2, "b":3}')
self.assertEqualCallArgs(f, '2, c=4, **{"b":3}')
self.assertEqualCallArgs(f, 'b=2, **{"a":3, "c":4}')
self.assertEqualCallArgs(f, '**collections.UserDict(a=2, b=3, c=4)')
self.assertEqualCallArgs(f, '2, c=4, **collections.UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **collections.UserDict(a=3, c=4)')
def test_varkw_only(self):
# issue11256:
f = self.makeCallable('**c')
self.assertEqualCallArgs(f, '')
self.assertEqualCallArgs(f, 'a=1')
self.assertEqualCallArgs(f, 'a=1, b=2')
self.assertEqualCallArgs(f, 'c=3, **{"a": 1, "b": 2}')
self.assertEqualCallArgs(f, '**collections.UserDict(a=1, b=2)')
self.assertEqualCallArgs(f, 'c=3, **collections.UserDict(a=1, b=2)')
def test_keyword_only(self):
f = self.makeCallable('a=3, *, c, d=2')
self.assertEqualCallArgs(f, 'c=3')
self.assertEqualCallArgs(f, 'c=3, a=3')
self.assertEqualCallArgs(f, 'a=2, c=4')
self.assertEqualCallArgs(f, '4, c=4')
self.assertEqualException(f, '')
self.assertEqualException(f, '3')
self.assertEqualException(f, 'a=3')
self.assertEqualException(f, 'd=4')
f = self.makeCallable('*, c, d=2')
self.assertEqualCallArgs(f, 'c=3')
self.assertEqualCallArgs(f, 'c=3, d=4')
self.assertEqualCallArgs(f, 'd=4, c=3')
def test_multiple_features(self):
f = self.makeCallable('a, b=2, *f, **g')
self.assertEqualCallArgs(f, '2, 3, 7')
self.assertEqualCallArgs(f, '2, 3, x=8')
self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9')
self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9')
self.assertEqualCallArgs(f, 'x=8, *collections.UserList('
'[2, 3, (4,[5,6])]), **{"y":9, "z":10}')
self.assertEqualCallArgs(f, '2, x=8, *collections.UserList([3, '
'(4,[5,6])]), **collections.UserDict('
'y=9, z=10)')
f = self.makeCallable('a, b=2, *f, x, y=99, **g')
self.assertEqualCallArgs(f, '2, 3, x=8')
self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9, z=10')
self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9, z=10')
self.assertEqualCallArgs(f, 'x=8, *collections.UserList('
'[2, 3, (4,[5,6])]), q=0, **{"y":9, "z":10}')
self.assertEqualCallArgs(f, '2, x=8, *collections.UserList([3, '
'(4,[5,6])]), q=0, **collections.UserDict('
'y=9, z=10)')
def test_errors(self):
f0 = self.makeCallable('')
f1 = self.makeCallable('a, b')
f2 = self.makeCallable('a, b=1')
# f0 takes no arguments
self.assertEqualException(f0, '1')
self.assertEqualException(f0, 'x=1')
self.assertEqualException(f0, '1,x=1')
# f1 takes exactly 2 arguments
self.assertEqualException(f1, '')
self.assertEqualException(f1, '1')
self.assertEqualException(f1, 'a=2')
self.assertEqualException(f1, 'b=3')
# f2 takes at least 1 argument
self.assertEqualException(f2, '')
self.assertEqualException(f2, 'b=3')
for f in f1, f2:
# f1/f2 takes exactly/at most 2 arguments
self.assertEqualException(f, '2, 3, 4')
self.assertEqualException(f, '1, 2, 3, a=1')
self.assertEqualException(f, '2, 3, 4, c=5')
self.assertEqualException(f, '2, 3, 4, a=1, c=5')
# f got an unexpected keyword argument
self.assertEqualException(f, 'c=2')
self.assertEqualException(f, '2, c=3')
self.assertEqualException(f, '2, 3, c=4')
self.assertEqualException(f, '2, c=4, b=3')
self.assertEqualException(f, '**{u"\u03c0\u03b9": 4}')
# f got multiple values for keyword argument
self.assertEqualException(f, '1, a=2')
self.assertEqualException(f, '1, **{"a":2}')
self.assertEqualException(f, '1, 2, b=3')
# XXX: Python inconsistency
# - for functions and bound methods: unexpected keyword 'c'
# - for unbound methods: multiple values for keyword 'a'
#self.assertEqualException(f, '1, c=3, a=2')
# issue11256:
f3 = self.makeCallable('**c')
self.assertEqualException(f3, '1, 2')
self.assertEqualException(f3, '1, 2, a=1, b=2')
f4 = self.makeCallable('*, a, b=0')
        self.assertEqualException(f4, '1, 2')
        self.assertEqualException(f4, '1, 2, a=1, b=2')
class TestGetcallargsMethods(TestGetcallargsFunctions):
def setUp(self):
class Foo(object):
pass
self.cls = Foo
self.inst = Foo()
def makeCallable(self, signature):
assert 'self' not in signature
mk = super(TestGetcallargsMethods, self).makeCallable
self.cls.method = mk('self, ' + signature)
return self.inst.method
class TestGetcallargsUnboundMethods(TestGetcallargsMethods):
def makeCallable(self, signature):
super(TestGetcallargsUnboundMethods, self).makeCallable(signature)
return self.cls.method
def assertEqualCallArgs(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualCallArgs(
*self._getAssertEqualParams(func, call_params_string, locs))
def assertEqualException(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualException(
*self._getAssertEqualParams(func, call_params_string, locs))
def _getAssertEqualParams(self, func, call_params_string, locs=None):
assert 'inst' not in call_params_string
locs = dict(locs or {}, inst=self.inst)
return (func, 'inst,' + call_params_string, locs)
class TestGetattrStatic(unittest.TestCase):
def test_basic(self):
class Thing(object):
x = object()
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
self.assertEqual(inspect.getattr_static(thing, 'x', None), Thing.x)
with self.assertRaises(AttributeError):
inspect.getattr_static(thing, 'y')
self.assertEqual(inspect.getattr_static(thing, 'y', 3), 3)
def test_inherited(self):
class Thing(object):
x = object()
class OtherThing(Thing):
pass
something = OtherThing()
self.assertEqual(inspect.getattr_static(something, 'x'), Thing.x)
def test_instance_attr(self):
class Thing(object):
x = 2
def __init__(self, x):
self.x = x
thing = Thing(3)
self.assertEqual(inspect.getattr_static(thing, 'x'), 3)
del thing.x
self.assertEqual(inspect.getattr_static(thing, 'x'), 2)
def test_property(self):
class Thing(object):
@property
def x(self):
raise AttributeError("I'm pretending not to exist")
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
def test_descriptor_raises_AttributeError(self):
class descriptor(object):
def __get__(*_):
raise AttributeError("I'm pretending not to exist")
desc = descriptor()
class Thing(object):
x = desc
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), desc)
def test_classAttribute(self):
class Thing(object):
x = object()
self.assertEqual(inspect.getattr_static(Thing, 'x'), Thing.x)
def test_inherited_classattribute(self):
class Thing(object):
x = object()
class OtherThing(Thing):
pass
self.assertEqual(inspect.getattr_static(OtherThing, 'x'), Thing.x)
def test_slots(self):
class Thing(object):
y = 'bar'
__slots__ = ['x']
def __init__(self):
self.x = 'foo'
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
self.assertEqual(inspect.getattr_static(thing, 'y'), 'bar')
del thing.x
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
def test_metaclass(self):
class meta(type):
attr = 'foo'
class Thing(object, metaclass=meta):
pass
self.assertEqual(inspect.getattr_static(Thing, 'attr'), 'foo')
class sub(meta):
pass
class OtherThing(object, metaclass=sub):
x = 3
self.assertEqual(inspect.getattr_static(OtherThing, 'attr'), 'foo')
class OtherOtherThing(OtherThing):
pass
# this test is odd, but it was added as it exposed a bug
self.assertEqual(inspect.getattr_static(OtherOtherThing, 'x'), 3)
def test_no_dict_no_slots(self):
self.assertEqual(inspect.getattr_static(1, 'foo', None), None)
self.assertNotEqual(inspect.getattr_static('foo', 'lower'), None)
def test_no_dict_no_slots_instance_member(self):
# returns descriptor
with open(__file__) as handle:
self.assertEqual(inspect.getattr_static(handle, 'name'), type(handle).name)
def test_inherited_slots(self):
# returns descriptor
class Thing(object):
__slots__ = ['x']
def __init__(self):
self.x = 'foo'
class OtherThing(Thing):
pass
# it would be nice if this worked...
# we get the descriptor instead of the instance attribute
self.assertEqual(inspect.getattr_static(OtherThing(), 'x'), Thing.x)
def test_descriptor(self):
class descriptor(object):
def __get__(self, instance, owner):
return 3
class Foo(object):
d = descriptor()
foo = Foo()
# for a non data descriptor we return the instance attribute
foo.__dict__['d'] = 1
self.assertEqual(inspect.getattr_static(foo, 'd'), 1)
        # if the descriptor is a data-descriptor we should return the
# descriptor
descriptor.__set__ = lambda s, i, v: None
self.assertEqual(inspect.getattr_static(foo, 'd'), Foo.__dict__['d'])
def test_metaclass_with_descriptor(self):
class descriptor(object):
def __get__(self, instance, owner):
return 3
class meta(type):
d = descriptor()
class Thing(object, metaclass=meta):
pass
self.assertEqual(inspect.getattr_static(Thing, 'd'), meta.__dict__['d'])
def test_class_as_property(self):
class Base(object):
foo = 3
class Something(Base):
executed = False
@property
def __class__(self):
self.executed = True
return object
instance = Something()
self.assertEqual(inspect.getattr_static(instance, 'foo'), 3)
self.assertFalse(instance.executed)
self.assertEqual(inspect.getattr_static(Something, 'foo'), 3)
def test_mro_as_property(self):
class Meta(type):
@property
def __mro__(self):
return (object,)
class Base(object):
foo = 3
class Something(Base, metaclass=Meta):
pass
self.assertEqual(inspect.getattr_static(Something(), 'foo'), 3)
self.assertEqual(inspect.getattr_static(Something, 'foo'), 3)
def test_dict_as_property(self):
test = self
test.called = False
class Foo(dict):
a = 3
@property
def __dict__(self):
test.called = True
return {}
foo = Foo()
foo.a = 4
self.assertEqual(inspect.getattr_static(foo, 'a'), 3)
self.assertFalse(test.called)
def test_custom_object_dict(self):
test = self
test.called = False
class Custom(dict):
def get(self, key, default=None):
test.called = True
super().get(key, default)
class Foo(object):
a = 3
foo = Foo()
foo.__dict__ = Custom()
self.assertEqual(inspect.getattr_static(foo, 'a'), 3)
self.assertFalse(test.called)
def test_metaclass_dict_as_property(self):
class Meta(type):
@property
def __dict__(self):
self.executed = True
class Thing(metaclass=Meta):
executed = False
def __init__(self):
self.spam = 42
instance = Thing()
self.assertEqual(inspect.getattr_static(instance, "spam"), 42)
self.assertFalse(Thing.executed)
class TestGetGeneratorState(unittest.TestCase):
def setUp(self):
def number_generator():
for number in range(5):
yield number
self.generator = number_generator()
def _generatorstate(self):
return inspect.getgeneratorstate(self.generator)
def test_created(self):
self.assertEqual(self._generatorstate(), inspect.GEN_CREATED)
def test_suspended(self):
next(self.generator)
self.assertEqual(self._generatorstate(), inspect.GEN_SUSPENDED)
def test_closed_after_exhaustion(self):
for i in self.generator:
pass
self.assertEqual(self._generatorstate(), inspect.GEN_CLOSED)
def test_closed_after_immediate_exception(self):
with self.assertRaises(RuntimeError):
self.generator.throw(RuntimeError)
self.assertEqual(self._generatorstate(), inspect.GEN_CLOSED)
def test_running(self):
# As mentioned on issue #10220, checking for the RUNNING state only
# makes sense inside the generator itself.
# The following generator checks for this by using the closure's
# reference to self and the generator state checking helper method
def running_check_generator():
for number in range(5):
self.assertEqual(self._generatorstate(), inspect.GEN_RUNNING)
yield number
self.assertEqual(self._generatorstate(), inspect.GEN_RUNNING)
self.generator = running_check_generator()
# Running up to the first yield
next(self.generator)
# Running after the first yield
next(self.generator)
def test_easy_debugging(self):
# repr() and str() of a generator state should contain the state name
names = 'GEN_CREATED GEN_RUNNING GEN_SUSPENDED GEN_CLOSED'.split()
for name in names:
state = getattr(inspect, name)
self.assertIn(name, repr(state))
self.assertIn(name, str(state))
def test_main():
run_unittest(
TestDecorators, TestRetrievingSourceCode, TestOneliners, TestBuggyCases,
TestInterpreterStack, TestClassesAndFunctions, TestPredicates,
TestGetcallargsFunctions, TestGetcallargsMethods,
TestGetcallargsUnboundMethods, TestGetattrStatic, TestGetGeneratorState,
TestNoEOL
)
if __name__ == "__main__":
test_main()
| lgpl-3.0 | 4,287,678,903,176,920,000 | 37.241121 | 88 | 0.570258 | false |
CloudServer/nova | nova/openstack/common/threadgroup.py | 10 | 4949 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
import eventlet
from eventlet import greenpool
from nova.openstack.common._i18n import _LE
from nova.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
    Calls the :class:`ThreadGroup` to notify it.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
"""Wrapper around a greenthread, that holds a reference to the
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
it has done so it can be removed from the threads list.
"""
def __init__(self, thread, group):
self.thread = thread
self.thread.link(_thread_done, group=group, thread=self)
def stop(self):
self.thread.kill()
def wait(self):
return self.thread.wait()
def link(self, func, *args, **kwargs):
self.thread.link(func, *args, **kwargs)
class ThreadGroup(object):
"""The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them
when need be).
* provide an easy API to add timers.
"""
def __init__(self, thread_pool_size=10):
self.pool = greenpool.GreenPool(thread_pool_size)
self.threads = []
self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
pulse.start(interval=interval,
initial_delay=initial_delay)
self.timers.append(pulse)
def add_thread(self, callback, *args, **kwargs):
gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self)
self.threads.append(th)
return th
def thread_done(self, thread):
self.threads.remove(thread)
def _stop_threads(self):
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
# don't kill the current thread.
continue
try:
x.stop()
except eventlet.greenlet.GreenletExit:
pass
except Exception:
LOG.exception(_LE('Error stopping thread.'))
def stop_timers(self):
for x in self.timers:
try:
x.stop()
except Exception:
LOG.exception(_LE('Error stopping timer.'))
self.timers = []
def stop(self, graceful=False):
"""stop function has the option of graceful=True/False.
* In case of graceful=True, wait for all threads to be finished.
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
self.wait()
else:
# In case of graceful=False(Default), kill threads
# immediately
self._stop_threads()
def wait(self):
for x in self.timers:
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception:
LOG.exception(_LE('Error waiting on ThreadGroup.'))
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
continue
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
| apache-2.0 | -2,572,168,131,372,811,300 | 31.993333 | 79 | 0.601738 | false |
endlessm/chromium-browser | third_party/blink/renderer/build/scripts/in_generator.py | 2 | 4149 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import os.path
import shlex
import shutil
import optparse
from in_file import InFile
#########################################################
# This is now deprecated - use json5_generator.py instead
#########################################################
class GenericWriter(object):
def __init__(self, in_files):
self._outputs = {} # file_name -> generator
def _write_file_if_changed(self, output_dir, contents, file_name):
path = os.path.join(output_dir, file_name)
# The build system should ensure our output directory exists, but just in case.
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
# Only write the file if the contents have changed. This allows ninja to
# skip rebuilding targets which depend on the output.
with open(path, "a+") as output_file:
output_file.seek(0)
if output_file.read() != contents:
output_file.truncate(0)
output_file.write(contents)
def write_files(self, output_dir):
for file_name, generator in self._outputs.items():
self._write_file_if_changed(output_dir, generator(), file_name)
def set_gperf_path(self, gperf_path):
self.gperf_path = gperf_path
class Writer(GenericWriter):
# Subclasses should override.
class_name = None
defaults = None
valid_values = None
default_parameters = None
def __init__(self, in_files):
super(Writer, self).__init__(in_files)
if isinstance(in_files, basestring):
in_files = [in_files]
if in_files:
self.in_file = InFile.load_from_files(in_files, self.defaults,
self.valid_values,
self.default_parameters)
else:
self.in_file = None
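# Illustrative sketch of how this module is typically consumed (hypothetical
# names, shown only as documentation of the intended usage):
#
#   import sys
#   import in_generator
#
#   class FooWriter(in_generator.Writer):
#       defaults = {'Condition': None}
#
#       def __init__(self, in_files):
#           super(FooWriter, self).__init__(in_files)
#           self._outputs = {'Foo.h': self.generate_header}
#
#       def generate_header(self):
#           return '// generated contents\n'
#
#   if __name__ == '__main__':
#       in_generator.Maker(FooWriter).main(sys.argv)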
class Maker(object):
def __init__(self, writer_class):
self._writer_class = writer_class
def main(self, argv):
script_name = os.path.basename(argv[0])
args = argv[1:]
if len(args) < 1:
print("USAGE: %s INPUT_FILES" % script_name)
exit(1)
parser = optparse.OptionParser()
parser.add_option("--gperf", default="gperf")
parser.add_option("--output_dir", default=os.getcwd())
options, args = parser.parse_args()
writer = self._writer_class(args)
writer.set_gperf_path(options.gperf)
writer.write_files(options.output_dir)
| bsd-3-clause | 1,514,209,112,058,730,800 | 36.718182 | 87 | 0.646662 | false |
DavidNorman/tensorflow | tensorflow/tools/compatibility/tf_upgrade_v2.py | 2 | 100587 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import copy
import functools
import sys
import pasta
from tensorflow.tools.compatibility import all_renames_v2
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2
from tensorflow.tools.compatibility import reorders_v2
# These pylint warnings are a mistake.
# pylint: disable=g-explicit-bool-comparison,g-bool-id-comparison
class UnaliasedTFImport(ast_edits.AnalysisResult):
def __init__(self):
self.log_level = ast_edits.ERROR
self.log_message = ("The tf_upgrade_v2 script detected an unaliased "
"`import tensorflow`. The script can only run when "
"importing with `import tensorflow as tf`.")
class VersionedTFImport(ast_edits.AnalysisResult):
def __init__(self, version):
self.log_level = ast_edits.INFO
self.log_message = ("Not upgrading symbols because `tensorflow." + version
+ "` was directly imported as `tf`.")
class TFAPIImportAnalysisSpec(ast_edits.APIAnalysisSpec):
def __init__(self):
self.symbols_to_detect = {}
self.imports_to_detect = {
("tensorflow", None): UnaliasedTFImport(),
("tensorflow.compat.v1", "tf"): VersionedTFImport("compat.v1"),
("tensorflow.compat.v2", "tf"): VersionedTFImport("compat.v2"),
}
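    # For example, a plain `import tensorflow` (no alias) matches the
    # ("tensorflow", None) key and triggers UnaliasedTFImport, while
    # `import tensorflow.compat.v1 as tf` triggers VersionedTFImport and
    # disables symbol upgrades for that file.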
class TFAPIChangeSpec(ast_edits.NoUpdateSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
# If the new argument is None, it will be removed.
# Only keyword args are handled, so make sure to also put any function in
# function_reorders to ensure that all args are made into keywords first.
self.function_keyword_renames = {
# TODO(b/129398290)
# "tf.string_split": {
# "delimiter": "sep",
# },
"tf.test.assert_equal_graph_def": {
"checkpoint_v2": None,
"hash_table_shared_name": None,
},
"tf.autograph.to_code": {
"arg_types": None,
"arg_values": None,
"indentation": None,
},
"tf.autograph.to_graph": {
"arg_types": None,
"arg_values": None,
},
"tf.nn.embedding_lookup": {
"validate_indices": None,
},
"tf.image.sample_distorted_bounding_box": {
"seed2": None,
},
"tf.gradients": {
"colocate_gradients_with_ops": None,
},
"tf.hessians": {
"colocate_gradients_with_ops": None,
},
"*.minimize": {
"colocate_gradients_with_ops": None,
},
"*.compute_gradients": {
"colocate_gradients_with_ops": None,
},
"tf.cond": {
"strict": None,
"fn1": "true_fn",
"fn2": "false_fn"
},
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.resize": {
"align_corners": None,
},
"tf.image.resize_images": {
"align_corners": None,
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits": {
"dim": "axis",
"_sentinel": None,
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.nn.max_pool": {
"value": "input"
},
"tf.nn.avg_pool": {
"value": "input"
},
"tf.nn.avg_pool2d": {
"value": "input"
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
"tf.nn.conv1d": {
"value": "input",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d": {
"filter": "filters",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d_backprop_input": {
"use_cudnn_on_gpu": None,
"input_sizes": "output_shape",
"out_backprop": "input",
"filter": "filters",
},
"tf.contrib.summary.audio": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.create_file_writer": {
"name": None,
},
"tf.contrib.summary.generic": {
"name": "tag",
"tensor": "data",
"family": None,
},
"tf.contrib.summary.histogram": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.image": {
"tensor": "data",
"bad_color": None,
"max_images": "max_outputs",
"family": None,
},
"tf.contrib.summary.scalar": {
"tensor": "data",
"family": None,
},
"tf.nn.weighted_cross_entropy_with_logits": {
"targets": "labels",
},
"tf.decode_raw": {
"bytes": "input_bytes",
},
"tf.io.decode_raw": {
"bytes": "input_bytes",
},
"tf.contrib.framework.load_variable": {
"checkpoint_dir": "ckpt_dir_or_file",
}
}
# Mapping from function to the new name of the function
# Add additional renames not in renames_v2.py to all_renames_v2.py.
self.symbol_renames = all_renames_v2.symbol_renames
self.import_renames = {}
# Variables that should be changed to functions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_to_space",
"tf.cond",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.conv1d",
"tf.nn.conv2d",
"tf.nn.conv2d_backprop_input",
"tf.nn.ctc_beam_search_decoder",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
# TODO(b/129398290)
# "tf.string_split",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.test.assert_equal_graph_def",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
# tf.nn.softmax_cross_entropy_with_logits *must* be called with
# keyword arguments. Add keyword arguments in rare case when they
# are not specified.
"tf.nn.softmax_cross_entropy_with_logits",
"tf.nn.fractional_avg_pool",
"tf.nn.fractional_max_pool",
"tf.image.sample_distorted_bounding_box",
"tf.gradients",
"tf.hessians",
"tf.nn.max_pool",
"tf.nn.avg_pool",
"tf.estimator.LinearClassifier",
"tf.estimator.LinearRegressor",
"tf.estimator.DNNLinearCombinedClassifier",
"tf.estimator.DNNLinearCombinedRegressor",
"tf.estimator.DNNRegressor",
"tf.estimator.DNNClassifier",
"tf.estimator.BaselineClassifier",
"tf.estimator.BaselineRegressor",
"tf.initializers.uniform_unit_scaling",
"tf.uniform_unit_scaling_initializer",
"tf.train.sdca_fprint",
"tf.train.sdca_optimizer",
"tf.train.sdca_shrink_l1",
"tf.data.experimental.TensorStructure",
"tf.data.experimental.SparseTensorStructure",
"tf.data.experimental.RaggedTensorStructure",
"tf.data.experimental.TensorArrayStructure",
}
# Manual mapping of function names to be reordered to their list of argument
# names, in order. Only use this if argument names cannot be autodetected,
# e.g. if the functions are in contrib.
self.manual_function_reorders = {
"tf.contrib.summary.audio": [
"name", "tensor", "sample_rate", "max_outputs", "family", "step"],
"tf.contrib.summary.create_file_writer": [
"logdir", "max_queue", "flush_millis", "filename_suffix", "name"],
"tf.contrib.summary.generic": [
"name", "tensor", "metadata", "family", "step"],
"tf.contrib.summary.histogram": [
"name", "tensor", "family", "step"],
"tf.contrib.summary.image": [
"name", "tensor", "bad_color", "max_images", "family", "step"],
"tf.contrib.summary.scalar": [
"name", "tensor", "family", "step"],
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = dict(reorders_v2.reorders)
self.function_reorders.update(self.manual_function_reorders)
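    # For example (illustrative): a positional call like `tf.argmax(t, 0)`
    # is rewritten with explicit keywords, e.g. `tf.argmax(input=t, axis=0)`,
    # so that the keyword renames above can be applied safely.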
decay_function_comment = (
ast_edits.INFO,
"To use learning rate decay schedules with TensorFlow 2.0, switch to "
"the schedules in `tf.keras.optimizers.schedules`.\n"
)
assert_return_type_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
assert_rank_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, and"
" the data and summarize arguments have been removed."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
contrib_layers_layer_norm_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.layers.layer_norm` has been "
"deprecated, and its implementation has been integrated with "
"`tf.keras.layers.LayerNormalization` in TensorFlow 2.0. "
"Note that, the default value of `epsilon` is changed to `1e-3` in the "
"new API from `1e-12`, and this may introduce numerical differences. "
"Please check the new API and use that instead."
)
contrib_estimator_head_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.estimator.*_head` has been "
"deprecated, and its implementation has been integrated with "
"`tf.estimator.*Head` in TensorFlow 2.0. "
"Please check the new API and use that instead."
)
initializers_no_dtype_comment = (
ast_edits.INFO, "Initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"__call__ method.\nThe calls have been converted to compat.v1 for "
"safety (even though they may already have been correct).")
metrics_comment = (
ast_edits.INFO,
"tf.metrics have been replaced with object oriented versions in"
" TF 2.0 and after. The metric function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
losses_comment = (
ast_edits.INFO,
"tf.losses have been replaced with object oriented versions in"
" TF 2.0 and after. The loss function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
# This could be done with a _rename_if_arg_not_found_transformer
deprecate_partition_strategy_comment = (
ast_edits.WARNING,
"`partition_strategy` has been removed from <function name>. "
" The 'div' strategy will be used by default.")
# make change instead
uniform_unit_scaling_initializer_comment = (
ast_edits.ERROR,
"uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
# Make change instead (issue warning about strip_...)
export_saved_model_renamed = (
ast_edits.ERROR,
"(Manual edit required) Please rename the method export_savedmodel() "
"to export_saved_model(). Two things to note:\n\t(1) The argument "
"strip_default_attributes has been removed. The function will always "
"strip the default attributes from ops. If this breaks your code, "
"please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
"only effects core estimator. If you are using "
"tf.contrib.learn.Estimator, please switch to using core estimator.")
summary_api_comment = (
ast_edits.INFO,
"The TF 1.x summary API cannot be automatically migrated to TF 2.0, so "
"symbols have been converted to tf.compat.v1.summary.* and must be "
"migrated manually. Typical usage will only require changes to the "
"summary writing logic, not to individual calls like scalar(). "
"For examples of the new summary API, see the Effective TF 2.0 "
"migration document or check the TF 2.0 TensorBoard tutorials.")
contrib_summary_comment = (
ast_edits.WARNING,
"tf.contrib.summary.* functions have been migrated best-effort to "
"tf.compat.v2.summary.* equivalents where possible, but the resulting "
"code is not guaranteed to work, so please check carefully. For more "
"information about the new summary API, see the Effective TF 2.0 "
"migration document or check the updated TensorBoard tutorials.")
contrib_summary_family_arg_comment = (
ast_edits.WARNING,
"<function name> replacement does not accept a 'family' argument; "
"instead regular name scoping should be used. This call site specifies "
"a family argument that has been removed on conversion, so the emitted "
"tag names may be incorrect without manual editing.")
contrib_create_file_writer_comment = (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() has been ported to the new "
"tf.compat.v2.summary.create_file_writer(), which no longer re-uses "
"existing event files for the same logdir; instead it always opens a "
"new writer/file. The python writer objects must be re-used explicitly "
"if the reusing behavior is desired.")
contrib_summary_record_every_n_comment = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.summary.record_summaries_every_n_global_steps(n, step) "
"should be replaced by a call to tf.compat.v2.summary.record_if() with "
"the argument `lambda: tf.math.equal(0, global_step % n)` (or in graph "
"mode, the lambda body can be used directly). If no global step was "
"passed, instead use tf.compat.v1.train.get_or_create_global_step().")
contrib_summary_graph_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.graph() has no direct "
"equivalent in TF 2.0 because manual graph construction has been "
"superseded by use of tf.function. To log tf.function execution graphs "
"to the summary writer, use the new tf.compat.v2.summary.trace_* "
"functions instead.")
contrib_summary_import_event_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.import_event() has no "
"direct equivalent in TF 2.0. For a similar experimental feature, try "
"tf.compat.v2.summary.experimental.write_raw_pb() which also accepts "
"serialized summary protocol buffer input, but for tf.Summary "
"protobufs rather than tf.Events.")
keras_default_save_format_comment = (
ast_edits.WARNING,
"(This warning is only applicable if the code saves a tf.Keras model) "
"Keras model.save now saves to the Tensorflow SavedModel format by "
"default, instead of HDF5. To continue saving to HDF5, add the "
"argument save_format='h5' to the save() function.")
distribute_strategy_api_changes = (
"If you're using the strategy with a "
"custom training loop, note the following changes in methods: "
"make_dataset_iterator->experimental_distribute_dataset, "
"experimental_make_numpy_iterator->experimental_make_numpy_dataset, "
"extended.call_for_each_replica->experimental_run_v2, "
"reduce requires an axis argument, "
"unwrap->experimental_local_results "
"experimental_initialize and experimental_finalize no longer needed ")
contrib_mirrored_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.MirroredStrategy has "
"been migrated to tf.distribute.MirroredStrategy. Things to note: "
"Constructor arguments have changed. If you are using "
"MirroredStrategy with Keras training framework, the input provided to "
"`model.fit` will be assumed to have global batch size and split "
"across the replicas. " + distribute_strategy_api_changes)
core_mirrored_strategy_warning = (
ast_edits.WARNING,
"(Manual edit may be required) tf.distribute.MirroredStrategy API has "
"changed. " + distribute_strategy_api_changes)
contrib_one_device_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.OneDeviceStrategy has "
"been migrated to tf.distribute.OneDeviceStrategy. " +
distribute_strategy_api_changes)
contrib_tpu_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.TPUStrategy has "
"been migrated to tf.distribute.experimental.TPUStrategy. Note the "
"slight changes in constructor. " + distribute_strategy_api_changes)
contrib_collective_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.CollectiveAllReduceStrategy has "
"been migrated to "
"tf.distribute.experimental.MultiWorkerMirroredStrategy. Note the "
"changes in constructor. " + distribute_strategy_api_changes)
contrib_ps_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.ParameterServerStrategy has "
"been migrated to "
"tf.distribute.experimental.ParameterServerStrategy (multi machine) "
" and tf.distribute.experimental.CentralStorageStrategy (one machine). "
"Note the changes in constructors. " + distribute_strategy_api_changes)
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
# You can use *. to add items which do not check the FQN, and apply to e.g.,
# methods.
self.function_warnings = {
"*.export_savedmodel":
export_saved_model_renamed,
"*.save":
keras_default_save_format_comment,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_none_equal":
assert_return_type_comment,
"tf.assert_negative":
assert_return_type_comment,
"tf.assert_positive":
assert_return_type_comment,
"tf.assert_non_negative":
assert_return_type_comment,
"tf.assert_non_positive":
assert_return_type_comment,
"tf.assert_near":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_less_equal":
assert_return_type_comment,
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_greater_equal":
assert_return_type_comment,
"tf.assert_integer":
assert_return_type_comment,
"tf.assert_type":
assert_return_type_comment,
"tf.assert_scalar":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.assert_rank_at_least":
assert_rank_comment,
"tf.assert_rank_in":
assert_rank_comment,
"tf.contrib.layers.layer_norm":
contrib_layers_layer_norm_comment,
"tf.contrib.estimator.binary_classification_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.logistic_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_class_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_label_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.poisson_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.regression_head":
contrib_estimator_head_comment,
"tf.contrib.summary.all_summary_ops":
contrib_summary_comment,
"tf.contrib.summary.audio":
contrib_summary_comment,
"tf.contrib.summary.create_file_writer":
contrib_create_file_writer_comment,
"tf.contrib.summary.generic":
contrib_summary_comment,
"tf.contrib.summary.graph":
contrib_summary_graph_comment,
"tf.contrib.summary.histogram":
contrib_summary_comment,
"tf.contrib.summary.import_event":
contrib_summary_import_event_comment,
"tf.contrib.summary.image":
contrib_summary_comment,
"tf.contrib.summary.record_summaries_every_n_global_steps":
contrib_summary_record_every_n_comment,
"tf.contrib.summary.scalar":
contrib_summary_comment,
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_type":
assert_return_type_comment,
"tf.debugging.assert_scalar":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.nn.embedding_lookup":
deprecate_partition_strategy_comment,
"tf.nn.embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.nce_loss":
deprecate_partition_strategy_comment,
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment,
"tf.keras.estimator.model_to_estimator":
(ast_edits.WARNING,
"Estimators from <function name> will save object-based "
"checkpoints (format used by `keras_model.save_weights` and "
"`keras_model.load_weights`) by default in 2.0. To continue "
"saving name-based checkpoints, set `checkpoint_format='saver'`."),
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
"tf.get_variable":
(ast_edits.WARNING,
"<function name> returns ResourceVariables by default in 2.0, "
"which have well-defined semantics and are stricter about shapes. "
"You can disable this behavior by passing use_resource=False, or "
"by calling tf.compat.v1.disable_resource_variables()."),
"tf.pywrap_tensorflow":
(ast_edits.ERROR,
"<function name> cannot be converted automatically. "
"`tf.pywrap_tensorflow` will not be distributed with "
"TensorFlow 2.0, please consider an alternative in public "
"TensorFlow APIs."),
"tf.contrib.distribute.MirroredStrategy":
contrib_mirrored_strategy_warning,
"tf.distribute.MirroredStrategy":
core_mirrored_strategy_warning,
"tf.contrib.distribute.OneDeviceStrategy":
contrib_one_device_strategy_warning,
"tf.contrib.distribute.TPUStrategy":
contrib_tpu_strategy_warning,
"tf.contrib.distribute.CollectiveAllReduceStrategy":
contrib_collective_strategy_warning,
"tf.contrib.distribute.ParameterServerStrategy":
contrib_ps_strategy_warning,
"tf.summary.FileWriter": summary_api_comment,
"tf.summary.FileWriterCache": summary_api_comment,
"tf.summary.Summary": summary_api_comment,
"tf.summary.audio": summary_api_comment,
"tf.summary.histogram": summary_api_comment,
"tf.summary.image": summary_api_comment,
"tf.summary.merge": summary_api_comment,
"tf.summary.merge_all": summary_api_comment,
"tf.summary.scalar": summary_api_comment,
"tf.summary.tensor_summary": summary_api_comment,
"tf.summary.text": summary_api_comment,
}
for symbol, replacement in all_renames_v2.addons_symbol_mappings.items():
warning = (
ast_edits.WARNING, (
"(Manual edit required) `{}` has been migrated to `{}` in "
"TensorFlow Addons. The API spec may have changed during the "
"migration. Please see https://github.com/tensorflow/addons "
"for more info.").format(symbol, replacement))
self.function_warnings[symbol] = warning
# Warnings that are emitted only if a specific arg is found.
self.function_arg_warnings = {
"tf.nn.conv1d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_filter": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_input": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"tf.gradients no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.minimize": {
("colocate_gradients_with_ops", 5): (
ast_edits.INFO,
"Optimizer.minimize no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.compute_gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"Optimizer.compute_gradients no "
"longer takes 'colocate_gradients_with_ops' argument, it "
"behaves as if it was set to True."),
},
"tf.cond": {
("strict", 3): (
ast_edits.WARNING,
"tf.cond no longer takes 'strict' argument, it behaves as "
"if was set to True.")
},
"tf.contrib.summary.audio": {
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.create_file_writer": {
("name", 4): (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() no longer supports "
"implicit writer re-use based on shared logdirs or resource "
"names; this call site passed a 'name' argument that has been "
"removed. The new tf.compat.v2.summary.create_file_writer() "
"replacement has a 'name' parameter but the semantics are "
"the usual ones to name the op itself and do not control "
"writer re-use; writers must be manually re-used if desired.")
},
"tf.contrib.summary.generic": {
("name", 0): (
ast_edits.WARNING,
"tf.contrib.summary.generic() takes a 'name' argument for the "
"op name that also determines the emitted tag (prefixed by any "
"active name scopes), but tf.compat.v2.summary.write(), which "
"replaces it, separates these into 'tag' and 'name' arguments. "
"The 'name' argument here has been converted to 'tag' to "
"preserve a meaningful tag, but any name scopes will not be "
"reflected in the tag without manual editing."),
("family", 3): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.histogram": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.image": {
("bad_color", 2): (
ast_edits.WARNING,
"tf.contrib.summary.image no longer takes the 'bad_color' "
"argument; caller must now preprocess if needed. This call "
"site specifies a bad_color argument so it cannot be converted "
"safely."),
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.scalar": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.image.resize": {
("align_corners",
3): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize."),
},
"tf.image.resize_bilinear": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bilinear."),
},
"tf.image.resize_area": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_area."),
},
"tf.image.resize_bicubic": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bicubic."),
},
"tf.image.resize_nearest_neighbor": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_nearest_neighbor."),
},
}
# Specially handled functions
# Each transformer is a callable which will be called with the arguments
# transformer(parent, node, full_name, name, logs)
# Where logs is a list to which (level, line, col, msg) tuples can be
# appended, full_name is the FQN of the function called (or None if that is
# unknown), name is the name of the function called (or None is that is
# unknown). node is an ast.Call node representing this function call, and
# parent is its parent in the AST.
# The function may modify node (but not parent), and must return
# - none, if nothing was modified
# - node, if node was modified in place (make sure to use
# pasta.ast_utils.replace_child to swap out children, otherwise formatting
# may get messy)
# - a replacement for node, if the whole call node was replaced. The caller
# will take care of changing parent.
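    # Illustrative sketch of that interface (a hypothetical transformer, not
    # one of the registered ones below) which only logs and leaves the call
    # untouched:
    #
    #   def _log_only_transformer(parent, node, full_name, name, logs):
    #     logs.append((ast_edits.INFO, node.lineno, node.col_offset,
    #                  "Saw a call to %s." % (full_name or name)))
    #     return None  # nothing was modified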
canned_estimator_msg_optimizer = (
"tf.keras.optimizers.* only, so the call was converted to compat.v1. "
"Please note that tf.train.Optimizers have one-to-one correspondents "
"in tf.keras.optimizers, so you may be able to convert to the new "
"optimizers directly (See https://www.tensorflow.org/api_docs/python"
"/tf/keras/optimizers). Checkpoint compatibility is not guaranteed, "
"but there is a checkpoint converter tool that you can use.")
canned_estimator_msg = (
"no longer takes `input_layer_partitioner` arg, and it supports "
+ canned_estimator_msg_optimizer)
self.function_transformers = {
"*.make_initializable_iterator": _iterator_transformer,
"*.make_one_shot_iterator": _iterator_transformer,
"tf.nn.dropout": _dropout_transformer,
"tf.to_bfloat16": _cast_transformer,
"tf.to_complex128": _cast_transformer,
"tf.to_complex64": _cast_transformer,
"tf.to_double": _cast_transformer,
"tf.to_float": _cast_transformer,
"tf.to_int32": _cast_transformer,
"tf.to_int64": _cast_transformer,
"tf.nn.softmax_cross_entropy_with_logits":
_softmax_cross_entropy_with_logits_transformer,
"tf.image.extract_glimpse": _extract_glimpse_transformer,
"tf.image.resize_area": _image_resize_transformer,
"tf.image.resize_bicubic": _image_resize_transformer,
"tf.image.resize_bilinear": _image_resize_transformer,
"tf.image.resize_nearest_neighbor": _image_resize_transformer,
"tf.nn.fractional_avg_pool": _pool_seed_transformer,
"tf.nn.fractional_max_pool": _pool_seed_transformer,
"tf.name_scope": _name_scope_transformer,
# TODO(b/129398290)
# "tf.string_split": _string_split_transformer,
"tf.strings.split": _string_split_rtype_transformer,
"tf.estimator.BaselineEstimator":
functools.partial(
_rename_if_arg_found_transformer,
arg_name="optimizer",
message=("tf.estimator.BaselineEstimator supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["optimizer"],
message=("tf.estimator.BaselineClassifier supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message=("tf.estimator.BaselineRegressor supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.DNNEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNEstimator no longer takes "
"input_layer_partitioner, so the call was converted to "
"compat.v1."
),
"tf.estimator.DNNClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNClassifier " + canned_estimator_msg,
),
"tf.estimator.DNNRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNRegressor " + canned_estimator_msg,
),
"tf.estimator.LinearEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearEstimator " + canned_estimator_msg,
),
"tf.estimator.LinearClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearClassifier " + canned_estimator_msg,
),
"tf.estimator.LinearRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearRegressor " + canned_estimator_msg,
),
"tf.estimator.DNNLinearCombinedEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedEstimator "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedClassifier "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedRegressor "
+ canned_estimator_msg),
),
"tf.device": functools.partial(
_rename_if_arg_found_transformer, arg_name="device_name",
arg_ok_predicate=_is_ast_str, remove_if_ok=False,
message="tf.device no longer takes functions as an argument. "
"We could not determine that the argument value is a string, so "
"the call was converted to compat.v1."),
"tf.zeros_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.zeros_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.ones_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.ones_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.while_loop": functools.partial(
_rename_if_arg_found_transformer,
arg_name="return_same_structure",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.while_loop no longer takes 'return_same_structure' "
"argument and behaves as if return_same_structure=True. This call "
"site specifies something other than return_same_structure=True, "
"so it was converted to compat.v1."),
"tf.nn.ctc_beam_search_decoder": functools.partial(
_rename_if_arg_found_transformer,
arg_name="merge_repeated",
arg_ok_predicate=_is_ast_false, remove_if_ok=True,
message="tf.nn.ctc_beam_search_decoder no longer takes the "
"'merge_repeated' argument and behaves as if merge_repeated=False. "
"This call site specifies something other than "
"merge_repeated=False, so it was converted to compat.v1."),
"tf.nn.dilation2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.nn.erosion2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.contrib.summary.always_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="True"),
"tf.contrib.summary.audio": _add_summary_step_transformer,
"tf.contrib.summary.generic": _add_summary_step_transformer,
"tf.contrib.summary.histogram": _add_summary_step_transformer,
"tf.contrib.summary.image": _add_summary_step_transformer,
"tf.contrib.summary.never_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="False"),
"tf.contrib.summary.scalar": _add_summary_step_transformer,
"tf.contrib.layers.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"tf.contrib.layers.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"tf.contrib.layers.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.initializers.uniform_unit_scaling":
_add_uniform_scaling_initializer_transformer,
"tf.uniform_unit_scaling_initializer":
_add_uniform_scaling_initializer_transformer,
"slim.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"slim.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"slim.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"slim.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"slim.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.keras.models.save_model": functools.partial(
_add_argument_transformer,
arg_name="save_format",
arg_value_ast=ast.Str("h5")),
}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
def preprocess(self, root_node):
visitor = ast_edits.PastaAnalyzeVisitor(TFAPIImportAnalysisSpec())
visitor.visit(root_node)
detections = set(visitor.results)
# If we have detected the presence of imports of specific TF versions,
# We want to modify the update spec to check only module deprecations
# and skip all other conversions.
if detections:
self.function_handle = {}
self.function_reorders = {}
self.function_keyword_renames = {}
self.symbol_renames = {}
self.function_warnings = {}
self.change_to_function = {}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
self.function_transformers = {}
self.import_renames = {}
return visitor.log, visitor.warnings_and_errors
def clear_preprocessing(self):
self.__init__()
def _is_ast_str(node):
"""Determine whether this node represents a string."""
allowed_types = [ast.Str]
if hasattr(ast, "Bytes"):
allowed_types += [ast.Bytes]
if hasattr(ast, "JoinedStr"):
allowed_types += [ast.JoinedStr]
if hasattr(ast, "FormattedValue"):
allowed_types += [ast.FormattedValue]
  return isinstance(node, tuple(allowed_types))
def _is_ast_true(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is True
else:
return isinstance(node, ast.Name) and node.id == "True"
def _is_ast_false(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is False
else:
return isinstance(node, ast.Name) and node.id == "False"
# Lots of unused arguments below, since these are called in a standard manner.
# pylint: disable=unused-argument
def _rename_if_arg_found_transformer(parent, node, full_name, name, logs,
arg_name=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if the given arg is found.
This requires the function to be called with all named args, so for using
this transformer, the function should also be added to renames.
If the arg is not found, the call site is left alone.
If the arg is found, and if arg_ok_predicate is given, it is called with
the ast Expression representing the argument value found. If it returns
True, the function is left alone.
If the arg is found, arg_ok_predicate is not None and returns ok, and
remove_if_ok is True, the argument is removed from the call.
Otherwise, `compat.v1` is inserted between tf and the function name.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_name: name of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
# Check whether arg is there.
arg_present, arg_value = ast_edits.get_arg_value(node, arg_name)
if not arg_present:
return
# Check whether arg is problematic (and if not, maybe remove it).
if arg_ok_predicate and arg_ok_predicate(arg_value):
if remove_if_ok:
for i, kw in enumerate(node.keywords):
if kw.arg == arg_name:
node.keywords.pop(i)
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument %s for function %s" % (
arg_name, full_name or name)))
break
return node
else:
return
# All conditions met, insert v1 and log what we did.
# We must have a full name, so the func is an attribute.
new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
node.func = ast_edits.full_name_node(new_name)
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Renaming %s to %s because argument %s is present. %s" %
(full_name, new_name, arg_name, message if message is not None else "")
))
return node
def _add_argument_transformer(parent, node, full_name, name, logs,
arg_name, arg_value_ast):
"""Adds an argument (as a final kwarg arg_name=arg_value_ast)."""
node.keywords.append(ast.keyword(arg=arg_name, value=arg_value_ast))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding argument '%s' to call to %s." % (pasta.dump(node.keywords[-1]),
full_name or name)
))
return node
def _iterator_transformer(parent, node, full_name, name, logs):
"""Transform iterator methods to compat function calls."""
# First, check that node.func.value is not already something we like
# (tf.compat.v1.data), or something which is handled in the rename
# (tf.data). This transformer only handles the method call to function call
# conversion.
if full_name and (full_name.startswith("tf.compat.v1.data") or
full_name.startswith("tf.data")):
return
# This should never happen, since we're only called for Attribute nodes.
if not isinstance(node.func, ast.Attribute):
return
# Transform from x.f(y) to tf.compat.v1.data.f(x, y)
# Fortunately, node.func.value should already have valid position info
node.args = [node.func.value] + node.args
node.func.value = ast_edits.full_name_node("tf.compat.v1.data")
logs.append((ast_edits.WARNING, node.lineno, node.col_offset,
"Changing dataset.%s() to tf.compat.v1.data.%s(dataset). "
"Please check this transformation.\n" % (name, name)))
return node
def _dropout_transformer(parent, node, full_name, name, logs):
"""Replace keep_prob with 1-rate."""
def _replace_keep_prob_node(parent, old_value):
"""Replaces old_value with 1-(old_value)."""
one = ast.Num(n=1)
one.lineno = 0
one.col_offset = 0
new_value = ast.BinOp(left=one, op=ast.Sub(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a keep_prob keyword arg
for keep_prob in node.keywords:
if keep_prob.arg == "keep_prob":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate\n"))
keep_prob.arg = "rate"
_replace_keep_prob_node(keep_prob, keep_prob.value)
return node
# Maybe it was a positional arg
if len(node.args) < 2:
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"tf.nn.dropout called without arguments, so "
"automatic fix was disabled. tf.nn.dropout has changed "
"the semantics of the second argument."))
else:
_replace_keep_prob_node(node, node.args[1])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate, and "
"recomputing value.\n"))
return node
def _cast_transformer(parent, node, full_name, name, logs):
"""Transforms to_int and to_float to cast(..., dtype=...)."""
# Find out the dtype to cast to from the function name
dtype_str = name[3:]
# Special cases where the full dtype is not given
if dtype_str == "float":
dtype_str = "float32"
elif dtype_str == "double":
dtype_str = "float64"
new_arg = ast.keyword(arg="dtype",
value=ast.Attribute(value=ast.Name(id="tf",
ctx=ast.Load()),
attr=dtype_str, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 2:
name_arg = ast.keyword(arg="name",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(name_arg)
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "cast"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "cast"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
dtype_str)))
return node
def _softmax_cross_entropy_with_logits_transformer(
parent, node, full_name, name, logs):
"""Wrap labels argument with stop_gradients."""
def _wrap_label(parent, old_value):
"""Wrap labels with tf.stop_gradient."""
already_stop_grad = (isinstance(old_value, ast.Call) and
isinstance(old_value.func, ast.Attribute) and
old_value.func.attr == "stop_gradient" and
isinstance(old_value.func.value, ast.Name) and
old_value.func.value.id == "tf")
if already_stop_grad:
return False
try:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [])
except TypeError:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [], None, None)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
return True
# Check if we have a labels keyword arg
for karg in node.keywords:
if karg.arg == "labels":
if _wrap_label(karg, karg.value):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing labels arg of "
"tf.nn.softmax_cross_entropy_with_logits to "
"tf.stop_gradient(labels). Please check this "
"transformation.\n"))
return node
return node
def _image_resize_transformer(parent, node, full_name, name, logs):
"""Transforms image.resize_* to image.resize(..., method=*, ...)."""
resize_method = name[7:].upper()
new_arg = ast.keyword(arg="method",
value=ast.Attribute(
value=ast.Attribute(
value=ast.Attribute(
value=ast.Name(id="tf", ctx=ast.Load()),
attr="image", ctx=ast.Load()),
attr="ResizeMethod", ctx=ast.Load()),
attr=resize_method, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 4:
pos_arg = ast.keyword(arg="preserve_aspect_ratio",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(pos_arg)
if len(node.args) == 3:
pos_arg = ast.keyword(arg="align_corners",
value=node.args[-1])
node.args = node.args[:-1]
new_keywords = []
for kw in node.keywords:
if kw.arg != "align_corners":
new_keywords.append(kw)
node.keywords = new_keywords
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "resize"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "resize"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.image.resize(..., "
"method=tf.image.ResizeMethod.%s)." % (full_name,
resize_method)))
return node
def _pool_seed_transformer(parent, node, full_name, name, logs):
"""Removes seed2 and deterministic, and adds non-zero seed if needed."""
# This requires that this function uses all kwargs (add to renames!).
seed_arg = None
deterministic = False
modified = False
new_keywords = []
for kw in node.keywords:
if sys.version_info[:2] >= (3, 5) and isinstance(kw, ast.Starred):
pass
elif kw.arg == "seed":
seed_arg = kw
elif kw.arg == "seed2" or kw.arg == "deterministic":
lineno = getattr(kw, "lineno", node.lineno)
col_offset = getattr(kw, "col_offset", node.col_offset)
logs.append((ast_edits.INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
kw.arg, full_name or name)))
if kw.arg == "deterministic":
if not _is_ast_false(kw.value):
deterministic = True
modified = True
continue
new_keywords.append(kw)
if deterministic:
if seed_arg is None:
new_keywords.append(ast.keyword(arg="seed", value=ast.Num(42)))
      logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding seed=42 to call to %s since determinism was requested" % (
full_name or name)
))
else:
      logs.append((
          ast_edits.WARNING, node.lineno, node.col_offset,
          "The deterministic argument is deprecated for %s, pass a "
          "non-zero seed for determinism. The deterministic argument is "
          "present, possibly not False, and the seed is already set. The "
          "converter cannot determine whether it is nonzero, please check."
          % (full_name or name)
      ))
if modified:
node.keywords = new_keywords
return node
else:
return
def _extract_glimpse_transformer(parent, node, full_name, name, logs):
def _replace_uniform_noise_node(parent, old_value):
"""Replaces old_value with 'uniform' or 'guassian'."""
uniform = ast.Str(s="uniform")
gaussian = ast.Str(s="gaussian")
new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around noise.value.test (and remove the old prefix/
# suffix, they should only be around new_value.test), so that:
# "uniform" if (a if b else c) else "gaussian" is valid.
pasta.base.formatting.set(new_value.test, "prefix", "(")
pasta.base.formatting.set(new_value.test, "suffix", ")")
# Check if we have a uniform_noise keyword arg
for uniform_noise in node.keywords:
if uniform_noise.arg == "uniform_noise":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse "
"to noise, and recomputing value. Please check this "
"transformation.\n"))
uniform_noise.arg = "noise"
value = "uniform" if uniform_noise.value else "gaussian"
_replace_uniform_noise_node(uniform_noise, uniform_noise.value)
return node
# Since `noise`/`uniform_noise` is optional arg, nothing needs to be
  # done if len(node.args) < 6.
  if len(node.args) >= 6:
_replace_uniform_noise_node(node, node.args[5])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse to "
"noise, and recomputing value.\n"))
return node
def _add_summary_step_transformer(parent, node, full_name, name, logs):
"""Adds a step argument to the summary API call if not specified.
The inserted argument value is tf.compat.v1.train.get_or_create_global_step().
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "step":
return node
default_value = "tf.compat.v1.train.get_or_create_global_step()"
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(default_value)
node.keywords.append(ast.keyword(arg="step", value=ast_value))
logs.append((
ast_edits.WARNING, node.lineno, node.col_offset,
"Summary API writing function %s now requires a 'step' argument; "
"inserting default of %s." % (full_name or name, default_value)))
return node
def _add_summary_recording_cond_transformer(parent, node, full_name, name, logs,
cond):
"""Adds cond argument to tf.contrib.summary.xxx_record_summaries().
This is in anticipation of them being renamed to tf.summary.record_if(), which
requires the cond argument.
"""
node.args.append(pasta.parse(cond))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding `%s` argument to %s in anticipation of it being renamed to "
"tf.compat.v2.summary.record_if()" % (cond, full_name or name)))
return node
def _add_loss_reduction_transformer(parent, node, full_name, name, logs):
"""Adds a loss_reduction argument if not specified.
Default value for tf.estimator.*Classifier and tf.estimator.*Regressor
loss_reduction argument changed to SUM_OVER_BATCH_SIZE. So, we update
existing calls to use the old default value `tf.keras.losses.Reduction.SUM`.
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "loss_reduction":
return node
default_value = "tf.keras.losses.Reduction.SUM"
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(default_value)
node.keywords.append(ast.keyword(arg="loss_reduction", value=ast_value))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"%s: Default value of loss_reduction has been changed to "
"SUM_OVER_BATCH_SIZE; inserting old default value %s.\n"
% (full_name or name, default_value)))
return node
def _rename_if_any_arg_found_transformer(
parent,
node,
full_name,
name,
logs,
arg_names=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if any of the arg_names is found.
Args:
parent: Parent of node.
node: ast.Call node to modify.
full_name: full name of function to modify.
name: name of function to modify.
logs: list of logs to append to.
arg_names: list of names of the argument to look for.
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
for arg_name in arg_names:
rename_node = _rename_if_arg_found_transformer(parent, node,
full_name, name, logs,
arg_name, arg_ok_predicate,
remove_if_ok, message)
node = rename_node if rename_node else node
return node
def _rename_if_arg_found_and_add_loss_reduction_transformer(
parent,
node,
full_name,
name,
logs,
arg_names=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Combination of _rename_if_arg_found and _add_loss_reduction transformers.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_names: list of names of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
node = _add_loss_reduction_transformer(parent, node, full_name, name, logs)
for arg_name in arg_names:
rename_node = _rename_if_arg_found_transformer(parent, node, full_name,
name, logs, arg_name,
arg_ok_predicate,
remove_if_ok, message)
node = rename_node if rename_node else node
return node
def _add_uniform_scaling_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to uniform_unit_scaling_initializer.
Transforms:
tf.uniform_unit_scaling_initializer(factor, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=factor, distribution="uniform", seed=seed)
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "factor":
keyword_arg.arg = "scale"
distribution_value = "\"uniform\""
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(distribution_value)
node.keywords.append(ast.keyword(arg="distribution", value=ast_value))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
return node
def _contrib_layers_xavier_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to contrib.layers.xavier_initializer.
Transforms:
tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=1.0, mode="fan_avg",
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
Returns: The new node
"""
def _get_distribution(old_value):
"""Returns an AST matching the following:
("uniform" if (old_value) else "truncated_normal")
"""
dist = pasta.parse("\"uniform\" if old_value else \"truncated_normal\"")
ifexpr = dist.body[0].value
pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
pasta.base.formatting.set(dist, "prefix", "(")
pasta.base.formatting.set(dist, "suffix", ")")
return dist
found_distribution = False
for keyword_arg in node.keywords:
if keyword_arg.arg == "uniform":
found_distribution = True
keyword_arg.arg = "distribution"
old_value = keyword_arg.value
new_value = _get_distribution(keyword_arg.value)
pasta.ast_utils.replace_child(keyword_arg, old_value, new_value)
pasta.base.formatting.set(keyword_arg.value, "prefix", "(")
pasta.base.formatting.set(keyword_arg.value, "suffix", ")")
new_keywords = []
scale = pasta.parse("1.0")
new_keywords.append(ast.keyword(arg="scale", value=scale))
mode = pasta.parse("\"fan_avg\"")
new_keywords.append(ast.keyword(arg="mode", value=mode))
if len(node.args) >= 1:
found_distribution = True
dist = _get_distribution(node.args[0])
new_keywords.append(ast.keyword(arg="distribution", value=dist))
if not found_distribution:
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
uniform_dist = pasta.parse("\"uniform\"")
new_keywords.append(ast.keyword(arg="distribution", value=uniform_dist))
if len(node.args) >= 2:
new_keywords.append(ast.keyword(arg="seed", value=node.args[1]))
if len(node.args) >= 3:
new_keywords.append(ast.keyword(arg="dtype", value=node.args[2]))
node.args = []
node.keywords = new_keywords + node.keywords
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing tf.contrib.layers xavier initializer"
" to a tf.compat.v1.keras.initializers.VarianceScaling and"
" converting arguments.\n"))
return node
def _contrib_layers_variance_scaling_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to contrib.layers.variance_scaling_initializer.
Transforms:
tf.contrib.layers.variance_scaling_initializer(
factor, mode, uniform, seed, dtype
) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=factor, mode=mode.lower(),
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
And handles the case where no factor is provided and scale needs to be
set to 2.0 to match contrib's default instead of tf.keras.initializer's
default of 1.0
"""
def _replace_distribution(parent, old_value):
"""Replaces old_value: ("uniform" if (old_value) else "truncated_normal")"""
new_value = pasta.parse(
"\"uniform\" if old_value else \"truncated_normal\"")
ifexpr = new_value.body[0].value
pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
pasta.ast_utils.replace_child(parent, old_value, new_value)
pasta.base.formatting.set(new_value, "prefix", "(")
pasta.base.formatting.set(new_value, "suffix", ")")
def _replace_mode(parent, old_value):
"""Replaces old_value with (old_value).lower()."""
new_value = pasta.parse("mode.lower()")
mode = new_value.body[0].value.func
pasta.ast_utils.replace_child(mode, mode.value, old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Need to keep track of scale because slim & keras
# have different defaults
found_scale = False
for keyword_arg in node.keywords:
if keyword_arg.arg == "factor":
keyword_arg.arg = "scale"
found_scale = True
if keyword_arg.arg == "mode":
_replace_mode(keyword_arg, keyword_arg.value)
if keyword_arg.arg == "uniform":
keyword_arg.arg = "distribution"
_replace_distribution(keyword_arg, keyword_arg.value)
# Handle any detected positional arguments
if len(node.args) >= 1:
found_scale = True
if len(node.args) >= 2:
_replace_mode(node, node.args[1])
if len(node.args) >= 3:
_replace_distribution(node, node.args[2])
# If no scale was provided, make tf 2.0 use slim's default factor
if not found_scale:
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
scale_value = pasta.parse("2.0")
node.keywords = ([ast.keyword(arg="scale", value=scale_value)]
+ node.keywords)
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing tf.contrib.layers.variance_scaling_initializer"
" to a tf.compat.v1.keras.initializers.VarianceScaling and"
" converting arguments.\n"))
return node
def _contrib_layers_l1_regularizer_transformer(
parent, node, full_name, name, logs):
"""Replace slim l1 regularizer with Keras one.
This entails renaming the 'scale' arg to 'l' and dropping any
provided scope arg.
"""
# Check if we have a scale or scope keyword arg
scope_keyword = None
for keyword in node.keywords:
if keyword.arg == "scale":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Renaming scale arg of regularizer\n"))
keyword.arg = "l"
if keyword.arg == "scope":
scope_keyword = keyword
# Remove the scope keyword or arg if it is present
if scope_keyword:
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l1_regularizer,"
" because it is unsupported in tf.keras.regularizers.l1\n"))
node.keywords.remove(scope_keyword)
if len(node.args) > 1:
node.args = node.args[:1]
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l1_regularizer,"
" because it is unsupported in tf.keras.regularizers.l1\n"))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "l1"
return node
def _contrib_layers_l2_regularizer_transformer(
parent, node, full_name, name, logs):
"""Replace slim l2 regularizer with Keras one, with l=0.5*scale.
Also drops the scope argument.
"""
def _replace_scale_node(parent, old_value):
"""Replaces old_value with 0.5*(old_value)."""
half = ast.Num(n=0.5)
half.lineno = 0
half.col_offset = 0
new_value = ast.BinOp(left=half, op=ast.Mult(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
# Put parentheses around scale.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a scale or scope keyword arg
scope_keyword = None
for keyword in node.keywords:
if keyword.arg == "scale":
keyword.arg = "l"
_replace_scale_node(keyword, keyword.value)
if keyword.arg == "scope":
scope_keyword = keyword
# Maybe it was a positional arg
if len(node.args) >= 1:
_replace_scale_node(node, node.args[0])
# Remove the scope keyword or arg if it is present
if scope_keyword:
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l2_regularizer,"
" because it is unsupported in tf.keras.regularizers.l2\n"))
node.keywords.remove(scope_keyword)
if len(node.args) > 1:
node.args = node.args[:1]
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l2_regularizer,"
" because it is unsupported in tf.keras.regularizers.l2\n"))
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Multiplying scale arg of tf.contrib.layers.l2_regularizer"
" by half to what tf.keras.regularizers.l2 expects.\n"))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "l2"
return node
def _name_scope_transformer(parent, node, full_name, name, logs):
"""Fix name scope invocation to use 'default_name' and omit 'values' args."""
name_found, name = ast_edits.get_arg_value(node, "name", 0)
default_found, default_name = ast_edits.get_arg_value(node, "default_name", 1)
# If an actual name was given...
if name_found and pasta.dump(name) != "None":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"`name` passed to `name_scope`. Because you may be re-entering"
" an existing scope, it is not safe to convert automatically, "
" the v2 name_scope does not support re-entering scopes by"
" name.\n"))
# Rename to compat.v1
new_name = "tf.compat.v1.name_scope"
logs.append((ast_edits.INFO, node.func.lineno, node.func.col_offset,
"Renamed %r to %r" % (full_name, new_name)))
new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
ast.copy_location(new_name_node, node.func)
pasta.ast_utils.replace_child(node, node.func, new_name_node)
return node
if default_found:
# New name scope doesn't have name, but it has a default name. We use
# name=default_name, and values can be dropped (it's only for
# error reporting and useless outside of graph mode).
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Using default_name as name in call to name_scope.\n"))
# Remove all args other than name
node.args = []
node.keywords = [ast.keyword(arg="name", value=default_name)]
return node
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"name_scope call with neither name nor default_name cannot be "
"converted properly."))
def _rename_to_compat_v1(node, full_name, logs, reason):
new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
return _rename_func(node, full_name, new_name, logs, reason)
def _rename_func(node, full_name, new_name, logs, reason):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Renamed %r to %r: %s" % (full_name, new_name, reason)))
new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
ast.copy_location(new_name_node, node.func)
pasta.ast_utils.replace_child(node, node.func, new_name_node)
return node
def _string_split_transformer(parent, node, full_name, name, logs):
"""Update tf.string_split arguments: skip_empty, sep, result_type, source."""
# Check the skip_empty parameter: if not false, then use compat.v1.
for i, kw in enumerate(node.keywords):
if kw.arg == "skip_empty":
if _is_ast_false(kw.value):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"removed argument skip_empty for tf.string_split."))
node.keywords.pop(i)
break
else:
return _rename_to_compat_v1(
node, full_name, logs, "tf.string_split's replacement no longer "
"takes the skip_empty argument.")
# Check the sep parameter: if it's definitely an empty string, use
# tf.strings.bytes_split(). If we can't tell, then use compat.v1.
found_sep = False
for i, kw in enumerate(node.keywords):
if kw.arg == "sep":
found_sep = True
if isinstance(kw.value, ast.Str):
if kw.value.s == "":
node = _rename_func(
node, full_name, "tf.strings.bytes_split", logs,
"Splitting bytes is not handled by tf.strings.bytes_split().")
node.keywords.pop(i)
else:
return _rename_to_compat_v1(
node, full_name, logs,
"The semantics for tf.string_split's sep parameter have changed "
"when sep is the empty string; but sep is not a string literal, "
"so we can't tell if it's an empty string.")
if not found_sep:
return _rename_to_compat_v1(
node, full_name, logs,
"The semantics for tf.string_split's sep parameter have changed "
"when sep unspecified: it now splits on all whitespace, not just "
"the space character.")
# Check the result_type parameter
return _string_split_rtype_transformer(parent, node, full_name, name, logs)
def _string_split_rtype_transformer(parent, node, full_name, name, logs):
"""Update tf.strings.split arguments: result_type, source."""
# Remove the "result_type" argument.
need_to_sparse = True
for i, kw in enumerate(node.keywords):
if kw.arg == "result_type":
if (isinstance(kw.value, ast.Str) and
kw.value.s in ("RaggedTensor", "SparseTensor")):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument result_type=%r for function %s" %
(kw.value.s, full_name or name)))
node.keywords.pop(i)
if kw.value.s == "RaggedTensor":
need_to_sparse = False
else:
return _rename_to_compat_v1(
node, full_name, logs,
"%s no longer takes the result_type parameter." % full_name)
break
for i, kw in enumerate(node.keywords):
if kw.arg == "source":
kw.arg = "input"
# If necessary, add a call to .to_sparse() to convert the output of
# strings.split from a RaggedTensor to a SparseTensor.
if need_to_sparse:
if (isinstance(parent, ast.Attribute) and parent.attr == "to_sparse"):
return # Prevent infinite recursion (since child nodes are transformed)
logs.append(
(ast_edits.INFO, node.lineno, node.col_offset,
"Adding call to RaggedTensor.to_sparse() to result of strings.split, "
"since it now returns a RaggedTensor."))
node = ast.Attribute(value=copy.deepcopy(node), attr="to_sparse")
try:
node = ast.Call(node, [], [])
except TypeError:
node = ast.Call(node, [], [], None, None)
return node
| apache-2.0 | -6,810,997,088,229,367,000 | 38.322518 | 80 | 0.595763 | false |
Leoniela/nipype | nipype/utils/tests/test_misc.py | 9 | 2142 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from nipype.testing import assert_equal, assert_true, assert_false
from nipype.utils.misc import (container_to_string, getsource,
create_function_from_source, str2bool, flatten,
unflatten)
def test_cont_to_str():
# list
x = ['a', 'b']
yield assert_equal, container_to_string(x), 'a b'
# tuple
x = tuple(x)
yield assert_equal, container_to_string(x), 'a b'
# set
x = set(x)
y = container_to_string(x)
yield assert_true, (y == 'a b') or (y == 'b a')
# dict
x = dict(a='a', b='b')
y = container_to_string(x)
yield assert_true, (y == 'a b') or (y == 'b a')
# string
yield assert_equal, container_to_string('foobar'), 'foobar'
# int. Integers are not the main intent of this function, but see
# no reason why they shouldn't work.
yield assert_equal, container_to_string(123), '123'
def _func1(x):
return x**3
def test_func_to_str():
def func1(x):
return x**2
# Should be ok with both functions!
for f in _func1, func1:
f_src = getsource(f)
f_recreated = create_function_from_source(f_src)
yield assert_equal, f(2.3), f_recreated(2.3)
def test_str2bool():
yield assert_true, str2bool("yes")
yield assert_true, str2bool("true")
yield assert_true, str2bool("t")
yield assert_true, str2bool("1")
yield assert_false, str2bool("no")
yield assert_false, str2bool("false")
yield assert_false, str2bool("n")
yield assert_false, str2bool("f")
yield assert_false, str2bool("0")
def test_flatten():
in_list = [[1,2,3],[4],[[5,6],7],8]
flat = flatten(in_list)
yield assert_equal, flat, [1,2,3,4,5,6,7,8]
back = unflatten(flat, in_list)
yield assert_equal, in_list, back
new_list = [2,3,4,5,6,7,8,9]
back = unflatten(new_list, in_list)
yield assert_equal, back, [[2,3,4],[5],[[6,7],8],9]
flat = flatten([])
yield assert_equal, flat, []
back = unflatten([], [])
yield assert_equal, back, []
| bsd-3-clause | 2,997,545,867,567,757,000 | 28.75 | 78 | 0.598506 | false |
Belxjander/Kirito | SnowStorm/indra/lib/python/indra/ipc/siesta.py | 2 | 15584 | """\
@file siesta.py
@brief A tiny llsd based RESTful web services framework
$LicenseInfo:firstyear=2008&license=mit$
Copyright (c) 2008, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
from indra.base import config
from indra.base import llsd
from webob import exc
import webob
import re, socket
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import cjson
json_decode = cjson.decode
json_encode = cjson.encode
JsonDecodeError = cjson.DecodeError
JsonEncodeError = cjson.EncodeError
except ImportError:
import simplejson
json_decode = simplejson.loads
json_encode = simplejson.dumps
JsonDecodeError = ValueError
JsonEncodeError = TypeError
llsd_parsers = {
'application/json': json_decode,
llsd.BINARY_MIME_TYPE: llsd.parse_binary,
'application/llsd+notation': llsd.parse_notation,
llsd.XML_MIME_TYPE: llsd.parse_xml,
'application/xml': llsd.parse_xml,
}
def mime_type(content_type):
'''Given a Content-Type header, return only the MIME type.'''
return content_type.split(';', 1)[0].strip().lower()
class BodyLLSD(object):
'''Give a webob Request or Response an llsd based "content" property.
Getting the content property parses the body, and caches the result.
Setting the content property formats a payload, and the body property
is set.'''
def _llsd__get(self):
'''Get, set, or delete the LLSD value stored in this object.'''
try:
return self._llsd
except AttributeError:
if not self.body:
raise AttributeError('No llsd attribute has been set')
else:
mtype = mime_type(self.content_type)
try:
parser = llsd_parsers[mtype]
except KeyError:
raise exc.HTTPUnsupportedMediaType(
'Content type %s not supported' % mtype).exception
try:
self._llsd = parser(self.body)
except (llsd.LLSDParseError, JsonDecodeError, TypeError), err:
raise exc.HTTPBadRequest(
'Could not parse body: %r' % err.args).exception
return self._llsd
def _llsd__set(self, val):
req = getattr(self, 'request', None)
if req is not None:
formatter, ctype = formatter_for_request(req)
self.content_type = ctype
else:
formatter, ctype = formatter_for_mime_type(
mime_type(self.content_type))
self.body = formatter(val)
def _llsd__del(self):
if hasattr(self, '_llsd'):
del self._llsd
content = property(_llsd__get, _llsd__set, _llsd__del)
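    # Illustrative usage of this mixin (hypothetical values, assuming the
    # webob-based Request/Response subclasses defined below):
    #   req = Request.blank('/agent/info')
    #   req.content = {'name': 'foo'}  # formats the body per Content-Type
    #   parsed = req.content           # parses and caches the llsd payload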
class Response(webob.Response, BodyLLSD):
'''Response class with LLSD support.
A sensible default content type is used.
Setting the llsd property also sets the body. Getting the llsd
property parses the body if necessary.
If you set the body property directly, the llsd property will be
deleted.'''
default_content_type = 'application/llsd+xml'
def _body__set(self, body):
if hasattr(self, '_llsd'):
del self._llsd
super(Response, self)._body__set(body)
def cache_forever(self):
self.cache_expires(86400 * 365)
body = property(webob.Response._body__get, _body__set,
webob.Response._body__del,
webob.Response._body__get.__doc__)
class Request(webob.Request, BodyLLSD):
'''Request class with LLSD support.
Sensible content type and accept headers are used by default.
Setting the content property also sets the body. Getting the content
property parses the body if necessary.
If you set the body property directly, the content property will be
deleted.'''
default_content_type = 'application/llsd+xml'
default_accept = ('application/llsd+xml; q=0.5, '
'application/llsd+notation; q=0.3, '
'application/llsd+binary; q=0.2, '
'application/xml; q=0.1, '
'application/json; q=0.0')
def __init__(self, environ=None, *args, **kwargs):
if environ is None:
environ = {}
else:
environ = environ.copy()
if 'CONTENT_TYPE' not in environ:
environ['CONTENT_TYPE'] = self.default_content_type
if 'HTTP_ACCEPT' not in environ:
environ['HTTP_ACCEPT'] = self.default_accept
super(Request, self).__init__(environ, *args, **kwargs)
def _body__set(self, body):
if hasattr(self, '_llsd'):
del self._llsd
super(Request, self)._body__set(body)
def path_urljoin(self, *parts):
        return '/'.join([self.path_url.rstrip('/')] + list(parts))
body = property(webob.Request._body__get, _body__set,
webob.Request._body__del, webob.Request._body__get.__doc__)
def create_response(self, content=None, status='200 OK',
conditional_response=webob.NoDefault):
resp = self.ResponseClass(status=status, request=self,
conditional_response=conditional_response)
resp.content = content
return resp
def curl(self):
'''Create and fill out a pycurl easy object from this request.'''
import pycurl
c = pycurl.Curl()
        c.setopt(pycurl.URL, self.url)
if self.headers:
c.setopt(pycurl.HTTPHEADER,
['%s: %s' % (k, self.headers[k]) for k in self.headers])
c.setopt(pycurl.FOLLOWLOCATION, True)
c.setopt(pycurl.AUTOREFERER, True)
c.setopt(pycurl.MAXREDIRS, 16)
c.setopt(pycurl.NOSIGNAL, True)
c.setopt(pycurl.READFUNCTION, self.body_file.read)
c.setopt(pycurl.SSL_VERIFYHOST, 2)
if self.method == 'POST':
c.setopt(pycurl.POST, True)
post301 = getattr(pycurl, 'POST301', None)
if post301 is not None:
# Added in libcurl 7.17.1.
c.setopt(post301, True)
elif self.method == 'PUT':
c.setopt(pycurl.PUT, True)
elif self.method != 'GET':
c.setopt(pycurl.CUSTOMREQUEST, self.method)
return c
Request.ResponseClass = Response
Response.RequestClass = Request
llsd_formatters = {
'application/json': json_encode,
'application/llsd+binary': llsd.format_binary,
'application/llsd+notation': llsd.format_notation,
'application/llsd+xml': llsd.format_xml,
'application/xml': llsd.format_xml,
}
formatter_qualities = (
('application/llsd+xml', 1.0),
('application/llsd+notation', 0.5),
('application/llsd+binary', 0.4),
('application/xml', 0.3),
('application/json', 0.2),
)
def formatter_for_mime_type(mime_type):
'''Return a formatter that encodes to the given MIME type.
The result is a pair of function and MIME type.'''
try:
return llsd_formatters[mime_type], mime_type
except KeyError:
raise exc.HTTPInternalServerError(
'Could not use MIME type %r to format response' %
mime_type).exception
def formatter_for_request(req):
'''Return a formatter that encodes to the preferred type of the client.
The result is a pair of function and actual MIME type.'''
ctype = req.accept.best_match(formatter_qualities)
try:
return llsd_formatters[ctype], ctype
except KeyError:
raise exc.HTTPNotAcceptable().exception
def wsgi_adapter(func, environ, start_response):
'''Adapt a Siesta callable to act as a WSGI application.'''
# Process the request as appropriate.
try:
req = Request(environ)
#print req.urlvars
        result = func(req, **req.urlvars)
        resp = result
        if not isinstance(result, webob.Response):
            try:
                formatter, ctype = formatter_for_request(req)
                resp = req.ResponseClass(formatter(result), content_type=ctype)
                resp._llsd = result
except (JsonEncodeError, TypeError), err:
resp = exc.HTTPInternalServerError(
detail='Could not format response')
except exc.HTTPException, e:
resp = e
except socket.error, e:
resp = exc.HTTPInternalServerError(detail=e.args[1])
return resp(environ, start_response)
def llsd_callable(func):
'''Turn a callable into a Siesta application.'''
def replacement(environ, start_response):
return wsgi_adapter(func, environ, start_response)
return replacement
def llsd_method(http_method, func):
def replacement(environ, start_response):
if environ['REQUEST_METHOD'] == http_method:
return wsgi_adapter(func, environ, start_response)
return exc.HTTPMethodNotAllowed()(environ, start_response)
return replacement
http11_methods = 'OPTIONS GET HEAD POST PUT DELETE TRACE CONNECT'.split()
http11_methods.sort()
def llsd_class(cls):
'''Turn a class into a Siesta application.
    A new instance is created for each request. An HTTP method FOO is
turned into a call to the handle_foo method of the instance.'''
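    # Illustrative example (hypothetical handler class, not part of this
    # module):
    #   class ItemHandler(object):
    #       def handle_get(self, req, item_id):
    #           return {'item_id': item_id}
    #   item_app = llsd_class(ItemHandler)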
def foo(req, **kwargs):
instance = cls()
method = req.method.lower()
try:
handler = getattr(instance, 'handle_' + method)
except AttributeError:
allowed = [m for m in http11_methods
if hasattr(instance, 'handle_' + m.lower())]
raise exc.HTTPMethodNotAllowed(
headers={'Allow': ', '.join(allowed)}).exception
#print "kwargs: ", kwargs
return handler(req, **kwargs)
def replacement(environ, start_response):
return wsgi_adapter(foo, environ, start_response)
return replacement
def curl(reqs):
import pycurl
m = pycurl.CurlMulti()
curls = [r.curl() for r in reqs]
io = {}
for c in curls:
fp = StringIO()
hdr = StringIO()
c.setopt(pycurl.WRITEFUNCTION, fp.write)
c.setopt(pycurl.HEADERFUNCTION, hdr.write)
io[id(c)] = fp, hdr
m.handles = curls
try:
while True:
ret, num_handles = m.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
finally:
m.close()
for req, c in zip(reqs, curls):
fp, hdr = io[id(c)]
hdr.seek(0)
status = hdr.readline().rstrip()
headers = []
name, values = None, None
# XXX We don't currently handle bogus header data.
for line in hdr.readlines():
if not line[0].isspace():
if name:
headers.append((name, ' '.join(values)))
                name, value = line.strip().split(':', 1)
                values = [value]
else:
values.append(line.strip())
if name:
headers.append((name, ' '.join(values)))
        resp = req.ResponseClass(fp.getvalue(), status, headers, request=req)
route_re = re.compile(r'''
\{ # exact character "{"
(\w*) # "config" or variable (restricted to a-z, 0-9, _)
(?:([:~])([^}]+))? # optional :type or ~regex part
\} # exact character "}"
''', re.VERBOSE)
predefined_regexps = {
'uuid': r'[a-f0-9][a-f0-9-]{31,35}',
'int': r'\d+',
'host': r'[a-z0-9][a-z0-9\-\.]*',
}
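# Illustrative route patterns accepted by compile_route() (hypothetical paths):
#   '/agent/{agent_id:uuid}/info'  -> named group using the 'uuid' regexp
#   '/host/{host:host}/{port:int}' -> named 'host' and 'int' groups
#   '/files/{path~.+}'             -> custom regexp following '~'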
def compile_route(route):
fp = StringIO()
last_pos = 0
for match in route_re.finditer(route):
#print "matches: ", match.groups()
fp.write(re.escape(route[last_pos:match.start()]))
var_name = match.group(1)
sep = match.group(2)
expr = match.group(3)
if var_name == 'config':
expr = re.escape(str(config.get(var_name)))
else:
if expr:
if sep == ':':
expr = predefined_regexps[expr]
# otherwise, treat what follows '~' as a regexp
else:
expr = '[^/]+'
if var_name != '':
expr = '(?P<%s>%s)' % (var_name, expr)
else:
expr = '(%s)' % (expr,)
fp.write(expr)
last_pos = match.end()
fp.write(re.escape(route[last_pos:]))
compiled_route = '^%s$' % fp.getvalue()
#print route, "->", compiled_route
return compiled_route
class Router(object):
'''WSGI routing class. Parses a URL and hands off a request to
some other WSGI application. If no suitable application is found,
responds with a 404.'''
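    # Illustrative wiring (hypothetical handlers):
    #   router = Router()
    #   router.add('/items/{item_id:int}', llsd_class(ItemHandler),
    #              methods=['GET', 'PUT'])
    #   # 'router' is then used directly as the WSGI application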
def __init__(self):
self._new_routes = []
self._routes = []
self._paths = []
def add(self, route, app, methods=None):
self._new_routes.append((route, app, methods))
def _create_routes(self):
for route, app, methods in self._new_routes:
self._paths.append(route)
self._routes.append(
(re.compile(compile_route(route)),
app,
methods and dict.fromkeys(methods)))
self._new_routes = []
def __call__(self, environ, start_response):
# load up the config from the config file. Only needs to be
# done once per interpreter. This is the entry point of all
# siesta applications, so this is where we trap it.
_conf = config.get_config()
if _conf is None:
import os.path
fname = os.path.join(
environ.get('ll.config_dir', '/local/linden/etc'),
'indra.xml')
config.load(fname)
# proceed with handling the request
self._create_routes()
path_info = environ['PATH_INFO']
request_method = environ['REQUEST_METHOD']
allowed = []
for regex, app, methods in self._routes:
m = regex.match(path_info)
if m:
#print "groupdict:",m.groupdict()
if not methods or request_method in methods:
environ['paste.urlvars'] = m.groupdict()
return app(environ, start_response)
else:
allowed += methods
if allowed:
            allowed = dict.fromkeys(allowed).keys()
allowed.sort()
resp = exc.HTTPMethodNotAllowed(
headers={'Allow': ', '.join(allowed)})
else:
resp = exc.HTTPNotFound()
return resp(environ, start_response)
| gpl-3.0 | 1,837,857,754,948,852,700 | 32.299145 | 79 | 0.59298 | false |
jelmer/samba | buildtools/wafsamba/samba_install.py | 10 | 8530 | ###########################
# this handles the magic we need to do for installing
# with all the configure options that affect rpath and shared
# library use
import os
import Utils
from TaskGen import feature, before, after
from samba_utils import LIB_PATH, MODE_755, install_rpath, build_rpath
@feature('install_bin')
@after('apply_core')
@before('apply_link', 'apply_obj_vars')
def install_binary(self):
    '''install a binary, taking account of the different rpath variants'''
bld = self.bld
# get the ldflags we will use for install and build
install_ldflags = install_rpath(self)
build_ldflags = build_rpath(bld)
if not self.bld.is_install:
# just need to set rpath if we are not installing
self.env.RPATH = build_ldflags
return
# work out the install path, expanding variables
install_path = getattr(self, 'samba_inst_path', None) or '${BINDIR}'
install_path = bld.EXPAND_VARIABLES(install_path)
orig_target = os.path.basename(self.target)
if install_ldflags != build_ldflags:
# we will be creating a new target name, and using that for the
# install link. That stops us from overwriting the existing build
# target, which has different ldflags
self.target += '.inst'
# setup the right rpath link flags for the install
self.env.RPATH = install_ldflags
if not self.samba_install:
# this binary is marked not to be installed
return
# tell waf to install the right binary
bld.install_as(os.path.join(install_path, orig_target),
os.path.join(self.path.abspath(bld.env), self.target),
chmod=MODE_755)
@feature('install_lib')
@after('apply_core')
@before('apply_link', 'apply_obj_vars')
def install_library(self):
    '''install a library, taking account of the different rpath variants'''
if getattr(self, 'done_install_library', False):
return
bld = self.bld
default_env = bld.all_envs['default']
try:
if self.env['IS_EXTRA_PYTHON']:
bld.all_envs['default'] = bld.all_envs['extrapython']
install_ldflags = install_rpath(self)
build_ldflags = build_rpath(bld)
if not self.bld.is_install or not getattr(self, 'samba_install', True):
# just need to set the build rpath if we are not installing
self.env.RPATH = build_ldflags
return
# setup the install path, expanding variables
install_path = getattr(self, 'samba_inst_path', None)
if install_path is None:
if getattr(self, 'private_library', False):
install_path = '${PRIVATELIBDIR}'
else:
install_path = '${LIBDIR}'
install_path = bld.EXPAND_VARIABLES(install_path)
target_name = self.target
if install_ldflags != build_ldflags:
# we will be creating a new target name, and using that for the
# install link. That stops us from overwriting the existing build
# target, which has different ldflags
self.done_install_library = True
t = self.clone(self.env)
t.posted = False
t.target += '.inst'
t.name = self.name + '.inst'
self.env.RPATH = build_ldflags
else:
t = self
t.env.RPATH = install_ldflags
dev_link = None
# in the following the names are:
# - inst_name is the name with .inst. in it, in the build
# directory
# - install_name is the name in the install directory
# - install_link is a symlink in the install directory, to install_name
if getattr(self, 'samba_realname', None):
install_name = self.samba_realname
install_link = None
if getattr(self, 'soname', ''):
install_link = self.soname
if getattr(self, 'samba_type', None) == 'PYTHON':
inst_name = bld.make_libname(t.target, nolibprefix=True, python=True)
else:
inst_name = bld.make_libname(t.target)
elif self.vnum:
vnum_base = self.vnum.split('.')[0]
install_name = bld.make_libname(target_name, version=self.vnum)
install_link = bld.make_libname(target_name, version=vnum_base)
inst_name = bld.make_libname(t.target)
if not self.private_library:
# only generate the dev link for non-bundled libs
dev_link = bld.make_libname(target_name)
elif getattr(self, 'soname', ''):
install_name = bld.make_libname(target_name)
install_link = self.soname
inst_name = bld.make_libname(t.target)
else:
install_name = bld.make_libname(target_name)
install_link = None
inst_name = bld.make_libname(t.target)
if t.env.SONAME_ST:
# ensure we get the right names in the library
if install_link:
t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_link)
else:
t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_name)
t.env.SONAME_ST = ''
# tell waf to install the library
bld.install_as(os.path.join(install_path, install_name),
os.path.join(self.path.abspath(bld.env), inst_name),
chmod=MODE_755)
if install_link and install_link != install_name:
# and the symlink if needed
bld.symlink_as(os.path.join(install_path, install_link), os.path.basename(install_name))
if dev_link:
bld.symlink_as(os.path.join(install_path, dev_link), os.path.basename(install_name))
finally:
bld.all_envs['default'] = default_env
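def example_library_names(bld):
    '''Illustrative sketch (editor addition, not part of the original build rules):
    how inst_name/install_name/install_link relate for a non-private target "foo"
    with vnum "1.2.3". The exact strings depend on bld.make_libname() and are
    assumptions here, not captured build output.'''
    inst_name    = bld.make_libname('foo.inst')               # built in the build tree, e.g. libfoo.inst.so
    install_name = bld.make_libname('foo', version='1.2.3')   # real file installed to ${LIBDIR}, e.g. libfoo.so.1.2.3
    install_link = bld.make_libname('foo', version='1')       # SONAME symlink pointing at install_name
    dev_link     = bld.make_libname('foo')                    # development symlink, only for non-private libs
    return inst_name, install_name, install_link, dev_link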
@feature('cshlib')
@after('apply_implib')
@before('apply_vnum')
def apply_soname(self):
    '''set the soname linker flag for a shared library'''
if self.env.SONAME_ST and getattr(self, 'soname', ''):
self.env.append_value('LINKFLAGS', self.env.SONAME_ST % self.soname)
self.env.SONAME_ST = ''
@feature('cshlib')
@after('apply_implib')
@before('apply_vnum')
def apply_vscript(self):
'''add version-script arguments to library build'''
if self.env.HAVE_LD_VERSION_SCRIPT and getattr(self, 'version_script', ''):
self.env.append_value('LINKFLAGS', "-Wl,--version-script=%s" %
self.version_script)
self.version_script = None
##############################
# handle the creation of links for libraries and binaries in the build tree
@feature('symlink_lib')
@after('apply_link')
def symlink_lib(self):
'''symlink a shared lib'''
if self.target.endswith('.inst'):
return
blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env))
libpath = self.link_task.outputs[0].abspath(self.env)
    # calculate the link target and put it in the environment
soext=""
vnum = getattr(self, 'vnum', None)
if vnum is not None:
soext = '.' + vnum.split('.')[0]
link_target = getattr(self, 'link_name', '')
if link_target == '':
basename = os.path.basename(self.bld.make_libname(self.target, version=soext))
if getattr(self, "private_library", False):
link_target = '%s/private/%s' % (LIB_PATH, basename)
else:
link_target = '%s/%s' % (LIB_PATH, basename)
link_target = os.path.join(blddir, link_target)
if os.path.lexists(link_target):
if os.path.islink(link_target) and os.readlink(link_target) == libpath:
return
os.unlink(link_target)
link_container = os.path.dirname(link_target)
if not os.path.isdir(link_container):
os.makedirs(link_container)
os.symlink(libpath, link_target)
@feature('symlink_bin')
@after('apply_link')
def symlink_bin(self):
'''symlink a binary into the build directory'''
if self.target.endswith('.inst'):
return
if not self.link_task.outputs or not self.link_task.outputs[0]:
raise Utils.WafError('no outputs found for %s in symlink_bin' % self.name)
binpath = self.link_task.outputs[0].abspath(self.env)
bldpath = os.path.join(self.bld.env.BUILD_DIRECTORY, self.link_task.outputs[0].name)
if os.path.lexists(bldpath):
if os.path.islink(bldpath) and os.readlink(bldpath) == binpath:
return
os.unlink(bldpath)
os.symlink(binpath, bldpath)
| gpl-3.0 | 3,296,691,539,095,780,000 | 34.840336 | 100 | 0.60973 | false |
reberhardt7/sofa | sofa/__init__.py | 1 | 2889 | import logging
log = logging.getLogger(__name__)
from structure import (
APIAttribute,
APIValidator,
APIResource,
APICollection,
APISession,
VirtualResourceRegistry,
VirtualResource,
resource_registry,
AuthContext,
)
from validators import (
NumericIdValidator,
StringIdValidator,
BooleanValidator,
IntegerValidator,
FloatValidator,
StringValidator,
DateValidator,
DatetimeValidator,
EmailValidator,
ZipCodeValidator,
)
from readers import (
date_reader,
datetime_reader,
)
from writers import (
boolean_writer,
date_writer,
datetime_writer,
)
from types import (
NumericId,
StringId,
Boolean,
Integer,
Float,
String,
Date,
Datetime,
Email,
ZipCode,
)
from config import (
load_api_config,
api_config,
collection_class_map,
get_class_name,
)
from responses import (
ResourceCreated,
ResourceUpdated,
ResourceDeleted,
ResourceException,
)
from tree import Root as TraversalRoot
def includeme(config):
from structure import ContextPredicate
config.add_view_predicate('api_context', ContextPredicate)
config.add_view('sofa.views.nopath_view', context=TraversalRoot, renderer='json')
config.add_view('sofa.views.updated_view', context=ResourceUpdated, renderer='json')
config.add_view('sofa.views.CollectionViews', attr='get', context=APICollection,
renderer='json', request_method='GET', api_context='list')
config.add_view('sofa.views.CollectionViews', attr='post', context=APICollection,
renderer='json', request_method='POST', api_context='create')
config.add_view('sofa.views.CollectionViews', attr='other_verb', context=APICollection,
renderer='json')
config.add_view('sofa.views.ResourceViews', attr='get', context=APIResource,
renderer='json', request_method='GET', api_context='read')
config.add_view('sofa.views.ResourceViews', attr='put', context=APIResource,
renderer='json', request_method='PATCH', api_context='update')
config.add_view('sofa.views.ResourceViews', attr='delete', context=APIResource,
renderer='json', request_method='DELETE', api_context='delete')
config.add_view('sofa.views.ResourceViews', attr='other_verb', context=APIResource,
renderer='json')
config.add_view('sofa.views.resource_exception_view', context=ResourceException,
renderer='json')
def configure(sqla_session=None, api_config_path=None, session_lookup_func=None):
if sqla_session:
config.set_sqla_session(sqla_session)
if api_config_path:
config.load_api_config(api_config_path)
if session_lookup_func:
config.set_session_lookup_func(session_lookup_func)
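def example_setup():
    """Illustrative sketch (editor addition, not part of the package API): wiring Sofa
    into a Pyramid application. The 'api.yaml' path and the session factory you would
    normally pass to configure() are hypothetical placeholders."""
    from pyramid.config import Configurator
    pyramid_config = Configurator()
    pyramid_config.include('sofa')            # runs includeme() above, registering the views
    configure(api_config_path='api.yaml')     # hypothetical path to an API description file
    return pyramid_config.make_wsgi_app()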
| mit | 7,649,218,446,501,084,000 | 31.1 | 91 | 0.669782 | false |
vadimtk/chrome4sdp | tools/perf/page_sets/key_noop_cases.py | 9 | 2263 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class NoOpPage(page_module.Page):
def __init__(self, url, page_set):
super(NoOpPage, self).__init__(
url=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedMobilePageState)
def RunNavigateSteps(self, action_runner):
super(NoOpPage, self).RunNavigateSteps(action_runner)
# Let load activity settle.
action_runner.Wait(2)
def RunPageInteractions(self, action_runner):
# The default page interaction is simply waiting in an idle state.
with action_runner.CreateInteraction('IdleWaiting'):
action_runner.Wait(5)
class NoOpTouchScrollPage(NoOpPage):
def __init__(self, url, page_set):
super(NoOpTouchScrollPage, self).__init__(url=url, page_set=page_set)
def RunPageInteractions(self, action_runner):
# The noop touch motion should last ~5 seconds.
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage(direction='down', use_touch=True,
speed_in_pixels_per_second=300, distance=1500)
class KeyNoOpCasesPageSet(story.StorySet):
""" Key no-op cases """
def __init__(self):
super(KeyNoOpCasesPageSet, self).__init__()
# Why: An infinite rAF loop which does not modify the page should incur
# minimal activity.
self.AddStory(NoOpPage('file://key_noop_cases/no_op_raf.html', self))
    # Why: An infinite setTimeout loop which does not modify the page should
# incur minimal activity.
self.AddStory(NoOpPage('file://key_noop_cases/no_op_settimeout.html', self))
# Why: Scrolling an empty, unscrollable page should have no expensive side
# effects, as overscroll is suppressed in such cases.
self.AddStory(NoOpTouchScrollPage(
'file://key_noop_cases/no_op_scroll.html', self))
# Why: Feeding a stream of touch events to a no-op handler should be cheap.
self.AddStory(NoOpTouchScrollPage(
'file://key_noop_cases/no_op_touch_handler.html', self))
| bsd-3-clause | 4,583,632,569,536,024,000 | 36.098361 | 80 | 0.70791 | false |
MozillaSecurity/peach | Peach/Mutators/datatree.py | 3 | 4003 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from Peach.mutator import *
from Peach.Engine.common import *
class DataTreeRemoveMutator(Mutator):
"""
Remove nodes from data tree.
"""
def __init__(self, peach, node):
Mutator.__init__(self)
self.isFinite = True
self.name = "DataTreeRemoveMutator"
self._peach = peach
def next(self):
raise MutatorCompleted()
def getCount(self):
return 1
@staticmethod
def supportedDataElement(e):
if isinstance(e, DataElement) and e.isMutable:
return True
return False
def sequentialMutation(self, node):
self.changedName = node.getFullnameInDataModel()
node.setValue("")
def randomMutation(self, node, rand):
self.changedName = node.getFullnameInDataModel()
node.setValue("")
class DataTreeDuplicateMutator(Mutator):
"""
Duplicate a node's value starting at 2x through 50x.
"""
def __init__(self, peach, node):
Mutator.__init__(self)
self.isFinite = True
self.name = "DataTreeDuplicateMutator"
self._peach = peach
self._cnt = 2
self._maxCount = 50
def next(self):
self._cnt += 1
if self._cnt > self._maxCount:
raise MutatorCompleted()
def getCount(self):
return self._maxCount
@staticmethod
def supportedDataElement(e):
if isinstance(e, DataElement) and e.isMutable:
return True
return False
def sequentialMutation(self, node):
self.changedName = node.getFullnameInDataModel()
node.setValue(node.getValue() * self._cnt)
def randomMutation(self, node, rand):
self.changedName = node.getFullnameInDataModel()
count = rand.randint(0, self._cnt)
node.setValue(node.getValue() * count)
class DataTreeSwapNearNodesMutator(Mutator):
"""
Swap two nodes in the data model that are near each other.
TODO: Actually move the nodes instead of just the data.
"""
def __init__(self, peach, node):
Mutator.__init__(self)
self.isFinite = True
self.name = "DataTreeSwapNearNodesMutator"
self._peach = peach
def next(self):
raise MutatorCompleted()
def getCount(self):
return 1
def _moveNext(self, currentNode):
        # Check if we are top dog
if currentNode.parent is None or \
not isinstance(currentNode.parent, DataElement):
return None
# Get sibling
foundCurrent = False
for node in currentNode.parent:
if node == currentNode:
foundCurrent = True
continue
if foundCurrent and isinstance(node, DataElement):
return node
        # Get sibling of parent
return self._moveNext(currentNode.parent)
def _nextNode(self, node):
nextNode = None
# Walk down node tree
for child in node._children:
if isinstance(child, DataElement):
nextNode = child
break
# Walk over or up if we can
if nextNode is None:
nextNode = self._moveNext(node)
return nextNode
@staticmethod
def supportedDataElement(e):
if isinstance(e, DataElement) and e.isMutable:
return True
return False
def sequentialMutation(self, node):
self.changedName = node.getFullnameInDataModel()
nextNode = self._nextNode(node)
if nextNode is not None:
v1 = node.getValue()
v2 = nextNode.getValue()
node.setValue(v2)
nextNode.setValue(v1)
def randomMutation(self, node, rand):
self.changedName = node.getFullnameInDataModel()
self.sequentialMutation(node)
| mpl-2.0 | -7,462,667,516,743,321,000 | 27.592857 | 69 | 0.606545 | false |
HorriblePeople/CardMachine | WSotT_CardGen.py | 3 | 11603 | from PIL import Image, ImageFont, ImageDraw
import os, glob
from math import ceil
import ConfigParser
import PIL_Helper
class DeckConfiguration:
def __init__(self, configfilename="deck.cfg"):
Config = ConfigParser.ConfigParser()
Config.read(configfilename)
self.cardnum=0
#Global Deck Settings
settings = {}
for option in Config.options("Deck"):
settings[option]=Config.get("Deck", option)
self.cardpath = settings["cardpath"]
self.ResourcePath = settings["resourcepath"]
self.bleed_w = int(settings["cardwidth"])
self.bleed_h = int(settings["cardheight"])
self.w_marg = int(settings["marginw"])
self.h_marg = int(settings["marginh"])
self.bleedrect=[(self.w_marg,self.h_marg),(self.bleed_w-self.w_marg,self.bleed_h-self.h_marg)]
self.fullrect=[(0,0),(self.bleed_w,self.bleed_h)]
self.textmaxwidth = int(settings["textmaxwidth"])
self.chartextmaxwidth = int(settings["chartextmaxwidth"])
self.TitleFont = ImageFont.truetype(settings["titlefont"],int(settings["titlefontsize"]))
self.TypeFont = ImageFont.truetype(settings["typefont"],int(settings["typefontsize"]))
self.CopyFont = ImageFont.truetype(settings["copyfont"],int(settings["copyfontsize"]))
#Anchor Coordinates
anchorsettings = {}
for option in Config.options("Anchors"):
anchorsettings[option]=Config.get("Anchors", option)
self.TitleAnchor = (self.bleed_w/2,int(anchorsettings["titleanchory"]))
self.FlavorTextAnchor = (self.bleed_w/2,int(anchorsettings["flavortextanchory"]))
self.CopyTextAnchor = (self.bleed_w/2,int(anchorsettings["copytextanchory"]))
self.Anchor1 = (int(anchorsettings["anchor1_x"]),int(anchorsettings["anchor1_y"]))
self.Anchor2 = (int(anchorsettings["anchor2_x"]),int(anchorsettings["anchor2_y"]))
self.Anchor3 = (int(anchorsettings["anchor3_x"]),int(anchorsettings["anchor3_y"]))
self.Anchor4 = (int(anchorsettings["anchor4_x"]),int(anchorsettings["anchor4_y"]))
self.ArtAnchor = (int(anchorsettings["artanchorx"]),int(anchorsettings["artanchory"]))
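# Illustrative sketch (editor addition): the kind of deck.cfg file DeckConfiguration
# expects. The section and option names are exactly the ones read above; every value
# below is a made-up example, not taken from a real deck.
_EXAMPLE_DECK_CFG = """
[Deck]
cardpath = cards/
resourcepath = resources/
cardwidth = 825
cardheight = 1125
marginw = 37
marginh = 37
textmaxwidth = 600
chartextmaxwidth = 500
titlefont = resources/title.ttf
titlefontsize = 60
typefont = resources/type.ttf
typefontsize = 40
copyfont = resources/copy.ttf
copyfontsize = 20

[Anchors]
titleanchory = 120
flavortextanchory = 950
copytextanchory = 1080
anchor1_x = 150
anchor1_y = 300
anchor2_x = 650
anchor2_y = 300
anchor3_x = 650
anchor3_y = 700
anchor4_x = 150
anchor4_y = 700
artanchorx = 150
artanchory = 400
"""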
def SubAnchor(anchor=(0,0), offset=20):
return (anchor[0],anchor[1]+offset)
def MakeBadCard(config, tags):
image = Image.new("RGBA", (config.bleed_w, config.bleed_h))
draw = ImageDraw.Draw(image)
draw.rectangle(config.fullrect,(255,255,255,255))
PIL_Helper.AddText(image = image,
text = "This Card Intentionally Left Blank",
font = config.TitleFont,
fill = (200,200,200),
anchor = config.TitleAnchor,
max_width = config.textmaxwidth)
return image
def MakeTableCard(config, tags):
image = Image.new("RGBA", (config.bleed_w, config.bleed_h))
draw = ImageDraw.Draw(image)
bg_im = Image.open(config.ResourcePath + "tableclean_frontbg.png")
image.paste(bg_im,(0,0))
draw.rectangle(config.bleedrect,
outline=(0,0,0,255))
PIL_Helper.AddText(image = image,
text = "TABLE",
font = config.TitleFont,
fill = (0,0,0),
anchor = config.TitleAnchor,
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "(clean)",
font = config.TypeFont,
fill = (0,0,0),
anchor = SubAnchor(config.TitleAnchor,70),
max_width = config.textmaxwidth)
return image
def MakeFoodCard(config, tags):
image = Image.new("RGBA", (config.bleed_w, config.bleed_h))
draw = ImageDraw.Draw(image)
bg_im = Image.open(config.ResourcePath + "food_frontbg.png")
image.paste(bg_im,(0,0))
draw.rectangle(config.bleedrect,
outline=(0,0,0,255))
PIL_Helper.AddText(image = image,
text = tags[1],
font = config.TitleFont,
fill = (0,0,0),
anchor = config.TitleAnchor,
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Food",
font = config.TypeFont,
fill = (0,0,0),
anchor = SubAnchor(config.TitleAnchor,-35),
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Filling:",
font = config.TypeFont,
fill = (0,0,0),
anchor = config.Anchor3,
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[2],
font = config.TitleFont,
fill = (0,0,0),
anchor = SubAnchor(config.Anchor3,40),
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[3].replace(r'\n', '\n'),
font = config.TypeFont,
fill = (0,0,0),
anchor = config.FlavorTextAnchor,
max_width = config.textmaxwidth)
return image
def MakeCharacterCard(config, tags):
image = Image.new("RGBA", (config.bleed_w, config.bleed_h))
draw = ImageDraw.Draw(image)
bg_im = Image.open(config.ResourcePath + "char_frontbg.png")
image.paste(bg_im,(0,0))
draw.rectangle(config.bleedrect,
outline=(0,0,0,255))
PIL_Helper.AddText(image = image,
text = tags[1],
font = config.TitleFont,
fill = (0,0,0),
anchor = config.TitleAnchor,
max_width = config.chartextmaxwidth)
PIL_Helper.AddText(image = image,
text = "Character",
font = config.TypeFont,
fill = (0,0,0),
anchor = SubAnchor(config.TitleAnchor,-35),
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Base Tip:",
font = config.TypeFont,
fill = (0,0,0),
anchor = config.Anchor1,
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[2],
font = config.TitleFont,
fill = (0,0,0),
anchor = SubAnchor(config.Anchor1,25),
max_width = 200)
PIL_Helper.AddText(image = image,
text = "Apetite:",
font = config.TypeFont,
fill = (0,0,0),
anchor = config.Anchor2,
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[3],
font = config.TitleFont,
fill = (0,0,0),
anchor = SubAnchor(config.Anchor2,25),
max_width = 200)
PIL_Helper.AddText(image = image,
text = "Wrath:",
font = config.TypeFont,
fill = (0,0,0),
anchor = config.Anchor3,
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[4],
font = config.TitleFont,
fill = (0,0,0),
anchor = SubAnchor(config.Anchor3,40),
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[5].replace(r'\n', '\n'),
font = config.TypeFont,
fill = (0,0,0),
anchor = config.FlavorTextAnchor,
max_width = config.textmaxwidth)
return image
def MakeEatCard(config, tags):
image = Image.new("RGBA", (config.bleed_w, config.bleed_h))
draw = ImageDraw.Draw(image)
bg_im = Image.open(config.ResourcePath + "eat_frontbg.png")
image.paste(bg_im,(0,0))
draw.rectangle(config.bleedrect,
outline=(0,0,0,255))
PIL_Helper.AddText(image = image,
text = tags[1],
font = config.TitleFont,
fill = (0,0,0),
anchor = config.TitleAnchor,
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Eating:",
font = config.TypeFont,
fill = (0,0,0),
anchor = SubAnchor(config.TitleAnchor,-35),
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Eaten:",
font = config.TypeFont,
fill = (0,0,0),
anchor = config.Anchor3,
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[2],
font = config.TitleFont,
fill = (0,0,0),
anchor = SubAnchor(config.Anchor3,40),
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[3].replace(r'\n', '\n'),
font = config.TypeFont,
fill = (0,0,0),
anchor = config.FlavorTextAnchor,
max_width = config.textmaxwidth)
return image
def MakeWrathCard(config, tags):
image = Image.new("RGBA", (config.bleed_w, config.bleed_h))
draw = ImageDraw.Draw(image)
bg_im = Image.open(config.ResourcePath + "eat_frontbg.png")
image.paste(bg_im,(0,0))
draw.rectangle(config.bleedrect,
outline=(0,0,0,255))
PIL_Helper.AddText(image = image,
text = tags[1],
font = config.TitleFont,
fill = (0,0,0),
anchor = config.TitleAnchor,
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Wrath:",
font = config.TypeFont,
fill = (0,0,0),
anchor = SubAnchor(config.TitleAnchor,-35),
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Magnitude:",
font = config.TypeFont,
fill = (0,0,0),
anchor = config.Anchor3,
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[2],
font = config.TitleFont,
fill = (0,0,0),
anchor = SubAnchor(config.Anchor3,40),
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[3].replace(r'\n', '\n'),
font = config.TypeFont,
fill = (0,0,0),
anchor = config.FlavorTextAnchor,
max_width = config.textmaxwidth)
return image
def MakePartyCard(config, tags):
image = Image.new("RGBA", (config.bleed_w, config.bleed_h))
draw = ImageDraw.Draw(image)
bg_im = Image.open(config.ResourcePath + "party_frontbg.png")
image.paste(bg_im,(0,0))
draw.rectangle(config.bleedrect,
outline=(0,0,0,255))
PIL_Helper.AddText(image = image,
text = tags[1],
font = config.TitleFont,
fill = (0,0,0),
anchor = config.TitleAnchor,
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Party",
font = config.TypeFont,
fill = (0,0,0),
anchor = SubAnchor(config.TitleAnchor,-35),
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Party Of:",
font = config.TypeFont,
fill = (0,0,0),
anchor = config.Anchor3,
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[2],
font = config.TitleFont,
fill = (0,0,0),
anchor = SubAnchor(config.Anchor3,40),
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[3].replace(r'\n', '\n'),
font = config.TypeFont,
fill = (0,0,0),
anchor = config.FlavorTextAnchor,
max_width = config.textmaxwidth)
return image
def MakeCleanCard(config, tags):
image = Image.new("RGBA", (config.bleed_w, config.bleed_h))
draw = ImageDraw.Draw(image)
bg_im = Image.open(config.ResourcePath + "tableclean_frontbg.png")
image.paste(bg_im,(0,0))
draw.rectangle(config.bleedrect,
outline=(0,0,0,255))
PIL_Helper.AddText(image = image,
text = "Table Cleaned!",
font = config.TitleFont,
fill = (0,0,0),
anchor = config.TitleAnchor,
max_width = config.textmaxwidth)
return image
def MakeTipCard(config, tags):
image = Image.new("RGBA", (config.bleed_w, config.bleed_h))
draw = ImageDraw.Draw(image)
bg_im = Image.open(config.ResourcePath + "food_frontbg.png")
image.paste(bg_im,(0,0))
draw.rectangle(config.bleedrect,
outline=(0,0,0,255))
PIL_Helper.AddText(image = image,
text = tags[1],
font = config.TitleFont,
fill = (0,0,0),
anchor = config.TitleAnchor,
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Tip!",
font = config.TypeFont,
fill = (0,0,0),
anchor = SubAnchor(config.TitleAnchor,-35),
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = tags[2],
font = config.TypeFont,
fill = (0,0,0),
anchor = config.FlavorTextAnchor,
max_width = config.textmaxwidth)
PIL_Helper.AddText(image = image,
text = "Value on eBay:",
font = config.TypeFont,
fill = (0,0,0),
anchor = SubAnchor(config.Anchor3,-10),
max_width = 200)
PIL_Helper.AddText(image = image,
text = tags[3],
font = config.TitleFont,
fill = (0,0,0),
anchor = SubAnchor(config.Anchor3,80),
max_width = 200)
return image
MakerDict={
"TABLE": MakeTableCard,
"FOOD": MakeFoodCard,
"CHAR": MakeCharacterCard,
"EAT": MakeEatCard,
"PARTY": MakePartyCard,
"CLEAN": MakeCleanCard,
"TIP" : MakeTipCard,
"WRATH" : MakeWrathCard
}
def main(linein, configset):
configset.cardnum+=1
tags = linein.split('`')
try:
im = MakerDict[tags[0]](configset,tags)
im.paste(Image.open(configset.ResourcePath + "artmissing.png"),configset.ArtAnchor)
except:
im = MakeBadCard(configset,tags)
print "Warning: Bad Card"
return im
if __name__ == '__main__':
configset = DeckConfiguration("deck.cfg")
main("CHAR`Test Card`-2`3`4`",configset) | mit | 7,997,580,099,266,031,000 | 26.432624 | 96 | 0.6806 | false |
ai-se/parGALE | epoal_src/Z3ModelWPTNoObjectives.py | 1 | 6469 | '''
Created on Jan 17, 2014
@author: ezulkosk
'''
from z3 import *
FeatureIndexMap = {}
FeatureVariable = []
FeatureIndexMap['web_portal'] = 0
web_portal = Bool('web_portal')
FeatureVariable.append(web_portal)
FeatureIndexMap['web_portal'] = 1
web_portal = Bool('web_portal')
FeatureVariable.append(web_portal)
FeatureIndexMap['add_services'] = 2
add_services = Bool('add_services')
FeatureVariable.append(add_services)
FeatureIndexMap['site_stats'] = 3
site_stats = Bool('site_stats')
FeatureVariable.append(site_stats)
FeatureIndexMap['basic'] = 4
basic = Bool('basic')
FeatureVariable.append(basic)
FeatureIndexMap['advanced'] = 5
advanced = Bool('advanced')
FeatureVariable.append(advanced)
FeatureIndexMap['site_search'] = 6
site_search = Bool('site_search')
FeatureVariable.append(site_search)
FeatureIndexMap['images'] = 7
images = Bool('images')
FeatureVariable.append(images)
FeatureIndexMap['text'] = 8
text = Bool('text')
FeatureVariable.append(text)
FeatureIndexMap['html'] = 9
html = Bool('html')
FeatureVariable.append(html)
FeatureIndexMap['dynamic'] = 10
dynamic = Bool('dynamic')
FeatureVariable.append(dynamic)
FeatureIndexMap['ad_server'] = 11
ad_server = Bool('ad_server')
FeatureVariable.append(ad_server)
FeatureIndexMap['reports'] = 12
reports = Bool('reports')
FeatureVariable.append(reports)
FeatureIndexMap['popups'] = 13
popups = Bool('popups')
FeatureVariable.append(popups)
FeatureIndexMap['banners'] = 14
banners = Bool('banners')
FeatureVariable.append(banners)
FeatureIndexMap['ban_img'] = 15
ban_img = Bool('ban_img')
FeatureVariable.append(ban_img)
FeatureIndexMap['ban_flash'] = 16
ban_flash = Bool('ban_flash')
FeatureVariable.append(ban_flash)
FeatureIndexMap['keyword'] = 17
keyword = Bool('keyword')
FeatureVariable.append(keyword)
FeatureIndexMap['web_server'] = 18
web_server = Bool('web_server')
FeatureVariable.append(web_server)
FeatureIndexMap['logging'] = 19
logging = Bool('logging')
FeatureVariable.append(logging)
FeatureIndexMap['db'] = 20
db = Bool('db')
FeatureVariable.append(db)
FeatureIndexMap['file'] = 21
file = Bool('file')
FeatureVariable.append(file)
FeatureIndexMap['protocol'] = 22
protocol = Bool('protocol')
FeatureVariable.append(protocol)
FeatureIndexMap['nttp'] = 23
nttp = Bool('nttp')
FeatureVariable.append(nttp)
FeatureIndexMap['ftp'] = 24
ftp = Bool('ftp')
FeatureVariable.append(ftp)
FeatureIndexMap['https'] = 25
https = Bool('https')
FeatureVariable.append(https)
FeatureIndexMap['cont'] = 26
cont = Bool('cont')
FeatureVariable.append(cont)
FeatureIndexMap['static'] = 27
static = Bool('static')
FeatureVariable.append(static)
FeatureIndexMap['active'] = 28
active = Bool('active')
FeatureVariable.append(active)
FeatureIndexMap['asp'] = 29
asp = Bool('asp')
FeatureVariable.append(asp)
FeatureIndexMap['php'] = 30
php = Bool('php')
FeatureVariable.append(php)
FeatureIndexMap['jsp'] = 31
jsp = Bool('jsp')
FeatureVariable.append(jsp)
FeatureIndexMap['cgi'] = 32
cgi = Bool('cgi')
FeatureVariable.append(cgi)
FeatureIndexMap['persistence'] = 33
persistence = Bool('persistence')
FeatureVariable.append(persistence)
FeatureIndexMap['xml'] = 34
xml = Bool('xml')
FeatureVariable.append(xml)
FeatureIndexMap['database'] = 35
database = Bool('database')
FeatureVariable.append(database)
FeatureIndexMap['ri'] = 36
ri = Bool('ri')
FeatureVariable.append(ri)
FeatureIndexMap['data_storage'] = 37
data_storage = Bool('data_storage')
FeatureVariable.append(data_storage)
FeatureIndexMap['data_transfer'] = 38
data_transfer = Bool('data_transfer')
FeatureVariable.append(data_transfer)
FeatureIndexMap['user_auth'] = 39
user_auth = Bool('user_auth')
FeatureVariable.append(user_auth)
FeatureIndexMap['performance'] = 40
performance = Bool('performance')
FeatureVariable.append(performance)
FeatureIndexMap['ms'] = 41
ms = Bool('ms')
FeatureVariable.append(ms)
FeatureIndexMap['sec'] = 42
sec = Bool('sec')
FeatureVariable.append(sec)
FeatureIndexMap['min'] = 43
min = Bool('min')
FeatureVariable.append(min)
#s = Solver()
s = Goal()
# Parent-Children
s.add(Implies(add_services, web_portal))
s.add(Implies(web_server, web_portal))
s.add(Implies(persistence, web_portal))
s.add(Implies(ri, web_portal))
s.add(Implies(performance, web_portal))
s.add(Implies(site_stats, add_services))
s.add(Implies(site_search, add_services))
s.add(Implies(ad_server, add_services))
s.add(Implies(basic, site_stats))
s.add(Implies(advanced, site_stats))
s.add(Implies(images, site_search))
s.add(Implies(text, site_search))
s.add(Implies(html, text))
s.add(Implies(dynamic, text))
s.add(Implies(reports, ad_server))
s.add(Implies(popups, ad_server))
s.add(Implies(banners, ad_server))
s.add(Implies(keyword, ad_server))
s.add(Implies(ban_img, banners))
s.add(Implies(ban_flash, banners))
s.add(Implies(logging, web_server))
s.add(Implies(protocol, web_server))
s.add(Implies(cont, web_server))
s.add(Implies(db, logging))
s.add(Implies(file, logging))
s.add(Implies(nttp, protocol))
s.add(Implies(ftp, protocol))
s.add(Implies(https, protocol))
s.add(Implies(static, cont))
s.add(Implies(active, cont))
s.add(Implies(asp, active))
s.add(Implies(php, active))
s.add(Implies(jsp, active))
s.add(Implies(cgi, active))
s.add(Implies(xml, persistence))
s.add(Implies(database, persistence))
s.add(Implies(data_storage, ri))
s.add(Implies(data_transfer, ri))
s.add(Implies(user_auth, ri))
s.add(Implies(ms, performance))
s.add(Implies(sec, performance))
s.add(Implies(min, performance))
# Mandatory-Children
s.add(web_server == web_portal)
s.add(basic == site_stats)
s.add(html == text)
s.add(reports == ad_server)
s.add(banners == ad_server)
s.add(ban_img == banners)
s.add(cont == web_server)
s.add(static == cont)
# Exclusive-Or Constraints
s.add(db == And(Not(file), logging))
s.add(file == And(Not(db), logging))
s.add(xml == And(Not(database), persistence))
s.add(database == And(Not(xml), persistence))
s.add(ms == And(Not(sec), Not(min), performance))
s.add(sec == And(Not(ms), Not(min), performance))
s.add(min == And(Not(ms), Not(sec), performance))
# Or Constraints
s.add(protocol == Or(nttp, ftp, https))
s.add(active == Or(asp, php, jsp, cgi))
s.add(ri == Or(data_storage, data_transfer, user_auth))
# Requires Constraints
s.add(Implies(dynamic, active))
s.add(Implies(keyword, text))
s.add(Implies(db, database))
s.add(Implies(file, ftp))
s.add(Implies(data_transfer, https))
# Excludes Constraints
s.add(Not(And(https, ms)))
s.add(web_portal == True)
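# Illustrative sketch (editor addition): one way to check the constraint set above for
# satisfiability and read back a feature selection. This is ordinary z3py usage, not
# code from the original model generator.
def example_solve():
    solver = Solver()
    solver.add(s.as_expr())  # the Goal built above, folded into a single conjunction
    if solver.check() == sat:
        model = solver.model()
        # list the features that are switched on in this configuration
        return [str(f) for f in FeatureVariable
                if is_true(model.eval(f, model_completion=True))]
    return None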
| unlicense | -2,046,074,262,876,872,400 | 27.126087 | 55 | 0.735044 | false |
CalthorpeAnalytics/urbanfootprint | footprint/main/resources/pickled_dict_field.py | 1 | 2701 |
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
import string
from tastypie.fields import DictField, NOT_PROVIDED, ApiField
from footprint.main.lib.functions import map_dict_to_dict, deep_copy_dict_structure, my_deep_copy
__author__ = 'calthorpe_analytics'
class ObjectField(ApiField):
"""
Handles any object by turning it into a dict by recursively using each object's __dict__ attribute
Arrays are left as arrays
Since class data is removed a reference instance would be needed to rehydrate it
"""
dehydrated_type = 'dict'
help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}"
def convert(self, value):
if value is None:
return None
return my_deep_copy(value, True)
class PickledObjField(ObjectField):
"""
For read-only configurations, dehydration of arbitrary object graphs. Hydration isn't possible without having a reference instance to know the classes
"""
def dehydrate(self, bundle):
"""
Handles the object dehydration
:param bundle:
:return:
"""
# Deep copy the structure to create new dict instance so we don't mutilate the source
obj = super(PickledObjField, self).dehydrate(bundle)
return my_deep_copy(obj, True)
class PickledDictField(ApiField):
def dehydrate(self, bundle):
"""
:param bundle:
:return:
"""
# Deep copy the structure to create new dict instance so we don't mutilate the source
try:
if not isinstance(getattr(bundle.obj, self.attribute), dict):
return {}
value = super(PickledDictField, self).dehydrate(bundle)
return my_deep_copy(value)
except:
setattr(bundle.obj, self.attribute, None) # value got deformed--clear it
return my_deep_copy(super(PickledDictField, self).dehydrate(bundle))
def hydrate(self, bundle):
"""
Hydrates a dict of resource URI to the corresponding instances by resolving the URIs. Like dehydrate_selections, this could be generalized
:param bundle:
:return:
"""
value = super(PickledDictField, self).hydrate(bundle)
return value
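def example_resource():
    """Illustrative sketch (editor addition): declaring one of these fields on a
    tastypie resource. 'ExampleResource' and the 'configuration' attribute are
    hypothetical placeholders, not part of this module."""
    from tastypie.resources import Resource

    class ExampleResource(Resource):
        configuration = PickledDictField(attribute='configuration', null=True, blank=True)

        class Meta:
            resource_name = 'example'

    return ExampleResource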
| gpl-3.0 | -7,542,301,172,931,074,000 | 34.539474 | 158 | 0.669382 | false |
kichkasch/fileextractor | imagegenerator/GeneratorCoreLinux.py | 1 | 13067 | """
Core for the ImageGenerator - Implementation for Linux.
The main idea for this implementation is to pass the request for imaging
to the Linux command C{dd}.
This module registers itself with the L{CoreManager} as a core.
FileExtractor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FileExtractor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FileExtractor. If not, see <http://www.gnu.org/licenses/>.
@var NAME_IMPL: Name of this implementation
@type NAME_IMPL: C{String}
@var PARAM_INPUTFILE: Name of the parameter for specifying the input file for the dd command
@type PARAM_INPUTFILE: C{String}
@var PARAM_OUTPUTFILE: Name of the parameter for specifying the output file for the dd command
@type PARAM_OUTPUTFILE: C{String}
@var PARAM_BLOCKSIZE: Name of the parameter for specifying the blocksize for the dd command
@type PARAM_BLOCKSIZE: C{String}
@var FSTAB_LOCATION: Location for the file system table within the local file system. Required for
    determining possible sources for the imaging.
@type FSTAB_LOCATION: C{String}
@var PROC_LOCATION: Location of the file holding partition information inside the proc file system
@type PROC_LOCATION: C{String}
@var DEFAULT_PATH_DD: Default location for the dd command
@type DEFAULT_PATH_DD: C{String}
"""
import GeneratorCoreAbstract
import Runtime
import os
import CoreManager
import FESettings
import ImageSettings
NAME_IMPL = "Linux"
PARAM_INPUTFILE = "if"
PARAM_OUTPUTFILE = "of"
PARAM_BLOCKSIZE="bs"
FSTAB_LOCATION = "/etc/fstab"
PROC_LOCATION = "/proc/partitions"
FDISK_LOCATION = "/sbin/fdisk"
DEV_PREFIX = "/dev/"
DEFAULT_PATH_DD = "/bin/dd"
TMP_FDISK_FILE = ImageSettings.PATH_TMP_FDISK
FILE_TYPES = ImageSettings.PATH_FILETYPES
class GeneratorCore(GeneratorCoreAbstract.CoreInterface):
"""
Class for implementing the core for Linux systems.
@ivar _settings: Settings for the execution
@type _settings: L{Runtime.Settings}
"""
def __init__(self, settings):
"""
Initialises the core.
The parameters are assigned to instance variables and the super constructor
(L{GeneratorCoreAbstract.CoreInterface.__init__}) is called for initialising the name of the
implementation.
"""
GeneratorCoreAbstract.CoreInterface.__init__( self, NAME_IMPL)
self._settings = settings
def createImage(self, status):
"""
Invokes the OS command for the image generation.
The filesize observer (L{Runtime.FileSizeObserver}) is started and after assembling the command
it is started using the L{os.system} method. Afterwards, the status object is set to finished.
Corrosponging to the return value of the system call, the error field in the status instance
is set.
@param status: Reference to the status object used for this execution
@type status: L{Runtime.Status}
@return: Return value of the OS command C{dd}
@rtype: C{int}
"""
filesize_observer = Runtime.FileSizeObserver(self._settings.getDestination(), status)
filesize_observer.start()
command = self._assembleCommand()
ret = os.system(command)
status.setFinished()
if ret != 0:
st = "Check log file '%s'." %(self._settings.getRedirectOutputBuffer())
try:
file = open(self._settings.getRedirectOutputBuffer(), 'r')
msg = file.read()
file.close()
st = msg
            except Exception, msg:
pass
status.setError("Linux Core: \nError whilst imaging\nErrorCode: %s\n%s" %(str(ret), st))
return ret
def _assembleCommand(self):
"""
Assembles the command as it would be used in a Shell environment.
Puts together the command C{dd} with the parameters for source file, destination
file and blocksize and, if enabled, finishes with the bits for the output redirection. If
redirection is enabled, both buffers will be redirected (standard output as well as
standard error output).
@return: Command String as used on a shell
@rtype: C{String}
"""
command = self._settings.getPathDD()
bs = str(self._settings.getBlocksize())
source = self._settings.getSource()
dest = self._settings.getDestination()
st = "\"" + command + "\"" + " " + PARAM_BLOCKSIZE + "=" + bs + " " + PARAM_INPUTFILE + "=" + \
"\"" + source + "\"" + " " + PARAM_OUTPUTFILE + "=" + "\"" + dest + "\""
if self._settings.getRedirectOutputBuffer() != None:
st = st + " > " + self._settings.getRedirectOutputBuffer() + " 2> " + self._settings.getRedirectOutputBuffer()
# we need to be root
sudo = FESettings.getSettings().getValue('command_sudo')
st = sudo + " " + st
return st
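    # Illustrative sketch (editor addition): with the dd path '/bin/dd', blocksize 4096,
    # source '/dev/sda1', destination '/tmp/usb.img' and an output buffer of '/tmp/dd.log'
    # (all made-up example values), the assembled string would look roughly like:
    #
    #   sudo "/bin/dd" bs=4096 if="/dev/sda1" of="/tmp/usb.img" > /tmp/dd.log 2> /tmp/dd.log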
def getPossibleSources(self):
"""
Returns a list of possible sources for the machine.
        The request is first passed to the private function L{_getListFromProc}, which
        examines the proc filesystem (/proc/partitions); if that fails,
        L{_getListFromFstab} is used as a fallback and examines the File System Table
        of the machine (/etc/fstab) to extract the devices.
        @return: Return values of the function L{_getListFromProc} or L{_getListFromFstab}
        @rtype: C{List} of C{String}; C{List} of C{String}
"""
try:
list = self._getListFromProc()
if list:
return list
else:
return self._getListFromFstab()
except IOError, msg:
return self._getListFromFstab()
def getSourceInfo(self):
"""
Info on List of possible sources
Provides some information about how the list of possible sources is assembled
and how to deal with the provided information.
@return: Info text - several lines
@rtype: C{String}
"""
return "\nHow to find your device\n" + "\n" \
"Firstly, ImageGenerator attempts to process proc \nfilesystem information in" \
"order to gather \ninformation about the available devices (path, size, type); \n" \
"if thise fails, a list is assembles using the Linux \nfilesystem table (fstab)\n\n" \
"Some experiences:\n" \
"\tFloppy Disk: \t/dev/fd?\n" \
"\tMemory Stick: \t/dev/sda1\n" \
"\tHard disk: \t/dev/hda \n" \
"\tHD partition: \t/dev/hdc1\n" \
"\tCDROM drive:\t/dev/hdc\n" \
"Also check the commands 'fdisk -l' or 'mount' for more information."
def _getListFromProc(self):
"""
        An implementation to get suggestions for imageable resources.
        Information from the proc file system tree is evaluated.
        @return: List of names with extra details; List of device names extracted from the proc filesystem
@rtype: C{List} of C{String}; C{List} of C{String}
"""
global DEV_PREFIX, PROC_LOCATION
ret = []
ret_detail = []
try:
partitions = open(PROC_LOCATION)
        except Exception, msg:
return None
types = self._getTypesFromFdisk()
line = " "
linecount = 0
columnName = 3
columnBlocks = None
while line != "":
line = partitions.readline()
if line.strip() == "":
continue
entries = line.split()
if linecount == 0:
columnName = entries.index('name')
try:
columnBlocks = entries.index('#blocks')
                except Exception, msg:
pass
else:
path = DEV_PREFIX + entries[columnName].strip()
ret.append(path)
if columnBlocks: # if size information available
details = path + " (%d MB)" % (int(entries[columnBlocks].strip()) / 1024)
else:
details = path
if types: # if information about partition type available
if types.has_key(path):
details += " [%s]" %(types[path])
ret_detail.append(details)
linecount += 1
partitions.close()
return ret_detail, ret
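    # Illustrative sketch (editor addition): the kind of /proc/partitions content the loop
    # above parses (made-up values). The 'name' and '#blocks' headers locate the columns;
    # each device row then yields '/dev/<name>' plus a size in MB and, if available, a type:
    #
    #   major minor  #blocks  name
    #      8     0  488386584 sda
    #      8     1     512000 sda1
    #      8     2  487873536 sda2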
def _getTypesFromFdisk(self):
global DEV_PREFIX
command = FDISK_LOCATION + " -l > " + TMP_FDISK_FILE
ret = os.system(command)
if ret != 0:
print "Partition type determination failed on fdisk execution"
return None
tmpFile = open(TMP_FDISK_FILE, 'r')
posId = None
ret = {}
while 1:
line = tmpFile.readline()
if not line:
break
if line.strip().startswith('Device'): # header of the table
posId = line.index('Id')
if line.startswith(DEV_PREFIX): # that's our entry
if posId:
partName = line.split()[0].strip()
typeId = line[posId:posId+2]
ret[partName] = self._getNameForType(typeId)
tmpFile.close()
return ret
def _getNameForType(self, typeId):
import os
import os.path
import sys
f = open(FILE_TYPES, 'r')
while 1:
line = f.readline()
if not line:
break
if line.startswith(typeId.strip()):
startPosString = line.split()[1]
startPos = line.index(startPosString)
name = line[startPos:].strip()
return name
f.close()
return typeId
def getSizeEstimationForPartition(self, partitionName):
"""
Extracts information from the proc file system to determine filesize.
We can only read block size; we assume block size of 1024 Bytes to determine size in Byte.
"""
global DEV_PREFIX, PROC_LOCATION
try:
partitions = open(PROC_LOCATION)
        except Exception, msg:
return None
if not partitionName.startswith(DEV_PREFIX):
return None # estimation impossible - we assume, we are under /dev
name = partitionName[len(DEV_PREFIX):]
print name
line = " "
linecount = 0
columnName = 3
columnBlocks = None
while line != "":
line = partitions.readline()
if line.strip() == "":
continue
entries = line.split()
if linecount == 0:
columnName = entries.index('name')
try:
columnBlocks = entries.index('#blocks')
                except Exception, msg:
return None # we need this here
else:
if entries[columnName] == name:
return int(entries[columnBlocks]) * 1024
linecount += 1
return None
def _getListFromFstab(self):
"""
        An implementation to get suggestions for imageable resources.
        The file system table (fstab) of the machine is processed and an attempt is made to
        extract information about devices from it. All lines that do not start
        with a slash are skipped; the first column of each remaining line is taken
        and collected into a list.
@return: List of names with some more information; List of names for devices extracted form the fstab file
@rtype: C{List} of C{String}; C{List} of C{String}
"""
fstab = open(FSTAB_LOCATION)
ret = []
line = " "
while line != "":
line = fstab.readline()
if line == "": # skip if EOF
continue
if line[0] != "/": # skip lines with comments and other stuff
continue
vals = line.split()
ret.append(vals[0])
fstab.close()
return ret, ret
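    # Illustrative sketch (editor addition): a typical fstab line whose first column would
    # be picked up by the loop above (made-up example):
    #
    #   /dev/sda1   /boot   ext4    defaults    0   2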
def getDefaultDDLocation(self):
"""
Gives a location for the dd command which is very likely to be used with this
implementation.
"""
return DEFAULT_PATH_DD
CoreManager.getInstance().registerCore(NAME_IMPL, GeneratorCore)
| gpl-3.0 | -6,145,571,337,880,757,000 | 36.016997 | 122 | 0.57848 | false |
t794104/ansible | contrib/inventory/vagrant.py | 30 | 4064 | #!/usr/bin/env python
"""
Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
returns it under the host group 'vagrant'
Example Vagrant configuration using this script:
config.vm.provision :ansible do |ansible|
ansible.playbook = "./provision/your_playbook.yml"
ansible.inventory_path = "./provision/inventory/vagrant.py"
ansible.verbose = true
end
"""
# Copyright (C) 2013 Mark Mandel <[email protected]>
# 2015 Igor Khomyakov <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the spacewalk.py inventory script for giving me the basic structure
# of this.
#
import sys
import os.path
import subprocess
import re
from paramiko import SSHConfig
from optparse import OptionParser
from collections import defaultdict
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import StringIO
_group = 'vagrant' # a default group
_ssh_to_ansible = [('user', 'ansible_user'),
('hostname', 'ansible_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_port')]
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
# get all the ssh configs for all boxes in an array of dictionaries.
def get_ssh_config():
return dict((k, get_a_ssh_config(k)) for k in list_running_boxes())
# list all the running boxes
def list_running_boxes():
output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n')
boxes = []
for line in output:
matcher = re.search(r"([^\s]+)[\s]+running \(.+", line)
if matcher:
boxes.append(matcher.group(1))
return boxes
# get the ssh config for a single box
def get_a_ssh_config(box_name):
"""Gives back a map of all the machine's ssh configurations"""
output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict')
config = SSHConfig()
config.parse(StringIO(output))
host_config = config.lookup(box_name)
# man 5 ssh_config:
# > It is possible to have multiple identity files ...
# > all these identities will be tried in sequence.
for id in host_config['identityfile']:
if os.path.isfile(id):
host_config['identityfile'] = id
return dict((v, host_config[k]) for k, v in _ssh_to_ansible)
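# Illustrative sketch (editor addition): typical `vagrant ssh-config` output for a box
# named 'default' (made-up values) and the mapping the function above would return:
#
#   Host default
#     HostName 127.0.0.1
#     User vagrant
#     Port 2222
#     IdentityFile /home/user/.vagrant.d/insecure_private_key
#
#   -> {'ansible_user': 'vagrant', 'ansible_host': '127.0.0.1', 'ansible_port': '2222',
#       'ansible_ssh_private_key_file': '/home/user/.vagrant.d/insecure_private_key'}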
# List out servers that vagrant has running
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
meta = defaultdict(dict)
for host in ssh_config:
meta['hostvars'][host] = ssh_config[host]
print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta}))
sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
print(json.dumps(get_a_ssh_config(options.host)))
sys.exit(0)
# Print out help
# ------------------------------
else:
parser.print_help()
sys.exit(0)
| gpl-3.0 | 3,344,007,663,330,796,000 | 30.022901 | 112 | 0.659203 | false |
saulshanabrook/pushgp.py | inspyred/ec/variators/mutators.py | 4 | 10044 | """
===============
:mod:`mutators`
===============
.. Copyright 2012 Inspired Intelligence Initiative
.. This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.. You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
.. module:: mutators
.. moduleauthor:: Aaron Garrett <[email protected]>
"""
import copy
import functools
def mutator(mutate):
"""Return an inspyred mutator function based on the given function.
This function generator takes a function that operates on only
one candidate to produce a single mutated candidate. The generator
handles the iteration over each candidate in the set to be mutated.
The given function ``mutate`` must have the following signature::
mutant = mutate(random, candidate, args)
This function is most commonly used as a function decorator with
the following usage::
@mutator
def mutate(random, candidate, args):
# Implementation of mutation
pass
The generated function also contains an attribute named
``single_mutation`` which holds the original mutation function.
In this way, the original single-candidate function can be
retrieved if necessary.
"""
@functools.wraps(mutate)
def ecspy_mutator(random, candidates, args):
mutants = []
for i, cs in enumerate(candidates):
mutants.append(mutate(random, cs, args))
return mutants
ecspy_mutator.single_mutation = mutate
return ecspy_mutator
@mutator
def bit_flip_mutation(random, candidate, args):
"""Return the mutants produced by bit-flip mutation on the candidates.
This function performs bit-flip mutation. If a candidate solution contains
non-binary values, this function leaves it unchanged.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied on a bit by bit basis.
"""
rate = args.setdefault('mutation_rate', 0.1)
mutant = copy.copy(candidate)
if len(mutant) == len([x for x in mutant if x in [0, 1]]):
for i, m in enumerate(mutant):
if random.random() < rate:
mutant[i] = (m + 1) % 2
return mutant
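def example_bit_flip():
    """Illustrative sketch (editor addition): a decorated mutator is called on a whole
    population, while the original single-candidate function stays reachable through
    ``single_mutation``. The RNG seed and candidates are made-up example data."""
    import random
    rng = random.Random(0)
    population = [[0, 1, 1, 0], [1, 1, 0, 0]]
    mutants = bit_flip_mutation(rng, population, {'mutation_rate': 0.5})
    single = bit_flip_mutation.single_mutation(rng, [0, 1, 1, 0], {'mutation_rate': 0.5})
    return mutants, single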
@mutator
def random_reset_mutation(random, candidate, args):
"""Return the mutants produced by randomly choosing new values.
This function performs random-reset mutation. It assumes that
candidate solutions are composed of discrete values. This function
makes use of the bounder function as specified in the EC's
``evolve`` method, and it assumes that the bounder contains
an attribute called *values* (which is true for instances of
``DiscreteBounder``).
The mutation moves through a candidate solution and, with rate
equal to the *mutation_rate*, randomly chooses a value from the
set of allowed values to be used in that location. Note that this
value may be the same as the original value.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied on an element by element basis.
"""
bounder = args['_ec'].bounder
try:
values = bounder.values
except AttributeError:
values = None
if values is not None:
rate = args.setdefault('mutation_rate', 0.1)
mutant = copy.copy(candidate)
for i, m in enumerate(mutant):
if random.random() < rate:
mutant[i] = random.choice(values)
return mutant
else:
return candidate
@mutator
def scramble_mutation(random, candidate, args):
"""Return the mutants created by scramble mutation on the candidates.
This function performs scramble mutation. It randomly chooses two
locations along the candidate and scrambles the values within that
slice.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied to the candidate as a whole (i.e., it
either mutates or it does not, based on the rate).
"""
rate = args.setdefault('mutation_rate', 0.1)
if random.random() < rate:
size = len(candidate)
p = random.randint(0, size-1)
q = random.randint(0, size-1)
p, q = min(p, q), max(p, q)
s = candidate[p:q+1]
random.shuffle(s)
return candidate[:p] + s[::-1] + candidate[q+1:]
else:
return candidate
@mutator
def inversion_mutation(random, candidate, args):
"""Return the mutants created by inversion mutation on the candidates.
This function performs inversion mutation. It randomly chooses two
locations along the candidate and reverses the values within that
slice.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied to the candidate as a whole (i.e., it
either mutates or it does not, based on the rate).
"""
rate = args.setdefault('mutation_rate', 0.1)
if random.random() < rate:
size = len(candidate)
p = random.randint(0, size-1)
q = random.randint(0, size-1)
p, q = min(p, q), max(p, q)
s = candidate[p:q+1]
return candidate[:p] + s[::-1] + candidate[q+1:]
else:
return candidate
@mutator
def gaussian_mutation(random, candidate, args):
"""Return the mutants created by Gaussian mutation on the candidates.
This function performs Gaussian mutation. This function
makes use of the bounder function as specified in the EC's
``evolve`` method.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
- *gaussian_mean* -- the mean used in the Gaussian function (default 0)
- *gaussian_stdev* -- the standard deviation used in the Gaussian function
(default 1)
The mutation rate is applied on an element by element basis.
"""
mut_rate = args.setdefault('mutation_rate', 0.1)
mean = args.setdefault('gaussian_mean', 0.0)
stdev = args.setdefault('gaussian_stdev', 1.0)
bounder = args['_ec'].bounder
mutant = copy.copy(candidate)
for i, m in enumerate(mutant):
if random.random() < mut_rate:
mutant[i] += random.gauss(mean, stdev)
mutant = bounder(mutant, args)
return mutant
@mutator
def nonuniform_mutation(random, candidate, args):
"""Return the mutants produced by nonuniform mutation on the candidates.
The function performs nonuniform mutation as specified in
(Michalewicz, "Genetic Algorithms + Data Structures = Evolution
Programs," Springer, 1996). This function also makes use of the
bounder function as specified in the EC's ``evolve`` method.
.. note::
This function **requires** that *max_generations* be specified in
the *args* dictionary. Therefore, it is best to use this operator
in conjunction with the ``generation_termination`` terminator.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *max_generations* -- the maximum number of generations for which
evolution should take place
Optional keyword arguments in args:
- *mutation_strength* -- the strength of the mutation, where higher
values correspond to greater variation (default 1)
"""
bounder = args['_ec'].bounder
num_gens = args['_ec'].num_generations
max_gens = args['max_generations']
strength = args.setdefault('mutation_strength', 1)
exponent = (1.0 - num_gens / float(max_gens)) ** strength
mutant = copy.copy(candidate)
for i, (c, lo, hi) in enumerate(zip(candidate, bounder.lower_bound, bounder.upper_bound)):
if random.random() <= 0.5:
new_value = c + (hi - c) * (1.0 - random.random() ** exponent)
else:
new_value = c - (c - lo) * (1.0 - random.random() ** exponent)
mutant[i] = new_value
return mutant
| bsd-3-clause | 6,312,254,082,177,958,000 | 33.871429 | 94 | 0.635106 | false |
wakatime/wakatime | wakatime/heartbeat.py | 1 | 12605 | # -*- coding: utf-8 -*-
"""
wakatime.heartbeat
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2017 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import re
from subprocess import PIPE
from .compat import u, json, is_win, Popen
from .exceptions import SkipHeartbeat
from .project import get_project_info
from .stats import get_file_stats
from .utils import get_user_agent, should_exclude, format_file_path, find_project_file
log = logging.getLogger('WakaTime')
class Heartbeat(object):
"""Heartbeat data for sending to API or storing in offline cache."""
skip = False
args = None
configs = None
time = None
entity = None
type = None
category = None
is_write = None
project = None
branch = None
language = None
dependencies = None
lines = None
lineno = None
cursorpos = None
user_agent = None
_sensitive_when_hiding_filename = (
'dependencies',
'lines',
'lineno',
'cursorpos',
)
_sensitive_when_hiding_branch = (
'branch',
)
def __init__(self, data, args, configs, _clone=None):
if not data:
self.skip = u('Skipping because heartbeat data is missing.')
return
self.args = args
self.configs = configs
self.entity = data.get('entity')
self.time = data.get('time', data.get('timestamp'))
self.is_write = data.get('is_write')
self.user_agent = data.get('user_agent') or get_user_agent(args.plugin)
self.type = data.get('type', data.get('entity_type'))
if self.type not in ['file', 'domain', 'app']:
self.type = 'file'
self.category = data.get('category')
allowed_categories = [
'coding',
'building',
'indexing',
'debugging',
'running tests',
'manual testing',
'writing tests',
'browsing',
'code reviewing',
'designing',
]
if self.category not in allowed_categories:
self.category = None
if not _clone:
exclude = self._excluded_by_pattern()
if exclude:
self.skip = u('Skipping because matches exclude pattern: {pattern}').format(
pattern=u(exclude),
)
return
if self.type == 'file':
self.entity = format_file_path(self.entity)
self._format_local_file()
if not self._file_exists():
self.skip = u('File does not exist; ignoring this heartbeat.')
return
if self._excluded_by_missing_project_file():
self.skip = u('Skipping because missing .wakatime-project file in parent path.')
return
if args.local_file and not os.path.isfile(args.local_file):
args.local_file = None
project, branch = get_project_info(configs, self, data)
self.project = project
self.branch = branch
if self._excluded_by_unknown_project():
self.skip = u('Skipping because project unknown.')
return
try:
stats = get_file_stats(self.entity,
entity_type=self.type,
lineno=data.get('lineno'),
cursorpos=data.get('cursorpos'),
plugin=args.plugin,
language=data.get('language'),
local_file=args.local_file)
except SkipHeartbeat as ex:
self.skip = u(ex) or 'Skipping'
return
else:
self.project = data.get('project')
self.branch = data.get('branch')
stats = data
for key in ['language', 'dependencies', 'lines', 'lineno', 'cursorpos']:
if stats.get(key) is not None:
setattr(self, key, stats[key])
def update(self, attrs):
"""Return a copy of the current Heartbeat with updated attributes."""
data = self.dict()
data.update(attrs)
heartbeat = Heartbeat(data, self.args, self.configs, _clone=True)
return heartbeat
def sanitize(self):
"""Removes sensitive data including file names and dependencies.
Returns a Heartbeat.
"""
if self.entity is None:
return self
if self._should_obfuscate_filename():
self._sanitize_metadata(keys=self._sensitive_when_hiding_filename)
if self._should_obfuscate_branch(default=True):
self._sanitize_metadata(keys=self._sensitive_when_hiding_branch)
extension = u(os.path.splitext(self.entity)[1])
self.entity = u('HIDDEN{0}').format(extension)
elif self.should_obfuscate_project():
self._sanitize_metadata(keys=self._sensitive_when_hiding_filename)
if self._should_obfuscate_branch(default=True):
self._sanitize_metadata(keys=self._sensitive_when_hiding_branch)
elif self._should_obfuscate_branch():
self._sanitize_metadata(keys=self._sensitive_when_hiding_branch)
return self
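    # Illustrative sketch (not part of the original code) of how sanitize()
    # behaves when a hide_file_names pattern matches the entity:
    #
    #     hb = Heartbeat(data, args, configs)   # entity: /home/user/app.py
    #     hb = hb.sanitize()
    #     hb.entity   # 'HIDDEN.py'
    #     hb.lineno   # None (cleared along with lines/cursorpos/dependencies)
    #     hb.branch   # None unless hide_branch_names was explicitly configured,
    #                 # in which case its own patterns decide
    #
    # `data`, `args` and `configs` here are whatever the caller already built;
    # the attribute values shown are assumptions for illustration.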
def json(self):
return json.dumps(self.dict())
def dict(self):
return {
'time': self.time,
'entity': self._unicode(self.entity),
'type': self.type,
'category': self.category,
'is_write': self.is_write,
'project': self._unicode(self.project),
'branch': self._unicode(self.branch),
'language': self._unicode(self.language),
'dependencies': self._unicode_list(self.dependencies),
'lines': self.lines,
'lineno': self.lineno,
'cursorpos': self.cursorpos,
'user_agent': self._unicode(self.user_agent),
}
def items(self):
return self.dict().items()
def get_id(self):
return u('{time}-{type}-{category}-{project}-{branch}-{entity}-{is_write}').format(
time=self.time,
type=self.type,
category=self.category,
project=self._unicode(self.project),
branch=self._unicode(self.branch),
entity=self._unicode(self.entity),
is_write=self.is_write,
)
def should_obfuscate_project(self):
"""Returns True if hide_project_names is true or the entity file path
matches one in the list of obfuscated project paths."""
for pattern in self.args.hide_project_names:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(self.entity):
return True
except re.error as ex:
log.warning(u('Regex error ({msg}) for hide_project_names pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
return False
def _should_obfuscate_filename(self):
"""Returns True if hide_file_names is true or the entity file path
matches one in the list of obfuscated file paths."""
for pattern in self.args.hide_file_names:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(self.entity):
return True
except re.error as ex:
log.warning(u('Regex error ({msg}) for hide_file_names pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
return False
def _should_obfuscate_branch(self, default=False):
"""Returns True if hide_file_names is true or the entity file path
matches one in the list of obfuscated file paths."""
# when project names or file names are hidden and hide_branch_names is
# not set, we default to hiding branch names along with file/project.
if default and self.args.hide_branch_names is None:
return True
if not self.branch or not self.args.hide_branch_names:
return False
for pattern in self.args.hide_branch_names:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(self.entity) or compiled.search(self.branch):
return True
except re.error as ex:
log.warning(u('Regex error ({msg}) for hide_branch_names pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
return False
def _unicode(self, value):
if value is None:
return None
return u(value)
def _unicode_list(self, values):
if values is None:
return None
return [self._unicode(value) for value in values]
def _file_exists(self):
return (self.entity and os.path.isfile(self.entity) or
self.args.local_file and os.path.isfile(self.args.local_file))
def _format_local_file(self):
"""When args.local_file empty on Windows, tries to map args.entity to a
unc path.
Updates args.local_file in-place without returning anything.
"""
if self.type != 'file':
return
if not self.entity:
return
if not is_win:
return
if self._file_exists():
return
self.args.local_file = self._to_unc_path(self.entity)
def _to_unc_path(self, filepath):
drive, rest = self._splitdrive(filepath)
if not drive:
return filepath
stdout = None
try:
stdout, stderr = Popen(['net', 'use'], stdout=PIPE, stderr=PIPE).communicate()
except OSError:
pass
else:
if stdout:
cols = None
for line in stdout.strip().splitlines()[1:]:
line = u(line)
if not line.strip():
continue
if not cols:
cols = self._unc_columns(line)
continue
start, end = cols.get('local', (0, 0))
if not start and not end:
break
local = line[start:end].strip().split(':')[0].upper()
if not local.isalpha():
continue
if local == drive:
start, end = cols.get('remote', (0, 0))
if not start and not end:
break
remote = line[start:end].strip()
return remote + rest
return filepath
def _unc_columns(self, line):
cols = {}
current_col = u('')
newcol = False
start, end = 0, 0
for char in line:
if char.isalpha():
if newcol:
cols[current_col.strip().lower()] = (start, end)
current_col = u('')
start = end
newcol = False
current_col += u(char)
else:
newcol = True
end += 1
if start != end and current_col:
cols[current_col.strip().lower()] = (start, -1)
return cols
def _splitdrive(self, filepath):
if filepath[1:2] != ':' or not filepath[0].isalpha():
return None, filepath
return filepath[0].upper(), filepath[2:]
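    # Illustrative examples (not part of the original code) for the helpers
    # above, assuming `net use` maps drive Z: to \\server\share:
    #
    #     self._splitdrive('Z:\\projects\\app.py')   # ('Z', '\\projects\\app.py')
    #     self._splitdrive('/home/user/app.py')      # (None, '/home/user/app.py')
    #     self._to_unc_path('Z:\\projects\\app.py')  # '\\\\server\\share\\projects\\app.py'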
def _excluded_by_pattern(self):
return should_exclude(self.entity, self.args.include, self.args.exclude)
def _excluded_by_unknown_project(self):
if self.project:
return False
return self.args.exclude_unknown_project
def _excluded_by_missing_project_file(self):
if not self.args.include_only_with_project_file:
return False
return find_project_file(self.entity) is None
def _sanitize_metadata(self, keys=[]):
for key in keys:
setattr(self, key, None)
def __repr__(self):
return self.json()
def __bool__(self):
return not self.skip
def __nonzero__(self):
return self.__bool__()
def __getitem__(self, key):
return self.dict()[key]
| bsd-3-clause | 8,855,140,367,791,368,000 | 31.74026 | 102 | 0.527092 | false |
agaffney/ansible | test/lib/ansible_test/_internal/cloud/cs.py | 14 | 9685 | """CloudStack plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import re
import time
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..util import (
find_executable,
ApplicationError,
display,
SubprocessError,
ConfigParser,
)
from ..http import (
HttpClient,
HttpError,
urlparse,
)
from ..docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
docker_network_inspect,
docker_exec,
get_docker_container_id,
get_docker_preferred_network_name,
get_docker_hostname,
is_docker_user_defined_network,
)
class CsCloudProvider(CloudProvider):
"""CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
DOCKER_SIMULATOR_NAME = 'cloudstack-sim'
def __init__(self, args):
"""
:type args: TestConfig
"""
super(CsCloudProvider, self).__init__(args)
self.image = os.environ.get('ANSIBLE_CLOUDSTACK_CONTAINER', 'quay.io/ansible/cloudstack-test-container:1.4.0')
self.container_name = ''
self.endpoint = ''
self.host = ''
self.port = 0
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
docker = find_executable('docker', required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
% (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(CsCloudProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_remote_ssh_options(self):
"""Get any additional options needed when delegating tests to a remote instance via SSH.
:rtype: list[str]
"""
if self.managed:
return ['-R', '8888:%s:8888' % get_docker_hostname()]
return []
def get_docker_run_options(self):
"""Get any additional options needed when delegating tests to a docker container.
:rtype: list[str]
"""
network = get_docker_preferred_network_name(self.args)
if self.managed and not is_docker_user_defined_network(network):
return ['--link', self.DOCKER_SIMULATOR_NAME]
return []
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.container_name:
if self.ci_provider.code:
docker_rm(self.args, self.container_name)
elif not self.args.explain:
display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name)
super(CsCloudProvider, self).cleanup()
def _setup_static(self):
"""Configure CloudStack tests for use with static configuration."""
parser = ConfigParser()
parser.read(self.config_static_path)
self.endpoint = parser.get('cloudstack', 'endpoint')
parts = urlparse(self.endpoint)
self.host = parts.hostname
if not self.host:
raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)
if parts.port:
self.port = parts.port
elif parts.scheme == 'http':
self.port = 80
elif parts.scheme == 'https':
self.port = 443
else:
raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)
display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
self._wait_for_service()
def _setup_dynamic(self):
"""Create a CloudStack simulator using docker."""
config = self._read_config_template()
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0]['State']['Running']:
docker_rm(self.args, self.container_name)
results = []
if results:
display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
else:
display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
docker_pull(self.args, self.image)
docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])
# apply work-around for OverlayFS issue
# https://github.com/docker/for-linux/issues/72#issuecomment-319904698
docker_exec(self.args, self.container_name, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'])
if not self.args.explain:
display.notice('The CloudStack simulator will probably be ready in 2 - 4 minutes.')
container_id = get_docker_container_id()
if container_id:
self.host = self._get_simulator_address()
display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
else:
self.host = get_docker_hostname()
self.port = 8888
self.endpoint = 'http://%s:%d' % (self.host, self.port)
self._wait_for_service()
if self.args.explain:
values = dict(
HOST=self.host,
PORT=str(self.port),
)
else:
credentials = self._get_credentials()
if self.args.docker:
host = self.DOCKER_SIMULATOR_NAME
elif self.args.remote:
host = 'localhost'
else:
host = self.host
values = dict(
HOST=host,
PORT=str(self.port),
KEY=credentials['apikey'],
SECRET=credentials['secretkey'],
)
display.sensitive.add(values['SECRET'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _get_simulator_address(self):
current_network = get_docker_preferred_network_name(self.args)
networks = docker_network_inspect(self.args, current_network)
try:
network = [network for network in networks if network['Name'] == current_network][0]
containers = network['Containers']
container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0]
return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
except Exception:
display.error('Failed to process the following docker network inspect output:\n%s' %
json.dumps(networks, indent=4, sort_keys=True))
raise
def _wait_for_service(self):
"""Wait for the CloudStack service endpoint to accept connections."""
if self.args.explain:
return
client = HttpClient(self.args, always=True)
endpoint = self.endpoint
for _iteration in range(1, 30):
display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)
try:
client.get(endpoint)
return
except SubprocessError:
pass
time.sleep(10)
raise ApplicationError('Timeout waiting for CloudStack service.')
def _get_credentials(self):
"""Wait for the CloudStack simulator to return credentials.
:rtype: dict[str, str]
"""
client = HttpClient(self.args, always=True)
endpoint = '%s/admin.json' % self.endpoint
for _iteration in range(1, 30):
display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1)
response = client.get(endpoint)
if response.status_code == 200:
try:
return response.json()
except HttpError as ex:
display.error(ex)
time.sleep(10)
raise ApplicationError('Timeout waiting for CloudStack credentials.')
class CsCloudEnvironment(CloudEnvironment):
"""CloudStack cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
config = dict(parser.items('default'))
env_vars = dict(
CLOUDSTACK_ENDPOINT=config['endpoint'],
CLOUDSTACK_KEY=config['key'],
CLOUDSTACK_SECRET=config['secret'],
CLOUDSTACK_TIMEOUT=config['timeout'],
)
display.sensitive.add(env_vars['CLOUDSTACK_SECRET'])
ansible_vars = dict(
cs_resource_prefix=self.resource_prefix,
)
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
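    # Illustrative sketch (not part of the original plugin) of the config file
    # this environment reads: an ini file whose [default] section carries the
    # values consumed above. The concrete values are assumptions.
    #
    #     [default]
    #     endpoint = http://cloudstack-sim:8888
    #     key = <api key written by the provider>
    #     secret = <secret key written by the provider>
    #     timeout = 60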
| gpl-3.0 | -6,335,387,217,628,617,000 | 31.391304 | 141 | 0.591843 | false |
UPB-FILS/SE | TD1/bonjour_files.py | 1 | 2101 | """
The first program in Python
* using command-line arguments
* lists and the map function
* threads
* the logger
@author Dragos STOICA
@version 0.4
@date 16.feb.2014
"""
import sys, threading, logging, os
class Bonjour(threading.Thread):
def __init__(self, personne):
threading.Thread.__init__(self)
self.personne = personne
def run(self):
        # Polite function - greet a person
print "Bonjour %(personne)s!\n" % \
{"personne":self.personne},
logging.info("Bonjour : %(personne)s" %{"personne":self.personne})
def utilisation():
    # Display the usage message
print """
Le programme doit etre appelle avec minimum 1 argument:
python bonjour_listes.py Dragos
"""
def main(argv=None):
working_dir = os.path.dirname(os.path.abspath(__file__)) + os.path.sep
    # Configure logging to write to a text file
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
filename = working_dir + 'bonjour.log',
level=logging.INFO)
logging.info("Main start")
    # The main loop
if argv is None:
argv = sys.argv
if len(argv) == 1:
utilisation()
else:
        # Argument 1 is the file name, with one name per line
mmeThread = []
mThread = []
with open(working_dir + argv[1],'r') as f:
            # Say hello to each person in the file
for ligne in f:
if ligne[0:2] == "M.":
mThread.append(Bonjour(ligne.strip(' \r\n')))
else:
mme_local = Bonjour(ligne.strip(' \r\n'))
mmeThread.append(mme_local)
mme_local.start()
for mme in mmeThread:
mme.join()
for m in mThread:
m.start()
m.join()
logging.info("Main stop")
return 0
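# Illustrative input file for this script (an assumption, not part of the
# original exercise): one person per line, where lines starting with "M." are
# greeted by the sequentially-joined threads and all other lines by the
# threads started immediately:
#
#     M. Dupont
#     Mme Martin
#     Mme Ionescu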
if __name__ == "__main__":
    # Simplify the logic of the main function
sys.exit(main()) | apache-2.0 | -8,752,712,077,449,837,000 | 29.463768 | 74 | 0.562589 | false |
bugzPDX/airmozilla | airmozilla/main/migrations/0057_auto.py | 6 | 29537 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field participants on 'Event'
db.delete_table('main_event_participants')
def backwards(self, orm):
# Adding M2M table for field participants on 'Event'
db.create_table(u'main_event_participants', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('event', models.ForeignKey(orm[u'main.event'], null=False)),
('participant', models.ForeignKey(orm[u'main.participant'], null=False))
))
db.create_unique(u'main_event_participants', ['event_id', 'participant_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.approval': {
'Meta': {'object_name': 'Approval'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Event']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'processed_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
u'main.channel': {
'Meta': {'ordering': "['name']", 'object_name': 'Channel'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
'description': ('django.db.models.fields.TextField', [], {}),
'exclude_from_trending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'image_is_banner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Channel']", 'null': 'True'}),
'reverse_order': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'main.curatedgroup': {
'Meta': {'object_name': 'CuratedGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
u'main.event': {
'Meta': {'object_name': 'Event'},
'additional_links': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'archive_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'call_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Channel']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Location']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_user'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'mozillian': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'picture': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_picture'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['main.Picture']"}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'placeholder_img': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'popcorn_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '40', 'db_index': 'True'}),
'recruitmentmessage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.RecruitmentMessage']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'remote_presenters': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '215', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'initiated'", 'max_length': '20', 'db_index': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Template']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'template_environment': ('airmozilla.main.fields.EnvironmentField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'transcript': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'upload': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_upload'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['uploads.Upload']"})
},
u'main.eventassignment': {
'Meta': {'object_name': 'EventAssignment'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Event']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locations': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Location']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'main.eventhitstats': {
'Meta': {'object_name': 'EventHitStats'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Event']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
'shortcode': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'total_hits': ('django.db.models.fields.IntegerField', [], {})
},
u'main.eventoldslug': {
'Meta': {'object_name': 'EventOldSlug'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '215'})
},
u'main.eventrevision': {
'Meta': {'object_name': 'EventRevision'},
'additional_links': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'call_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Channel']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
'description': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Picture']", 'null': 'True', 'blank': 'True'}),
'placeholder_img': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'recruitmentmessage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.RecruitmentMessage']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
u'main.eventtweet': {
'Meta': {'object_name': 'EventTweet'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_placeholder': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'send_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
'sent_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'tweet_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'main.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Region']", 'symmetrical': 'False', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'main.locationdefaultenvironment': {
'Meta': {'unique_together': "(('location', 'privacy', 'template'),)", 'object_name': 'LocationDefaultEnvironment'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Location']"}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '40'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Template']"}),
'template_environment': ('airmozilla.main.fields.EnvironmentField', [], {})
},
u'main.participant': {
'Meta': {'object_name': 'Participant'},
'blog_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'clear_token': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'cleared': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '15', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'participant_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'photo': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '65', 'blank': 'True'}),
'team': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'topic_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'main.picture': {
'Meta': {'object_name': 'Picture'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'picture_event'", 'null': 'True', 'to': u"orm['main.Event']"}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'modified_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'size': ('django.db.models.fields.PositiveIntegerField', [], {}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'main.recruitmentmessage': {
'Meta': {'ordering': "['text']", 'object_name': 'RecruitmentMessage'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'modified_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'main.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'main.suggestedevent': {
'Meta': {'object_name': 'SuggestedEvent'},
'accepted': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Event']", 'null': 'True', 'blank': 'True'}),
'additional_links': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'call_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Channel']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
'description': ('django.db.models.fields.TextField', [], {}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Location']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Participant']", 'symmetrical': 'False'}),
'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Picture']", 'null': 'True', 'blank': 'True'}),
'placeholder_img': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'popcorn_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '40'}),
'remote_presenters': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'review_comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '215', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'created'", 'max_length': '40'}),
'submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'upcoming': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'upload': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'upload'", 'null': 'True', 'to': u"orm['uploads.Upload']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'main.suggestedeventcomment': {
'Meta': {'object_name': 'SuggestedEventComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'suggested_event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.SuggestedEvent']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
u'main.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'main.template': {
'Meta': {'ordering': "['name']", 'object_name': 'Template'},
'content': ('django.db.models.fields.TextField', [], {}),
'default_archive_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'default_popcorn_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.urlmatch': {
'Meta': {'object_name': 'URLMatch'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'string': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'use_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'main.urltransform': {
'Meta': {'object_name': 'URLTransform'},
'find': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.URLMatch']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'replace_with': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'contributor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'main.vidlysubmission': {
'Meta': {'object_name': 'VidlySubmission'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Event']"}),
'hd': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'submission_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'submission_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'token_protection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'uploads.upload': {
'Meta': {'object_name': 'Upload'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event'", 'null': 'True', 'to': u"orm['main.Event']"}),
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.BigIntegerField', [], {}),
'suggested_event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'suggested_event'", 'null': 'True', 'to': u"orm['main.SuggestedEvent']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '400'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['main'] | bsd-3-clause | 6,270,718,778,250,254,000 | 84.617391 | 209 | 0.550124 | false |
afandria/mojo | mojo/tools/mojob.py | 7 | 10907 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simple script to make building/testing Mojo components easier."""
import argparse
from copy import deepcopy
import logging
from multiprocessing import cpu_count
import os
import subprocess
import sys
from get_test_list import GetTestList
from mopy.config import Config
from mopy.paths import Paths
from mopy.gn import GNArgsForConfig, ParseGNConfig, CommandLineForGNArgs
from mopy.log import InitLogging
_logger = logging.getLogger()
_verbose_count = 0
def _args_to_config(args):
# Default to host OS.
target_os = None
if args.android:
target_os = Config.OS_ANDROID
elif args.ios:
target_os = Config.OS_IOS
target_cpu = args.target_cpu
additional_args = {}
if 'clang' in args:
additional_args['is_clang'] = args.clang
if 'asan' in args and args.asan:
additional_args['sanitizer'] = Config.SANITIZER_ASAN
# Additional non-standard config entries:
if 'goma' in args:
goma_dir = os.environ.get('GOMA_DIR')
goma_home_dir = os.path.join(os.getenv('HOME', ''), 'goma')
if args.goma and goma_dir:
additional_args['use_goma'] = True
additional_args['goma_dir'] = goma_dir
elif args.goma and os.path.exists(goma_home_dir):
additional_args['use_goma'] = True
additional_args['goma_dir'] = goma_home_dir
else:
additional_args['use_goma'] = False
additional_args['goma_dir'] = None
if 'nacl' in args:
additional_args['use_nacl'] = args.nacl
if not ('asan' in args and args.asan):
go_dir = os.path.join(Paths().src_root, 'third_party', 'go', 'tool')
if args.android:
additional_args['mojo_use_go'] = True
additional_args['go_build_tool'] = os.path.join(
go_dir, 'android_arm', 'bin', 'go')
elif target_os is None and Config.GetHostOS() == Config.OS_LINUX:
additional_args['mojo_use_go'] = True
additional_args['go_build_tool'] = os.path.join(
go_dir, 'linux_amd64', 'bin', 'go')
if 'dry_run' in args:
additional_args['dry_run'] = args.dry_run
if 'builder_name' in args:
additional_args['builder_name'] = args.builder_name
if 'build_number' in args:
additional_args['build_number'] = args.build_number
if 'master_name' in args:
additional_args['master_name'] = args.master_name
if 'test_results_server' in args:
additional_args['test_results_server'] = args.test_results_server
if 'gn_args' in args:
additional_args['gn_args'] = args.gn_args
is_debug = args.debug and not args.official
return Config(target_os=target_os, target_cpu=target_cpu,
is_debug=is_debug, is_official_build=args.official,
dcheck_always_on=args.dcheck_always_on,
is_simulator=args.simulator, **additional_args)
def _get_out_dir(config):
"""Gets the build output directory (e.g., out/Debug), relative to src, for the
given config."""
paths = Paths(config)
return paths.SrcRelPath(paths.build_dir)
def _sync(config): # pylint: disable=W0613
"""Runs gclient sync for the given config."""
_logger.debug('_sync()')
return subprocess.call(['gclient', 'sync'])
def _gn(config):
"""Runs gn gen for the given config."""
_logger.debug('_gn()')
command = ['gn', 'gen', '--check']
gn_args = CommandLineForGNArgs(GNArgsForConfig(config))
out_dir = _get_out_dir(config)
command.append(out_dir)
command.append('--args=%s' % ' '.join(gn_args))
print 'Running %s %s ...' % (command[0],
' '.join('\'%s\'' % x for x in command[1:]))
return subprocess.call(command)
def _build(config):
"""Builds for the given config."""
_logger.debug('_build()')
out_dir = _get_out_dir(config)
gn_args = ParseGNConfig(out_dir)
print 'Building in %s ...' % out_dir
if gn_args.get('use_goma'):
# Use the configured goma directory.
local_goma_dir = gn_args.get('goma_dir')
print 'Ensuring goma (in %s) started ...' % local_goma_dir
command = ['python',
os.path.join(local_goma_dir, 'goma_ctl.py'),
'ensure_start']
exit_code = subprocess.call(command)
if exit_code:
return exit_code
# Goma allows us to run many more jobs in parallel, say 32 per core/thread
# (= 1024 on a 16-core, 32-thread Z620). Limit the load average to 4 per
# core/thread (= 128 on said Z620).
jobs = cpu_count() * 32
limit = cpu_count() * 4
return subprocess.call(['ninja', '-j', str(jobs), '-l', str(limit),
'-C', out_dir])
else:
return subprocess.call(['ninja', '-C', out_dir])
def _run_tests(config, test_types):
"""Runs the tests of the given type(s) for the given config."""
assert isinstance(test_types, list)
config = deepcopy(config)
config.values['test_types'] = test_types
test_list = GetTestList(config, verbose_count=_verbose_count)
dry_run = config.values.get('dry_run')
final_exit_code = 0
failure_list = []
for entry in test_list:
print 'Running: %s' % entry['name']
print 'Command: %s' % ' '.join(entry['command'])
if dry_run:
continue
_logger.info('Starting: %s' % ' '.join(entry['command']))
exit_code = subprocess.call(entry['command'])
_logger.info('Completed: %s' % ' '.join(entry['command']))
if exit_code:
if not final_exit_code:
final_exit_code = exit_code
failure_list.append(entry['name'])
print 72 * '='
print 'SUMMARY:',
if dry_run:
print 'Dry run: no tests run'
elif not failure_list:
assert not final_exit_code
print 'All tests passed'
else:
assert final_exit_code
print 'The following had failures:', ', '.join(failure_list)
return final_exit_code
def _test(config):
_logger.debug('_test()')
return _run_tests(config, [Config.TEST_TYPE_DEFAULT])
def _perftest(config):
_logger.debug('_perftest()')
return _run_tests(config, [Config.TEST_TYPE_PERF])
def _pytest(config):
_logger.debug('_pytest()')
return _run_tests(config, ['python'])
def main():
os.chdir(Paths().src_root)
parser = argparse.ArgumentParser(description='A script to make building'
'/testing Mojo components easier.')
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('--verbose',
help='Be verbose (multiple times for more)',
default=0, dest='verbose_count', action='count')
parent_parser.add_argument('--asan', help='Use Address Sanitizer',
action='store_true')
parent_parser.add_argument('--dcheck_always_on',
help='DCHECK and MOJO_DCHECK are fatal even in '
'release builds',
action='store_true')
debug_group = parent_parser.add_mutually_exclusive_group()
debug_group.add_argument('--debug', help='Debug build (default)',
default=True, action='store_true')
debug_group.add_argument('--release', help='Release build', default=False,
dest='debug', action='store_false')
# The official build is a release build suitable for distribution, with a
# different package name.
debug_group.add_argument('--official', help='Official build', default=False,
dest='official', action='store_true')
os_group = parent_parser.add_mutually_exclusive_group()
os_group.add_argument('--android', help='Build for Android',
action='store_true')
os_group.add_argument('--ios', help='Build for iOS',
action='store_true')
parent_parser.add_argument('--simulator',
help='Build for a simulator of the target',
action='store_true')
parent_parser.add_argument('--target-cpu',
help='CPU architecture to build for.',
choices=['x64', 'x86', 'arm'])
subparsers = parser.add_subparsers()
sync_parser = subparsers.add_parser('sync', parents=[parent_parser],
help='Sync using gclient (does not run gn).')
sync_parser.set_defaults(func=_sync)
gn_parser = subparsers.add_parser('gn', parents=[parent_parser],
help='Run gn for mojo (does not sync).')
gn_parser.set_defaults(func=_gn)
gn_parser.add_argument('--args', help='Specify extra args',
default=None, dest='gn_args')
# Note: no default, if nothing is specified on the command line GN decides.
gn_parser.add_argument('--nacl', help='Add in NaCl', action='store_true',
default=argparse.SUPPRESS)
gn_parser.add_argument('--no-nacl', help='Remove NaCl', action='store_false',
default=argparse.SUPPRESS, dest='nacl')
clang_group = gn_parser.add_mutually_exclusive_group()
clang_group.add_argument('--clang', help='Use Clang (default)', default=None,
action='store_true')
clang_group.add_argument('--gcc', help='Use GCC',
dest='clang', action='store_false')
goma_group = gn_parser.add_mutually_exclusive_group()
goma_group.add_argument('--goma',
help='Use Goma (if $GOMA_DIR is set or $HOME/goma '
'exists; default)',
default=True,
action='store_true')
goma_group.add_argument('--no-goma', help='Don\'t use Goma', default=False,
dest='goma', action='store_false')
build_parser = subparsers.add_parser('build', parents=[parent_parser],
help='Build')
build_parser.set_defaults(func=_build)
test_parser = subparsers.add_parser('test', parents=[parent_parser],
help='Run unit tests (does not build).')
test_parser.set_defaults(func=_test)
test_parser.add_argument('--dry-run',
help='Print instead of executing commands',
default=False, action='store_true')
perftest_parser = subparsers.add_parser('perftest', parents=[parent_parser],
help='Run perf tests (does not build).')
perftest_parser.set_defaults(func=_perftest)
pytest_parser = subparsers.add_parser('pytest', parents=[parent_parser],
help='Run Python unit tests (does not build).')
pytest_parser.set_defaults(func=_pytest)
args = parser.parse_args()
global _verbose_count
_verbose_count = args.verbose_count
InitLogging(_verbose_count)
if args.simulator and not args.ios:
sys.exit("Currently, the simulator target is only configured for iOS")
return args.func(_args_to_config(args))
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -4,215,046,015,450,439,700 | 33.298742 | 80 | 0.617768 | false |
thnee/ansible | lib/ansible/modules/cloud/docker/docker_stack.py | 10 | 10485 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Dario Zanzico ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: docker_stack
author: "Dario Zanzico (@dariko)"
short_description: docker stack module
description:
- Manage docker stacks using the 'docker stack' command
on the target node (see examples).
version_added: "2.8"
options:
name:
description:
- Stack name
type: str
required: yes
state:
description:
- Service state.
type: str
default: "present"
choices:
- present
- absent
compose:
description:
- List of compose definitions. Any element may be a string
referring to the path of the compose file on the target host
or the YAML contents of a compose file nested as dictionary.
type: list
# elements: raw
default: []
prune:
description:
- If true will add the C(--prune) option to the C(docker stack deploy) command.
This will have docker remove the services not present in the
current stack definition.
type: bool
default: no
with_registry_auth:
description:
- If true will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
This will have docker send registry authentication details to Swarm agents.
type: bool
default: no
resolve_image:
description:
- If set will add the C(--resolve-image) option to the C(docker stack deploy) command.
This will have docker query the registry to resolve image digest and
supported platforms. If not set, docker use "always" by default.
type: str
choices: ["always", "changed", "never"]
absent_retries:
description:
- If C(>0) and I(state) is C(absent) the module will retry up to
I(absent_retries) times to delete the stack until all the
resources have been effectively deleted.
If the last try still reports the stack as not completely
removed the module will fail.
type: int
default: 0
absent_retries_interval:
description:
- Interval in seconds between consecutive I(absent_retries).
type: int
default: 1
requirements:
- jsondiff
- pyyaml
notes:
- Return values I(out) and I(err) have been deprecated and will be removed in Ansible 2.14. Use I(stdout) and I(stderr) instead.
'''
RETURN = '''
stack_spec_diff:
description: |
dictionary containing the differences between the 'Spec' field
of the stack services before and after applying the new stack
definition.
sample: >
"stack_spec_diff":
{'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
returned: on change
type: dict
'''
EXAMPLES = '''
- name: Deploy stack from a compose file
docker_stack:
state: present
name: mystack
compose:
- /opt/docker-compose.yml
- name: Deploy stack from base compose file and override the web service
docker_stack:
state: present
name: mystack
compose:
- /opt/docker-compose.yml
- version: '3'
services:
web:
image: nginx:latest
environment:
ENVVAR: envvar
- name: Remove stack
docker_stack:
name: mystack
state: absent
'''
import json
import tempfile
from ansible.module_utils.six import string_types
from time import sleep
try:
from jsondiff import diff as json_diff
HAS_JSONDIFF = True
except ImportError:
HAS_JSONDIFF = False
try:
from yaml import dump as yaml_dump
HAS_YAML = True
except ImportError:
HAS_YAML = False
from ansible.module_utils.basic import AnsibleModule, os
def docker_stack_services(module, stack_name):
docker_bin = module.get_bin_path('docker', required=True)
rc, out, err = module.run_command([docker_bin,
"stack",
"services",
stack_name,
"--format",
"{{.Name}}"])
if err == "Nothing found in stack: %s\n" % stack_name:
return []
return out.strip().split('\n')
def docker_service_inspect(module, service_name):
docker_bin = module.get_bin_path('docker', required=True)
rc, out, err = module.run_command([docker_bin,
"service",
"inspect",
service_name])
if rc != 0:
return None
else:
ret = json.loads(out)[0]['Spec']
return ret
def docker_stack_deploy(module, stack_name, compose_files):
docker_bin = module.get_bin_path('docker', required=True)
command = [docker_bin, "stack", "deploy"]
if module.params["prune"]:
command += ["--prune"]
if module.params["with_registry_auth"]:
command += ["--with-registry-auth"]
if module.params["resolve_image"]:
command += ["--resolve-image",
module.params["resolve_image"]]
for compose_file in compose_files:
command += ["--compose-file",
compose_file]
command += [stack_name]
return module.run_command(command)
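# For illustration (not part of the module): with prune and with_registry_auth
# enabled and two compose sources, the function above ends up invoking roughly:
#
#   docker stack deploy --prune --with-registry-auth \
#       --compose-file /opt/docker-compose.yml --compose-file /tmp/tmpXXXXXX mystack
#
# where /tmp/tmpXXXXXX is a hypothetical temporary file created for a compose
# definition that was passed inline as a dictionary.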
def docker_stack_inspect(module, stack_name):
ret = {}
for service_name in docker_stack_services(module, stack_name):
ret[service_name] = docker_service_inspect(module, service_name)
return ret
def docker_stack_rm(module, stack_name, retries, interval):
docker_bin = module.get_bin_path('docker', required=True)
command = [docker_bin, "stack", "rm", stack_name]
rc, out, err = module.run_command(command)
while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
sleep(interval)
retries = retries - 1
rc, out, err = module.run_command(command)
return rc, out, err
def main():
module = AnsibleModule(
argument_spec={
'name': dict(type='str', required=True),
'compose': dict(type='list', elements='raw', default=[]),
'prune': dict(type='bool', default=False),
'with_registry_auth': dict(type='bool', default=False),
'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
'state': dict(type='str', default='present', choices=['present', 'absent']),
'absent_retries': dict(type='int', default=0),
'absent_retries_interval': dict(type='int', default=1)
},
supports_check_mode=False
)
if not HAS_JSONDIFF:
return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
if not HAS_YAML:
return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
state = module.params['state']
compose = module.params['compose']
name = module.params['name']
absent_retries = module.params['absent_retries']
absent_retries_interval = module.params['absent_retries_interval']
if state == 'present':
if not compose:
module.fail_json(msg=("compose parameter must be a list "
"containing at least one element"))
compose_files = []
for i, compose_def in enumerate(compose):
if isinstance(compose_def, dict):
compose_file_fd, compose_file = tempfile.mkstemp()
module.add_cleanup_file(compose_file)
with os.fdopen(compose_file_fd, 'w') as stack_file:
compose_files.append(compose_file)
stack_file.write(yaml_dump(compose_def))
elif isinstance(compose_def, string_types):
compose_files.append(compose_def)
else:
                module.fail_json(msg="compose element '%s' must be a "
                                     "string or a dictionary" % compose_def)
before_stack_services = docker_stack_inspect(module, name)
rc, out, err = docker_stack_deploy(module, name, compose_files)
after_stack_services = docker_stack_inspect(module, name)
if rc != 0:
            module.fail_json(msg="docker stack deploy command failed",
rc=rc,
out=out, err=err, # Deprecated
stdout=out, stderr=err)
before_after_differences = json_diff(before_stack_services,
after_stack_services)
        # iterate over a snapshot of the keys since entries may be removed below
        for k in list(before_after_differences.keys()):
if isinstance(before_after_differences[k], dict):
before_after_differences[k].pop('UpdatedAt', None)
before_after_differences[k].pop('Version', None)
if not list(before_after_differences[k].keys()):
before_after_differences.pop(k)
if not before_after_differences:
module.exit_json(
changed=False,
rc=rc,
stdout=out,
stderr=err)
else:
module.exit_json(
changed=True,
rc=rc,
stdout=out,
stderr=err,
stack_spec_diff=json_diff(before_stack_services,
after_stack_services,
dump=True))
else:
if docker_stack_services(module, name):
rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
if rc != 0:
                module.fail_json(msg="'docker stack rm' command failed",
rc=rc,
out=out, err=err, # Deprecated
stdout=out, stderr=err)
else:
module.exit_json(changed=True,
msg=out, rc=rc,
err=err, # Deprecated
stdout=out, stderr=err)
module.exit_json(changed=False)
if __name__ == "__main__":
main()
| gpl-3.0 | 3,972,492,489,725,335,600 | 32.498403 | 130 | 0.570243 | false |
charles-cooper/raiden | raiden/utils/__init__.py | 1 | 4637 | # -*- coding: utf-8 -*-
import os
import re
import sys
import string
import random
from coincurve import PrivateKey
from Crypto.Hash import keccak as keccaklib
from ethereum.utils import sha3
from ethereum.utils import remove_0x_head
import raiden
__all__ = (
'sha3',
'keccak_256',
'keccak',
'ishash',
'isaddress',
'make_address',
'make_privkey_address',
'publickey_to_address',
'privatekey_to_address',
'pex',
'lpex',
'get_contract_path',
'safe_lstrip_hex',
'camel_to_snake_case'
)
LETTERS = string.printable
# From the secp256k1 header file:
#
# The purpose of context structures is to cache large precomputed data tables
# that are expensive to construct, and also to maintain the randomization data
# for blinding.
#
# Do not create a new context object for each operation, as construction is
# far slower than all other API calls (~100 times slower than an ECDSA
# verification).
#
# A constructed context can safely be used from multiple threads
# simultaneously, but API call that take a non-const pointer to a context
# need exclusive access to it. In particular this is the case for
# secp256k1_context_destroy and secp256k1_context_randomize.
#
# Regarding randomization, either do it once at creation time (in which case
# you do not need any locking for the other calls), or use a read-write lock.
#
def safe_address_decode(address):
try:
address = safe_lstrip_hex(address)
address = address.decode('hex')
except TypeError:
pass
return address
def keccak_256(data):
return keccaklib.new(digest_bits=256, data=data)
def keccak(seed):
return keccak_256(seed).digest()
def ishash(data):
return isinstance(data, (bytes, bytearray)) and len(data) == 32
def isaddress(data):
return isinstance(data, (bytes, bytearray)) and len(data) == 20
def make_address():
return bytes(''.join(random.choice(LETTERS) for _ in range(20)))
def make_privkey_address():
private_key_bin = sha3(''.join(random.choice(LETTERS) for _ in range(20)))
privkey = PrivateKey(private_key_bin)
pubkey = privkey.public_key.format(compressed=False)
address = publickey_to_address(pubkey)
return privkey, address
def pex(data):
return str(data).encode('hex')[:8]
def lpex(lst):
return [pex(l) for l in lst]
def activate_ultratb():
from IPython.core import ultratb
sys.excepthook = ultratb.VerboseTB(call_pdb=True, tb_offset=6)
def host_port_to_endpoint(host, port):
return "{}:{}".format(host, port)
def split_endpoint(endpoint):
host, port = endpoint.split(':')[:2]
port = int(port)
return (host, port)
def publickey_to_address(publickey):
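    # Ethereum-style address: drop the 0x04 prefix byte of the uncompressed
    # public key, keccak/sha3 the remainder and keep the last 20 bytes.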
return sha3(publickey[1:])[12:]
def privatekey_to_address(private_key_bin):
private_key = PrivateKey(private_key_bin)
pubkey = private_key.public_key.format(compressed=False)
return publickey_to_address(pubkey)
def get_project_root():
return os.path.dirname(raiden.__file__)
def get_contract_path(contract_name):
contract_path = os.path.join(
get_project_root(),
'smart_contracts',
contract_name
)
return os.path.realpath(contract_path)
def safe_lstrip_hex(val):
if isinstance(val, basestring):
return remove_0x_head(val)
return val
def get_encoded_transfers(their_transfer, our_transfer):
"""Check for input sanity and return the encoded version of the transfers"""
if not their_transfer and our_transfer:
raise ValueError(
"There is no reason to provide our_transfer when their_transfer"
" is not provided"
)
their_encoded = their_transfer.encode() if their_transfer else ""
our_encoded = our_transfer.encode() if our_transfer else ""
return their_encoded, our_encoded
def camel_to_snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def snake_to_camel_case(snake_string):
return snake_string.title().replace("_", "")
def channel_to_api_dict(channel):
"""Takes in a Channel Object and turns it into a dictionary for
usage in the REST API. Decoding from binary to hex happens through
the marshmallow AddressField in encoding.py.
"""
return {
"channel_address": channel.channel_address,
"token_address": channel.token_address,
"partner_address": channel.partner_address,
"settle_timeout": channel.settle_timeout,
"balance": channel.contract_balance,
"state": channel.state
}
| mit | 6,570,371,787,523,456,000 | 25.19774 | 82 | 0.671986 | false |
omnirom/android_external_chromium-org | tools/telemetry/telemetry/page/page_test_unittest.py | 33 | 6225 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
from telemetry import benchmark
from telemetry.core import exceptions
from telemetry.core import wpr_modes
from telemetry.page import page as page_module
from telemetry.page import page_set
from telemetry.page import page_set_archive_info
from telemetry.page import page_test
from telemetry.unittest import options_for_unittests
from telemetry.unittest import page_test_test_case
from telemetry.value import scalar
class PageTestThatFails(page_test.PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
raise exceptions.IntentionalException
class PageTestThatHasDefaults(page_test.PageTest):
def AddCommandLineArgs(self, parser):
parser.add_option('-x', dest='x', default=3)
def ValidateAndMeasurePage(self, page, tab, results):
if not hasattr(self.options, 'x'):
raise page_test.MeasurementFailure('Default option was not set.')
if self.options.x != 3:
raise page_test.MeasurementFailure(
'Expected x == 3, got x == ' + self.options.x)
results.AddValue(scalar.ScalarValue(page, 'x', 'ms', 7))
class PageTestForBlank(page_test.PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
contents = tab.EvaluateJavaScript('document.body.textContent')
if contents.strip() != 'Hello world':
raise page_test.MeasurementFailure(
'Page contents were: ' + contents)
class PageTestForReplay(page_test.PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
# Web Page Replay returns '404 Not found' if a page is not in the archive.
contents = tab.EvaluateJavaScript('document.body.textContent')
if '404 Not Found' in contents.strip():
raise page_test.MeasurementFailure('Page not in archive.')
class PageTestQueryParams(page_test.PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
query = tab.EvaluateJavaScript('window.location.search')
expected = '?foo=1'
if query.strip() != expected:
raise page_test.MeasurementFailure(
'query was %s, not %s.' % (query, expected))
class PageTestWithAction(page_test.PageTest):
def __init__(self):
super(PageTestWithAction, self).__init__('RunTestAction')
def ValidateAndMeasurePage(self, page, tab, results):
pass
class PageWithAction(page_module.Page):
def __init__(self, url, ps):
super(PageWithAction, self).__init__(url, ps, ps.base_dir)
self.run_test_action_called = False
def RunTestAction(self, _):
self.run_test_action_called = True
class PageTestUnitTest(page_test_test_case.PageTestTestCase):
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
def testGotToBlank(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
measurement = PageTestForBlank()
all_results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(0, len(all_results.failures))
def testGotQueryParams(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html?foo=1')
measurement = PageTestQueryParams()
all_results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(0, len(all_results.failures))
def testFailure(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
measurement = PageTestThatFails()
all_results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(1, len(all_results.failures))
def testDefaults(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
measurement = PageTestThatHasDefaults()
all_results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(len(all_results.all_page_specific_values), 1)
self.assertEquals(
all_results.all_page_specific_values[0].value, 7)
# This test is disabled because it runs against live sites, and needs to be
# fixed. crbug.com/179038
@benchmark.Disabled
def testRecordAndReplay(self):
test_archive = '/tmp/google.wpr'
google_url = 'http://www.google.com/'
foo_url = 'http://www.foo.com/'
archive_info_template = ("""
{
"archives": {
"%s": ["%s"]
}
}
""")
try:
ps = page_set.PageSet()
measurement = PageTestForReplay()
# First record an archive with only www.google.com.
self._options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
ps.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo(
'', '', json.loads(archive_info_template %
(test_archive, google_url)))
ps.pages = [page_module.Page(google_url, ps)]
all_results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(0, len(all_results.failures))
# Now replay it and verify that google.com is found but foo.com is not.
self._options.browser_options.wpr_mode = wpr_modes.WPR_REPLAY
ps.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo(
'', '', json.loads(archive_info_template % (test_archive, foo_url)))
ps.pages = [page_module.Page(foo_url, ps)]
all_results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(1, len(all_results.failures))
ps.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo(
'', '', json.loads(archive_info_template %
(test_archive, google_url)))
ps.pages = [page_module.Page(google_url, ps)]
all_results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(0, len(all_results.failures))
self.assertTrue(os.path.isfile(test_archive))
finally:
if os.path.isfile(test_archive):
os.remove(test_archive)
def testRunActions(self):
ps = self.CreateEmptyPageSet()
page = PageWithAction('file://blank.html', ps)
ps.AddPage(page)
measurement = PageTestWithAction()
self.RunMeasurement(measurement, ps, options=self._options)
self.assertTrue(page.run_test_action_called)
| bsd-3-clause | -688,658,074,089,056,600 | 36.053571 | 79 | 0.706506 | false |
ioram7/keystone-federado-pgid2013 | build/sqlalchemy/lib/sqlalchemy/orm/persistence.py | 17 | 30463 | # orm/persistence.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby
from sqlalchemy import sql, util, exc as sa_exc
from sqlalchemy.orm import attributes, sync, \
exc as orm_exc
from sqlalchemy.orm.util import _state_mapper, state_str
def save_obj(base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
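    # Rough flow, as implemented by the helpers below:
    #   _organize_states_for_save()   - split states into INSERT vs UPDATE,
    #                                   firing before_insert/before_update
    #   _collect_insert_commands() /
    #   _collect_update_commands()    - gather per-table parameter sets
    #   _emit_insert_statements() /
    #   _emit_update_statements()     - execute the statements per table
    #   _finalize_insert_update_commands() - expire readonly attributes and
    #                                   fire after_insert/after_update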
states_to_insert, states_to_update = _organize_states_for_save(
base_mapper,
states,
uowtransaction)
cached_connections = _cached_connection_dict(base_mapper)
for table, mapper in base_mapper._sorted_tables.iteritems():
insert = _collect_insert_commands(base_mapper, uowtransaction,
table, states_to_insert)
update = _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update)
if update:
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
if insert:
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
table, insert)
_finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = _organize_states_for_post_update(
base_mapper,
states, uowtransaction)
for table, mapper in base_mapper._sorted_tables.iteritems():
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, states_to_update,
post_update_cols)
if update:
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = _organize_states_for_delete(
base_mapper,
states,
uowtransaction)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(table_to_mapper.keys()):
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
mapper = table_to_mapper[table]
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
states_to_insert = []
states_to_update = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if not has_identity and not row_switch:
states_to_insert.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
else:
states_to_update.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
return states_to_insert, states_to_update
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return list(_connections_for_states(base_mapper, uowtransaction,
states))
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
states_to_delete = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
states_to_delete.append((state, dict_, mapper,
bool(state.key), connection))
return states_to_delete
def _collect_insert_commands(base_mapper, uowtransaction, table,
states_to_insert):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
insert = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
has_all_pks = True
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col:
params[col.key] = mapper.version_id_generator(None)
else:
# pull straight from the dict for
# pending objects
prop = mapper._columntoproperty[col]
value = state_dict.get(prop.key, None)
if value is None:
if col in pks:
has_all_pks = False
elif col.default is None and \
col.server_default is None:
params[col.key] = value
elif isinstance(value, sql.ClauseElement):
value_params[col] = value
else:
params[col.key] = value
insert.append((state, state_dict, params, mapper,
connection, value_params, has_all_pks))
return insert
def _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
update = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
hasdata = hasnull = False
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col:
params[col._label] = \
mapper._get_committed_state_attr_by_column(
row_switch or state,
row_switch and row_switch.dict
or state_dict,
col)
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
params[col.key] = history.added[0]
hasdata = True
else:
params[col.key] = mapper.version_id_generator(
params[col._label])
# HACK: check for history, in case the
# history is only
# in a different table than the one
# where the version_id_col is.
for prop in mapper._columntoproperty.itervalues():
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
hasdata = True
else:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
if isinstance(history.added[0],
sql.ClauseElement):
value_params[col] = history.added[0]
else:
value = history.added[0]
params[col.key] = value
if col in pks:
if history.deleted and \
not row_switch:
# if passive_updates and sync detected
# this was a pk->pk sync, use the new
# value to locate the row, since the
# DB would already have set this
if ("pk_cascaded", state, col) in \
uowtransaction.attributes:
value = history.added[0]
params[col._label] = value
else:
# use the old value to
# locate the row
value = history.deleted[0]
params[col._label] = value
hasdata = True
else:
# row switch logic can reach us here
# remove the pk from the update params
# so the update doesn't
# attempt to include the pk in the
# update statement
del params[col.key]
value = history.added[0]
params[col._label] = value
if value is None:
hasnull = True
else:
hasdata = True
elif col in pks:
value = state.manager[prop.key].impl.get(
state, state_dict)
if value is None:
hasnull = True
params[col._label] = value
if hasdata:
if hasnull:
raise sa_exc.FlushError(
"Can't update table "
"using NULL for primary "
"key value")
update.append((state, state_dict, params, mapper,
connection, value_params))
return update
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
update = []
for state, state_dict, mapper, connection in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
state,
state_dict, col)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
update.append((state, state_dict, params, mapper,
connection))
return update
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
delete = util.defaultdict(list)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
if not has_identity or table not in mapper._pks_by_table:
continue
params = {}
delete[connection].append(params)
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_state_attr_by_column(
state, state_dict, col)
if value is None:
raise sa_exc.FlushError(
"Can't delete from table "
"using NULL for primary "
"key value")
if mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col):
params[mapper.version_id_col.key] = \
mapper._get_committed_state_attr_by_column(
state, state_dict,
mapper.version_id_col)
return delete
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
if needs_version_id:
clause.clauses.append(mapper.version_id_col ==\
sql.bindparam(mapper.version_id_col._label,
type_=mapper.version_id_col.type))
return table.update(clause)
statement = base_mapper._memo(('update', table), update_stmt)
rows = 0
for state, state_dict, params, mapper, \
connection, value_params in update:
if value_params:
c = connection.execute(
statement.values(value_params),
params)
else:
c = cached_connections[connection].\
execute(statement, params)
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c.context.prefetch_cols,
c.context.postfetch_cols,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
if connection.dialect.supports_sane_rowcount:
if rows != len(update):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched." %
(table.description, len(update), rows))
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
c.dialect.dialect_description,
stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, table, insert):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
statement = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks), \
records in groupby(insert,
lambda rec: (rec[4],
rec[2].keys(),
bool(rec[5]),
rec[6])
):
if has_all_pks and not hasvalue:
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].\
execute(statement, multiparams)
for (state, state_dict, params, mapper,
conn, value_params, has_all_pks), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c.context.prefetch_cols,
c.context.postfetch_cols,
last_inserted_params,
value_params)
else:
for state, state_dict, params, mapper, \
connection, value_params, \
has_all_pks in records:
if value_params:
result = connection.execute(
statement.values(value_params),
params)
else:
result = cached_connections[connection].\
execute(statement, params)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper._columntoproperty[col]
if state_dict.get(prop.key) is None:
# TODO: would rather say:
#state_dict[prop.key] = pk
mapper._set_state_attr_by_column(
state,
state_dict,
col, pk)
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
result.context.prefetch_cols,
result.context.postfetch_cols,
result.context.compiled_parameters[0],
value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
return table.update(clause)
statement = base_mapper._memo(('post_update', table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (rec[4], rec[2].keys())
):
connection = key[0]
multiparams = [params for state, state_dict,
params, mapper, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type))
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
return table.delete(clause)
for connection, del_objects in delete.iteritems():
statement = base_mapper._memo(('delete', table), delete_stmt)
connection = cached_connections[connection]
if need_version_id:
# TODO: need test coverage for this [ticket:1761]
if connection.dialect.supports_sane_rowcount:
rows = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows += c.rowcount
if rows != len(del_objects):
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched." %
(table.description, len(del_objects), c.rowcount)
)
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
else:
connection.execute(statement, del_objects)
def _finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert + \
states_to_update:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
state.expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled,
# refresh whatever has been expired.
if base_mapper.eager_defaults and state.unloaded:
state.key = base_mapper._identity_key_from_state(state)
uowtransaction.session.query(base_mapper)._load_on_ident(
state.key, refresh_state=state,
only_load_props=state.unloaded)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
state, dict_, prefetch_cols, postfetch_cols,
params, value_params):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
if mapper.version_id_col is not None:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
if postfetch_cols:
state.expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = None
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
elif not connection:
connection = uowtransaction.transaction.connection(
base_mapper)
mapper = _state_mapper(state)
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn:conn.execution_options(
compiled_cache=base_mapper._compiled_cache
))
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q:q.key[1])
| apache-2.0 | -1,124,312,332,332,324,600 | 38.105263 | 84 | 0.52119 | false |
sebdelsol/pyload | module/plugins/hoster/MyvideoDe.py | 1 | 1416 | # -*- coding: utf-8 -*-
import re
from module.plugins.Hoster import Hoster
from module.unescape import unescape
class MyvideoDe(Hoster):
__name__ = "MyvideoDe"
__type__ = "hoster"
__version__ = "0.9"
__pattern__ = r'http://(?:www\.)?myvideo\.de/watch/'
__description__ = """Myvideo.de hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("spoob", "[email protected]")]
def process(self, pyfile):
self.pyfile = pyfile
self.download_html()
pyfile.name = self.get_file_name()
self.download(self.get_file_url())
def download_html(self):
self.html = self.load(self.pyfile.url)
def get_file_url(self):
videoId = re.search(r"addVariable\('_videoid','(.*)'\);p.addParam\('quality'", self.html).group(1)
videoServer = re.search("rel='image_src' href='(.*)thumbs/.*' />", self.html).group(1)
file_url = videoServer + videoId + ".flv"
return file_url
def get_file_name(self):
file_name_pattern = r'<h1 class=\'globalHd\'>(.*)</h1>'
return unescape(re.search(file_name_pattern, self.html).group(1).replace("/", "") + '.flv')
def file_exists(self):
self.download_html()
self.load(str(self.pyfile.url), cookies=False, just_header=True)
if self.req.lastEffectiveURL == "http://www.myvideo.de/":
return False
return True
| gpl-3.0 | 5,561,081,885,100,587,000 | 27.897959 | 106 | 0.579096 | false |
domob1812/crowncoin | qa/rpc-tests/mempool_spendcoinbase.py | 10 | 2772 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
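# Concretely, with the chain at height 200 as in this test: the coinbase of
# block 101 can appear in block 201, so spending it is accepted into the
# mempool now, while the coinbase of block 102 is still immature and must be
# rejected until another block is mined.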
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
# get mined. Coinbase at height chain_height-100+2 is
# is too immature to spend.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ self.create_tx(txid, node0_address, 500) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].setgenerate(True, 1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
| mit | 7,674,461,438,736,598,000 | 39.173913 | 92 | 0.674242 | false |
decodio/l10n_hr | l10n_hr_vat/todo/report_base/reports_common.py | 1 | 4807 | # -*- coding: utf-8 -*-
import pytz
from datetime import datetime
from openerp.osv import osv, fields
def get_current_datetime():
    # DB: the server must run on UTC time... this returns the correct local time for the XML output, fiscalization etc.
zg = pytz.timezone('Europe/Zagreb')
return zg.normalize(pytz.utc.localize(datetime.utcnow()).astimezone(zg)).strftime("%Y-%m-%dT%H:%M:%S")
def get_izvjesce_sastavio(self, cr, uid, context=None):
    """ For the XML report
return: {'naziv': ime_prezime,
'ime': ime
'prezime':prezime}
"""
res={}
cr.execute("select state from ir_module_module where name='hr'")
hr = cr.fetchone()
if hr and hr[0]=='installed':
department = self.pool.get('hr.department')
main_id = department.search(cr, uid, [('parent_id','=',False)])
if len(main_id)>1:
raise osv.except_osv((u'Greška!'),(u'Vaša tvrtka ima više glavnih odjela'))
elif len(main_id)==0:
raise osv.except_osv((u'Greška!'),(u'Vaša tvrtka nema definirane odjele'))
else:
department = department.browse(cr, uid, main_id[0])
if not department.manager_id :
raise osv.except_osv((u'Greška!'),(u'Vaša tvrtka nema definiranu odgovornu osobu (manager)'))
manager_name = department.manager_id.name_related
name = manager_name.split()
res['naziv']=manager_name
if len(name) == 2:
res['ime']=name[0]
res['prezime']=name[1]
elif len(name)>2:
                # TODO: offer a dialog to choose between "first first"+"last" and "first"+"last last"...
raise osv.except_osv((u'Greška!'),('Imate dva imena i/ili dva prezimena, spojite ih znakom "-" bez razmaka'))
elif len(name)==1:
raise osv.except_osv((u'Greška!'),('Ime odgovorne osobe nije ispravno (nedostaje ime ili prezime)'))
else:
raise osv.except_osv(u'Greška!',u'Nije instaliran modul HR - nije moguće dohvatiti osobu koja je sastavila izvješće')
return res
def check_valid_phone(self, phone):
    """For the VAT (PDV) form:
    the phone number must start with '+' followed by 8-13 digits, e.g. +38514445555
"""
if not phone:
return False
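    # e.g. "00 385 1 444 5555" is normalized to "+38514445555"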
    # also strip "-", which the error message below explicitly allows as a grouping character
    phone = phone.replace(" ","").replace("-","").replace("/","").replace(",","").replace("(","").replace(")","")
if phone.startswith('00'):
phone = '+' + phone[2:]
if not phone.startswith('+'):
phone = '+' + phone
    if not (7 <= len(phone) <= 14):
raise osv.except_osv(u'Greška u postavkama!',u'Unešeni broj telefona/faxa : %s u postavkama tvrtke nije ispravan\nOčekivani format je +385xxxxxxxx , (dozvoljno je korištenje znakova za razdvajanje i grupiranje (-/) i razmaka' % phone)
return phone
def get_company_data(self, cr, uid, report_type, context=None, hr_only=True):
"""
    Fetch the company data, validating all required fields and their structure.
@report_type : PDV,
todo: JOPPD ...
"""
company_id = context.get('company_id', 1)
company = self.pool.get('res.company').browse(cr, uid, company_id)
res = {}
if hr_only and company.country_id.code != 'HR':
raise osv.except_osv('Error!','This report is for CROATIA companies only!')
    # validate the company data
if not company.city:
raise osv.except_osv(u'Nedostaju podaci!',u'U adresi poduzeća nije upisan grad!')
if not company.street:
raise osv.except_osv(u'Nedostaju podaci!',u'U adresi poduzeća nije upisana ulica!')
if company.street2:
        # TODO: check whether the house number is valid
pass
if not company.vat:
raise osv.except_osv(u'Nedostaju podaci!',u'U postavkama poduzeća nedostaje OIB!')
oib = company.vat
if oib.startswith('HR'):
oib = oib[2:]
res = {
'naziv': company.name,
'ulica': company.street,
'kbr': company.street2 and company.street2 or False,
'mjesto': company.city,
'posta':company.zip,
'email': company.partner_id.email and company.partner_id.email or False,
'oib': oib,
'tel': check_valid_phone(self, company.partner_id.phone),
'fax': check_valid_phone(self, company.partner_id.fax),
}
    # 2. Tax office branch (porezna ispostava)
if report_type=='PDV':
if not company.porezna_code:
raise osv.except_osv(u'Nedostaju podaci!',u'U postavkama poduzeća unesite broj porezne ispostave!')
res['porezna_code'] = company.porezna_code
if not company.porezna:
raise osv.except_osv(u'Nedostaju podaci!',u'U postavkama poduzeća unesite naziv porezne ispostave!')
res['porezna']=company.porezna
return res
| agpl-3.0 | 7,125,257,429,825,850,000 | 39.184874 | 242 | 0.608032 | false |
jendap/tensorflow | tensorflow/contrib/learn/python/learn/models.py | 42 | 16048 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various high level TF models (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.ops import losses_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.summary import summary
from tensorflow.python.util.deprecation import deprecated
@deprecated(None, 'Consider using a tf.estimator.LinearRegressor')
def linear_regression_zero_init(x, y):
"""Linear regression subgraph with zero-value initial weights and bias.
Args:
x: tensor or placeholder for input features.
y: tensor or placeholder for labels.
Returns:
Predictions and loss tensors.
"""
return linear_regression(x, y, init_mean=0.0, init_stddev=0.0)
@deprecated(None, 'Consider using a class from tf.estimator.LinearClassifier')
def logistic_regression_zero_init(x, y):
"""Logistic regression subgraph with zero-value initial weights and bias.
Args:
x: tensor or placeholder for input features.
y: tensor or placeholder for labels.
Returns:
Predictions and loss tensors.
"""
return logistic_regression(x, y, init_mean=0.0, init_stddev=0.0)
@deprecated(None, 'Consider using a class from tf.estimator.')
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
"""Creates linear regression TensorFlow subgraph.
Args:
x: tensor or placeholder for input features.
y: tensor or placeholder for labels.
init_mean: the mean value to use for initialization.
init_stddev: the standard deviation to use for initialization.
Returns:
Predictions and loss tensors.
Side effects:
The variables linear_regression.weights and linear_regression.bias are
initialized as follows. If init_mean is not None, then initialization
will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
"""
with vs.variable_scope('linear_regression'):
scope_name = vs.get_variable_scope().name
summary.histogram('%s.x' % scope_name, x)
summary.histogram('%s.y' % scope_name, y)
dtype = x.dtype.base_dtype
y_shape = y.get_shape()
if len(y_shape) == 1:
output_shape = 1
else:
output_shape = y_shape[1]
# Set up the requested initialization.
if init_mean is None:
weights = vs.get_variable(
'weights', [x.get_shape()[1], output_shape], dtype=dtype)
bias = vs.get_variable('bias', [output_shape], dtype=dtype)
else:
weights = vs.get_variable(
'weights', [x.get_shape()[1], output_shape],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
bias = vs.get_variable(
'bias', [output_shape],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
summary.histogram('%s.weights' % scope_name, weights)
summary.histogram('%s.bias' % scope_name, bias)
return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
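# Usage sketch (illustrative only; the placeholder shapes and the optimizer
# are assumptions, not part of this module):
#   x = tf.placeholder(tf.float32, [None, n_features])
#   y = tf.placeholder(tf.float32, [None, 1])
#   predictions, loss = linear_regression(x, y)
#   train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)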
@deprecated(None, 'Consider using a class from tf.estimator.')
def logistic_regression(x,
y,
class_weight=None,
init_mean=None,
init_stddev=1.0):
"""Creates logistic regression TensorFlow subgraph.
Args:
x: tensor or placeholder for input features,
shape should be [batch_size, n_features].
y: tensor or placeholder for labels (one-hot),
shape should be [batch_size, n_classes].
class_weight: tensor, [n_classes], where for each class
it has weight of the class. If not provided
will check if graph contains tensor `class_weight:0`.
If that is not provided either all ones are used.
init_mean: the mean value to use for initialization.
init_stddev: the standard deviation to use for initialization.
Returns:
Predictions and loss tensors.
Side effects:
The variables linear_regression.weights and linear_regression.bias are
initialized as follows. If init_mean is not None, then initialization
will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
"""
with vs.variable_scope('logistic_regression'):
scope_name = vs.get_variable_scope().name
summary.histogram('%s.x' % scope_name, x)
summary.histogram('%s.y' % scope_name, y)
dtype = x.dtype.base_dtype
# Set up the requested initialization.
if init_mean is None:
weights = vs.get_variable(
'weights', [x.get_shape()[1], y.get_shape()[-1]], dtype=dtype)
bias = vs.get_variable('bias', [y.get_shape()[-1]], dtype=dtype)
else:
weights = vs.get_variable(
'weights', [x.get_shape()[1], y.get_shape()[-1]],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
bias = vs.get_variable(
'bias', [y.get_shape()[-1]],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
summary.histogram('%s.weights' % scope_name, weights)
summary.histogram('%s.bias' % scope_name, bias)
# If no class weight provided, try to retrieve one from pre-defined
# tensor name in the graph.
if not class_weight:
try:
class_weight = ops.get_default_graph().get_tensor_by_name(
'class_weight:0')
except KeyError:
pass
return losses_ops.softmax_classifier(
x, y, weights, bias, class_weight=class_weight)
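# Usage sketch (illustrative only): y is expected to be one-hot encoded,
# e.g. shape [batch_size, n_classes]; a per-class weight tensor named
# 'class_weight:0', if present in the graph, is picked up automatically.
#   predictions, loss = logistic_regression(x, y_one_hot)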
## This will be in TensorFlow 0.7.
## TODO(ilblackdragon): Clean this up when it's released
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply
reverses the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops_.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops_.unpack(s_reversed)
return result
@deprecated(None, 'Please consider `tf.nn.bidirectional_dynamic_rnn`.')
def bidirectional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states
are ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
[batch_size x cell.state_size].
initial_state_bw: (optional) Same as for initial_state_fw.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int64 vector (tensor) of size
[batch_size],
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input), which
are depth-concatenated forward and backward outputs
state is the concatenated final state of the forward and backward RNN
Raises:
TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, contrib_rnn.RNNCell):
raise TypeError('cell_fw must be an instance of RNNCell')
if not isinstance(cell_bw, contrib_rnn.RNNCell):
raise TypeError('cell_bw must be an instance of RNNCell')
if not isinstance(inputs, list):
raise TypeError('inputs must be a list')
if not inputs:
raise ValueError('inputs must not be empty')
name = scope or 'BiRNN'
# Forward direction
with vs.variable_scope(name + '_FW'):
output_fw, state_fw = contrib_rnn.static_rnn(cell_fw, inputs,
initial_state_fw, dtype,
sequence_length)
# Backward direction
with vs.variable_scope(name + '_BW'):
tmp, state_bw = contrib_rnn.static_rnn(
cell_bw,
_reverse_seq(inputs, sequence_length), initial_state_bw, dtype,
sequence_length)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [
array_ops_.concat([fw, bw], 1) for fw, bw in zip(output_fw, output_bw)
]
return outputs, array_ops_.concat([state_fw, state_bw], 1)
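# Usage sketch (illustrative only; cell sizes and inputs are assumptions):
#   fw_cell = contrib_rnn.GRUCell(64)
#   bw_cell = contrib_rnn.GRUCell(64)
#   # inputs: list of T tensors, each of shape [batch_size, input_size]
#   outputs, state = bidirectional_rnn(fw_cell, bw_cell, inputs,
#                                      dtype=dtypes.float32)
#   # each output is [batch_size, 128]: forward and backward outputs concatenated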
# End of TensorFlow 0.7
@deprecated(None, 'Please consider tensorflow/tensor2tensor.')
def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,
target_predictor_fn, sequence_length, initial_state,
attn_length, attn_size, attn_vec_size):
"""Returns a function that creates a RNN TensorFlow subgraph.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument `x` for input and returns transformed `x`.
bidirectional: boolean, Whether this is a bidirectional rnn.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes `x`, `y` and returns predictions and loss
tensors.
sequence_length: If sequence_length is provided, dynamic calculation is
performed. This saves computational time when unrolling past max sequence
length. Required for bidirectional RNNs.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell
state.
Returns:
A function that creates the subgraph.
"""
def rnn_estimator(x, y):
"""RNN estimator with target predictor function on top."""
x = input_op_fn(x)
if cell_type == 'rnn':
cell_fn = contrib_rnn.BasicRNNCell
elif cell_type == 'gru':
cell_fn = contrib_rnn.GRUCell
elif cell_type == 'lstm':
cell_fn = functools.partial(
contrib_rnn.BasicLSTMCell, state_is_tuple=False)
else:
raise ValueError('cell_type {} is not supported. '.format(cell_type))
# TODO(ipolosukhin): state_is_tuple=False is deprecated
if bidirectional:
# forward direction cell
fw_cell = lambda: cell_fn(rnn_size)
bw_cell = lambda: cell_fn(rnn_size)
# attach attention cells if specified
if attn_length is not None:
def attn_fw_cell():
return contrib_rnn.AttentionCellWrapper(
fw_cell(),
attn_length=attn_length,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
state_is_tuple=False)
def attn_bw_cell():
return contrib_rnn.AttentionCellWrapper(
bw_cell(),
attn_length=attn_length,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
state_is_tuple=False)
else:
attn_fw_cell = fw_cell
attn_bw_cell = bw_cell
rnn_fw_cell = contrib_rnn.MultiRNNCell(
[attn_fw_cell() for _ in range(num_layers)], state_is_tuple=False)
# backward direction cell
rnn_bw_cell = contrib_rnn.MultiRNNCell(
[attn_bw_cell() for _ in range(num_layers)], state_is_tuple=False)
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
_, encoding = bidirectional_rnn(
rnn_fw_cell,
rnn_bw_cell,
x,
dtype=dtypes.float32,
sequence_length=sequence_length,
initial_state_fw=initial_state,
initial_state_bw=initial_state)
else:
rnn_cell = lambda: cell_fn(rnn_size)
if attn_length is not None:
def attn_rnn_cell():
return contrib_rnn.AttentionCellWrapper(
rnn_cell(),
attn_length=attn_length,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
state_is_tuple=False)
else:
attn_rnn_cell = rnn_cell
cell = contrib_rnn.MultiRNNCell(
[attn_rnn_cell() for _ in range(num_layers)], state_is_tuple=False)
_, encoding = contrib_rnn.static_rnn(
cell,
x,
dtype=dtypes.float32,
sequence_length=sequence_length,
initial_state=initial_state)
return target_predictor_fn(encoding, y)
return rnn_estimator
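# Usage sketch (illustrative only; every argument value below is an assumption):
#   model_fn = get_rnn_model(rnn_size=64, cell_type='gru', num_layers=1,
#                            input_op_fn=lambda x: x, bidirectional=False,
#                            target_predictor_fn=linear_regression,
#                            sequence_length=None, initial_state=None,
#                            attn_length=None, attn_size=None,
#                            attn_vec_size=None)
#   predictions, loss = model_fn(x, y)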
| apache-2.0 | -6,421,729,050,697,362,000 | 38.429975 | 93 | 0.657278 | false |
mamaddeveloper/teleadmin | tools/vote.py | 2 | 1778 | class VoteManager:
def __init__(self):
self.currentVote = None
def start(self, user, name):
if self.currentVote:
raise OngoingVoteException()
self.currentVote = Vote(name)
def vote(self, user, vote):
if not self.currentVote:
raise NoVoteException()
self.currentVote.vote(user["id"], vote)
def state(self):
if not self.currentVote:
raise NoVoteException()
return VoteResult(self.currentVote, False)
def close(self, user):
if not self.currentVote:
raise NoVoteException()
vote = self.currentVote
self.currentVote = None
return VoteResult(vote, True)
class VoteResult:
def __init__(self, vote, end):
if not isinstance(vote, Vote):
raise ValueError("Vote is not a vote")
self.name = vote.name
self.votesFor = vote.votesFor
self.votesAgainst = vote.votesAgainst
self.voteCount = len(vote.votes)
self.rate = 0 if self.voteCount == 0 else self.votesFor / self.voteCount
self.end = bool(end)
class Vote:
def __init__(self, name):
self.name = name
self.votesFor = 0
self.votesAgainst = 0
self.votes = []
def has_voted(self, user_id):
return user_id in self.votes
def vote(self, user_id, vote):
if self.has_voted(user_id):
raise AlreadyVoteException()
self.votes.append(user_id)
if vote:
self.votesFor += 1
else:
self.votesAgainst += 1
class VoteException(Exception):
pass
class OngoingVoteException(VoteException):
pass
class NoVoteException(VoteException):
pass
class AlreadyVoteException(VoteException):
pass
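# Minimal usage sketch (illustrative only; the user dicts are assumed to look
# like Telegram user objects that expose an "id" key):
#
#   manager = VoteManager()
#   manager.start({"id": 1}, "Ban spam bots?")
#   manager.vote({"id": 2}, True)
#   manager.vote({"id": 3}, False)
#   result = manager.close({"id": 1})
#   # result.votesFor == 1, result.votesAgainst == 1, result.end is True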
| mit | 7,388,641,537,576,051,000 | 25.537313 | 80 | 0.603487 | false |
Frodox/buildbot | master/buildbot/test/regressions/test_bad_change_properties_rows.py | 10 | 2880 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.trial import unittest
from buildbot.db import changes
from buildbot.test.fake import fakedb
from buildbot.test.util import connector_component
class TestBadRows(connector_component.ConnectorComponentMixin,
unittest.TestCase):
# See bug #1952 for details. This checks that users who used a development
# version between 0.8.3 and 0.8.4 get reasonable behavior even though some
# rows in the change_properties database do not contain a proper [value,
# source] tuple.
def setUp(self):
d = self.setUpConnectorComponent(
table_names=['patches', 'sourcestamps', 'changes',
'change_properties', 'change_files'])
@d.addCallback
def finish_setup(_):
self.db.changes = changes.ChangesConnectorComponent(self.db)
return d
def tearDown(self):
return self.tearDownConnectorComponent()
def test_bogus_row_no_source(self):
d = self.insertTestData([
fakedb.SourceStamp(id=10),
fakedb.ChangeProperty(changeid=13, property_name='devel',
property_value='"no source"'),
fakedb.Change(changeid=13, sourcestampid=10),
])
@d.addCallback
def get13(_):
return self.db.changes.getChange(13)
@d.addCallback
def check13(c):
self.assertEqual(c['properties'],
dict(devel=('no source', 'Change')))
return d
def test_bogus_row_jsoned_list(self):
d = self.insertTestData([
fakedb.SourceStamp(id=10),
fakedb.ChangeProperty(changeid=13, property_name='devel',
property_value='[1, 2]'),
fakedb.Change(changeid=13, sourcestampid=10),
])
@d.addCallback
def get13(_):
return self.db.changes.getChange(13)
@d.addCallback
def check13(c):
self.assertEqual(c['properties'],
dict(devel=([1, 2], 'Change')))
return d
| gpl-2.0 | 6,025,616,173,382,561,000 | 35 | 79 | 0.633333 | false |
JioCloud/horizon | openstack_dashboard/dashboards/admin/metering/tabs.py | 13 | 2197 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.utils.translation import ugettext_lazy as _
from horizon import messages
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.api import ceilometer
class GlobalStatsTab(tabs.Tab):
name = _("Stats")
slug = "stats"
template_name = ("admin/metering/stats.html")
preload = False
@staticmethod
def _get_flavor_names(request):
try:
flavors = api.nova.flavor_list(request, None)
return [f.name for f in flavors]
except Exception:
return ['m1.tiny', 'm1.small', 'm1.medium',
'm1.large', 'm1.xlarge']
def get_context_data(self, request):
meters = ceilometer.Meters(request)
if not meters._ceilometer_meter_list:
msg = _("There are no meters defined yet.")
messages.warning(request, msg)
context = {
'nova_meters': meters.list_nova(),
'neutron_meters': meters.list_neutron(),
'glance_meters': meters.list_glance(),
'cinder_meters': meters.list_cinder(),
'swift_meters': meters.list_swift(),
'kwapi_meters': meters.list_kwapi(),
}
return context
class DailyReportTab(tabs.Tab):
name = _("Daily Report")
slug = "daily_report"
template_name = ("admin/metering/daily.html")
def get_context_data(self, request):
context = template.RequestContext(request)
return context
class CeilometerOverviewTabs(tabs.TabGroup):
slug = "ceilometer_overview"
tabs = (DailyReportTab, GlobalStatsTab, )
sticky = True
| apache-2.0 | -8,233,409,523,309,516,000 | 30.84058 | 75 | 0.65635 | false |
ConnorGBrewster/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/mark/__init__.py | 33 | 5165 | """ generic mechanism for marking and selecting python functions. """
from __future__ import absolute_import, division, print_function
from _pytest.config import UsageError
from .structures import (
ParameterSet,
EMPTY_PARAMETERSET_OPTION,
MARK_GEN,
Mark,
MarkInfo,
MarkDecorator,
MarkGenerator,
transfer_markers,
get_empty_parameterset_mark,
)
from .legacy import matchkeyword, matchmark
__all__ = [
"Mark",
"MarkInfo",
"MarkDecorator",
"MarkGenerator",
"transfer_markers",
"get_empty_parameterset_mark",
]
class MarkerError(Exception):
"""Error in use of a pytest marker/attribute."""
def param(*values, **kw):
"""Specify a parameter in `pytest.mark.parametrize`_ calls or
:ref:`parametrized fixtures <fixture-parametrize-marks>`.
.. code-block:: python
@pytest.mark.parametrize("test_input,expected", [
("3+5", 8),
pytest.param("6*9", 42, marks=pytest.mark.xfail),
])
def test_eval(test_input, expected):
assert eval(test_input) == expected
:param values: variable args of the values of the parameter set, in order.
:keyword marks: a single mark or a list of marks to be applied to this parameter set.
:keyword str id: the id to attribute to this parameter set.
"""
return ParameterSet.param(*values, **kw)
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"-k",
action="store",
dest="keyword",
default="",
metavar="EXPRESSION",
help="only run tests which match the given substring expression. "
"An expression is a python evaluatable expression "
"where all names are substring-matched against test names "
"and their parent classes. Example: -k 'test_method or test_"
"other' matches all test functions and classes whose name "
"contains 'test_method' or 'test_other', while -k 'not test_method' "
"matches those that don't contain 'test_method' in their names. "
"Additionally keywords are matched to classes and functions "
"containing extra names in their 'extra_keyword_matches' set, "
"as well as functions which have names assigned directly to them.",
)
group._addoption(
"-m",
action="store",
dest="markexpr",
default="",
metavar="MARKEXPR",
help="only run tests matching given mark expression. "
"example: -m 'mark1 and not mark2'.",
)
group.addoption(
"--markers",
action="store_true",
help="show markers (builtin, plugin and per-project ones).",
)
parser.addini("markers", "markers for test functions", "linelist")
parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets")
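# Illustrative command lines for the options registered above:
#   pytest -k "test_method or test_other"   # substring-match against test names
#   pytest -m "mark1 and not mark2"         # filter by mark expression
#   pytest --markers                        # show builtin, plugin and per-project markers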
def pytest_cmdline_main(config):
import _pytest.config
if config.option.markers:
config._do_configure()
tw = _pytest.config.create_terminal_writer(config)
for line in config.getini("markers"):
parts = line.split(":", 1)
name = parts[0]
rest = parts[1] if len(parts) == 2 else ""
tw.write("@pytest.mark.%s:" % name, bold=True)
tw.line(rest)
tw.line()
config._ensure_unconfigure()
return 0
pytest_cmdline_main.tryfirst = True
def deselect_by_keyword(items, config):
keywordexpr = config.option.keyword.lstrip()
if keywordexpr.startswith("-"):
keywordexpr = "not " + keywordexpr[1:]
selectuntil = False
if keywordexpr[-1:] == ":":
selectuntil = True
keywordexpr = keywordexpr[:-1]
remaining = []
deselected = []
for colitem in items:
if keywordexpr and not matchkeyword(colitem, keywordexpr):
deselected.append(colitem)
else:
if selectuntil:
keywordexpr = None
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
def deselect_by_mark(items, config):
matchexpr = config.option.markexpr
if not matchexpr:
return
remaining = []
deselected = []
for item in items:
if matchmark(item, matchexpr):
remaining.append(item)
else:
deselected.append(item)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
def pytest_collection_modifyitems(items, config):
deselect_by_keyword(items, config)
deselect_by_mark(items, config)
def pytest_configure(config):
config._old_mark_config = MARK_GEN._config
if config.option.strict:
MARK_GEN._config = config
empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
if empty_parameterset not in ("skip", "xfail", None, ""):
raise UsageError(
"{!s} must be one of skip and xfail,"
" but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)
)
def pytest_unconfigure(config):
MARK_GEN._config = getattr(config, "_old_mark_config", None)
| mpl-2.0 | 3,887,302,186,379,557,000 | 28.683908 | 89 | 0.62575 | false |
Jorge-Rodriguez/ansible | lib/ansible/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py | 31 | 3834 | #!/usr/bin/python
# Copyright: (c) 2018, Juergen Wiebe <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: utm_network_interface_address
author:
- Juergen Wiebe (@steamx)
short_description: Create, update or destroy network/interface_address object
description:
- Create, update or destroy a network/interface_address object in SOPHOS UTM.
- This module needs to have the REST Ability of the UTM to be activated.
version_added: "2.8"
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
address:
description:
- The ip4 address of the network/interface_address object.
required: true
address6:
description:
- The ip6 address of the network/interface_address object.
required: false
comment:
description:
- An optional comment to add to the object
resolved:
description:
- Whether or not the object is resolved
resolved6:
description:
- Whether or not the object is resolved
extends_documentation_fragment:
- utm
"""
EXAMPLES = """
# Create a network interface address
- name: utm network interface address
  utm_network_interface_address:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestNetworkInterfaceAddress
address: 0.0.0.0
state: present
# Remove a network interface address
- name: utm network interface address
  utm_network_interface_address:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestNetworkInterfaceAddress
address: 0.0.0.0
state: absent
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: string
_locked:
description: Whether or not the object is currently locked
type: boolean
_type:
description: The type of the object
type: string
name:
description: The name of the object
type: string
address:
description: The ip4 address of the network/interface_address object
type: string
address6:
description: The ip6 address of the network/interface_address object
type: string
comment:
description: The comment string
type: string
resolved:
description: Whether or not the object is resolved
type: boolean
resolved6:
description: Whether or not the object is resolved
type: boolean
"""
from ansible.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
endpoint = "network/interface_address"
key_to_check_for_changes = ["comment", "address"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
address=dict(type='str', required=True),
comment=dict(type='str', required=False, default=""),
address6=dict(type='str', required=False),
            resolved=dict(type='bool', required=False),
            resolved6=dict(type='bool', required=False)
)
)
try:
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 | 6,171,150,039,739,485,000 | 27.191176 | 92 | 0.630151 | false |
jtallieu/dell-wsman-client-api-python | wsman/cache/ordereddict.py | 2 | 4542 | ## {{{ http://code.activestate.com/recipes/576693/ (r6)
"""
OrderedDict
@copyright: 2011
@author: Joseph Tallieu <[email protected]>
@organization: Dell Inc. - PG Validation
@license: GNU LGLP v2.1
"""
# This file is part of WSManAPI.
#
# WSManAPI is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# WSManAPI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with WSManAPI. If not, see <http://www.gnu.org/licenses/>.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link[1]
link_next = link[2]
link_prev[2] = link_next
link_next[1] = link_prev
root = self.__end
if last:
last = root[1]
link[1] = last
link[2] = root
last[2] = root[1] = link
else:
first = root[2]
link[1] = root
link[2] = first
root[2] = first[1] = link
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
## end of http://code.activestate.com/recipes/576693/ }}}
| gpl-3.0 | 5,136,297,606,282,613,000 | 28.686275 | 80 | 0.547556 | false |
sergioSEa/Developed_scripts | fastq-stats.py | 1 | 8236 | #!/usr/bin/python
import argparse
from PIL import Image, ImageDraw, ImageFont, ImageOps
parser = argparse.ArgumentParser()
parser.add_argument('-fasq', action="store", dest = 'File', required = "True")
parser.add_argument('-out', action="store", dest = 'out', required = "True")
args = parser.parse_args()
reverse = "no"
files = args.File
out = args.out
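# Example invocation (illustrative file names):
#   python fastq-stats.py -fasq sample_reads.fastq -out quality_boxplot.png
# The script prints 1 and exits early when the quality encoding does not look
# like Phred+33; otherwise it writes a per-position quality boxplot to -out.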
#phred_not="J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,[,\,],^,_,`,a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,{,|,},~"
def character_to_ASCII(string):
st = []
phred_result = 0
for items in string:
if len(items)>1:
for i in items:
ascii = ord(i)
if int(ascii) >= 75:
phred_result = 1
ascii = ascii-33
else:
ascii = ord(items)
ascii = ascii-33
if ascii >= 75:
phred_result = 1#if ascii not in -33 result = 1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
st.append(int(ascii))
return st,phred_result
def fasq_preprocess(fil):
fastaq_list = []
i = 0
with open(fil) as fastaq:
p = 1
for lines in fastaq:
lines = lines.rstrip()
if p%4 == 0:
fastaq_list.append(lines)
i += 1
p +=1
if i > 20000:
break
ascii_check = character_to_ASCII(fastaq_list)[1]
return fastaq_list, ascii_check
def fasq_process(fil,x,group):
dic = {}
dic[str(x)+"_"+str(x+group)] = []
i = 0
p = 1
with open(fil) as fastaq:
for lines in fastaq:
lines = lines.rstrip()
if p%4== 0:
for items in range(x,x+group):
#print x, x+group,items
try:
dic[str(x)+"_"+str(x+group)].append(lines[items])
					except IndexError:
						pass
i += 1
if i >= 5000:
break
p += 1
qual_dic= {}
for position in dic:
qual_dic[position] = character_to_ASCII(dic[position])[0]
return qual_dic
def average(lista):
n = 0
l = []
for items in lista:
l.append(float(items))
n += 1
try:
average = sum(l)/n
except:
average = 0
return average
def boxplot_stats(lista):
	# Median, quartiles and whisker limits (1.5 * IQR rule) for a list of scores.
	lista = sorted(lista)
	lenght = len(lista)
	per_50 = lista[lenght // 2]
	per_25 = lista[(lenght + 1) // 4]
	per_75 = lista[min((lenght + 1) * 3 // 4, lenght - 1)]
	IQR = float(per_75) - float(per_25)
	Extreme_positive = 1.5 * IQR + float(per_75)
	Extreme_negative = float(per_25) - 1.5 * IQR
	if Extreme_negative < 0:
		Extreme_negative = 0
	if float(min(lista)) > float(Extreme_negative):
		Extreme_negative = min(lista)
	if float(max(lista)) < float(Extreme_positive):
		Extreme_positive = max(lista)
	return float(Extreme_negative), float(per_25), float(per_50), float(per_75), float(Extreme_positive)
def calculations(dic_pos):
for item in dic_pos:
average_val = average(dic_pos[item])
stats = boxplot_stats(dic_pos[item])
return average_val,stats[0],stats[1],stats[2],stats[3],stats[4]
def lenght_reads_cal(pre_dic):
value_list = []
biggest = 0
for reads in pre_dic:
if len(reads) > biggest:
biggest = len(reads)
return biggest
def Draw_box_plot(table,out):
fnt1 = ImageFont.truetype('./fonts/VeraMono.ttf', 14)
#Size of the window
a = 60
b = 70
c = 20
x_window = len(table)*10+ 10+ a + b
y_window = 460
#Generation of the file
im = Image.new("RGB", (x_window, y_window), (255,255,255))
draw = ImageDraw.Draw(im)
	#Creation of the axes: axes start with an indentation of 20 px above, below and beside. Each Phred quality score takes a proportional share of the y_window px
size_y_axis = y_window-80 #Total size minus above and below indentations
position_y_axis= size_y_axis+20
draw.line(((a, c) + (a, position_y_axis)), fill=(0, 0, 0, 0), width=1)
size_x_axis = len(table)*10 +10 #number of positions*10 pxls which is one will take + 10 for the position 1.
draw.line(((a, position_y_axis) + (a+size_x_axis, position_y_axis)), fill=(0, 0, 0, 0), width=1)
#Close chart
draw.line(((a, c) + (size_x_axis+a, c)), fill=(0, 0, 0, 0), width=1)
draw.line(((size_x_axis+a, c) + (size_x_axis+a, position_y_axis)), fill=(0, 0, 0, 0), width=1)
#Vertical values
step = float(size_y_axis)/42
j = 0
for values in range(42,-1,-1):
draw.line(((a,20+abs(values-42)*step) + (a-4,20+abs(values-42)*step)), fill=(0, 0, 0, 0), width=1)
if values%5 == 0:
draw.line(((a,20+abs(values-42)*step) + (a-6,20+abs(values-42)*step)), fill=(0, 0, 0, 0), width=1)
text = abs(values-42)
w, h = draw.textsize(str(text))
draw.text((a-25,20+text*step-h/2-3), str(values), font=fnt1, fill=(0,0,0,0))
j +=1
i = 10 + a #indentation + space for the first box (same space as in size_x_axis)
for position in table:
name = position[0]
position = position[1:]
#write the position in the x axis
draw.line(((i, position_y_axis) + (i, position_y_axis+4)), fill=(0, 0, 0, 0), width=1)
if (i-a)%50 == 0 :
draw.line(((i, position_y_axis) + (i, position_y_axis+6)), fill=(0, 0, 0, 0), width=1)
w, h = draw.textsize(str(name))
if len(name)==3:draw.text((i-w/2-4+1 , position_y_axis+10), name, font=fnt1, fill=(0,0,0,0))
if len(name) == 4: draw.text((i-w/2-3 +4, position_y_axis+10), name, font=fnt1, fill=(0,0,0,0))
elif len(name) == 5: draw.text((i-w/2-4, position_y_axis+10), name, font=fnt1, fill=(0,0,0,0))
elif len(name) == 6: draw.text((i-w/2-9, position_y_axis+10), name, font=fnt1, fill=(0,0,0,0))
elif len(name) == 7: draw.text((i-w/2-14, position_y_axis+10), name, font=fnt1, fill=(0,0,0,0))
#Create a line from the begining to the end of the parameters
beg = float(position[1]) * step
end = float(position[-1]) * step
draw.line(((i, position_y_axis-beg) + (i, position_y_axis-end)), fill=(0, 0, 0, 0), width=1)
#Close the whiskers
draw.line(((i-1, position_y_axis-beg)+(i+1, position_y_axis-beg)),fill=(0, 0, 0, 0), width=1)
draw.line(((i-1, position_y_axis-end)+(i+1, position_y_axis-end)),fill=(0, 0, 0, 0), width=1)
#Create the boxplot
beg = float(position[2]) * step
end = float(position[-2]) * step
draw.rectangle([(i-3, position_y_axis-beg), (i+3, position_y_axis-end)], fill=(24, 56, 214), outline= None)
#Draw the average and the MEDIANA?
av = float(position[0]) * step
med = float(position[3]) * step
draw.line(((i-3, position_y_axis-med) + (i+3, position_y_axis-med)), fill=(191, 17, 54), width=1)
draw.line(((i, position_y_axis-av) + (i, position_y_axis-av)), fill=(50, 214, 25), width=1)
i +=10
#axiss draw:
x_name = "Position (bp)"
draw.text((size_x_axis/2, position_y_axis+35), x_name , font=fnt1, fill=(0,0,0,0)) #Horizontal
y_name ="Quality (phred)"
label = Image.new("RGB", (140, 20), (255,255,255))
draw2 = ImageDraw.Draw(label)
draw2.text((1, 1), y_name, font=fnt1, fill=(0,0,0))
label = label.rotate(90)
im.paste(label, (2,size_y_axis/2-50))
#Vertical
#save image, specifying the format with the extension
im.save(out)
pre_dic = fasq_preprocess(files)[0]
phred_result = fasq_preprocess(files)[1]
if phred_result == 1:
print phred_result
exit()
lenght_reads = int(lenght_reads_cal(pre_dic))
if lenght_reads<80: group = 1
if 100>=lenght_reads>80: group = 2
if lenght_reads>100: group = 5
x =0
final_dic= {}
#print range((lenght_reads/group)-1)
#exit()
final_list=[]
for repetitions in range((lenght_reads/group)-1):
lis = []
position_table=fasq_process(files,x,group)
result = calculations(position_table)
lis.append(str(x+1)+"-"+str(x+group))
lis.extend(result)
final_list.append(lis)
#final_dic[str(x+1)+"-"+str(x+group)]= result
x += group
Draw_box_plot(final_list,out)
print phred_result
| gpl-3.0 | 6,892,022,040,094,627,000 | 28.102473 | 164 | 0.631496 | false |
NaturalGIS/naturalgis_qgis | python/plugins/processing/algs/qgis/Delaunay.py | 15 | 5994 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Delaunay.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsApplication,
QgsField,
QgsFeatureRequest,
QgsFeatureSink,
QgsFeature,
QgsGeometry,
QgsPointXY,
QgsWkbTypes,
QgsProcessing,
QgsFields,
QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from . import voronoi
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class Delaunay(QgisAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
def icon(self):
return QgsApplication.getThemeIcon("/algorithms/mAlgorithmDelaunay.svg")
def svgIconPath(self):
return QgsApplication.iconPath("/algorithms/mAlgorithmDelaunay.svg")
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT, self.tr('Input layer'), [QgsProcessing.TypeVectorPoint]))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Delaunay triangulation'), type=QgsProcessing.TypeVectorPolygon))
def name(self):
return 'delaunaytriangulation'
def displayName(self):
return self.tr('Delaunay triangulation')
def processAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
fields = QgsFields()
fields.append(QgsField('POINTA', QVariant.Double, '', 24, 15))
fields.append(QgsField('POINTB', QVariant.Double, '', 24, 15))
fields.append(QgsField('POINTC', QVariant.Double, '', 24, 15))
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, QgsWkbTypes.Polygon, source.sourceCrs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
pts = []
ptDict = {}
ptNdx = -1
c = voronoi.Context()
features = source.getFeatures()
total = 100.0 / source.featureCount() if source.featureCount() else 0
for current, inFeat in enumerate(features):
if feedback.isCanceled():
break
geom = QgsGeometry(inFeat.geometry())
if geom.isNull():
continue
if geom.isMultipart():
points = geom.asMultiPoint()
else:
points = [geom.asPoint()]
for n, point in enumerate(points):
x = point.x()
y = point.y()
pts.append((x, y))
ptNdx += 1
ptDict[ptNdx] = (inFeat.id(), n)
feedback.setProgress(int(current * total))
if len(pts) < 3:
raise QgsProcessingException(
self.tr('Input file should contain at least 3 points. Choose '
'another file and try again.'))
uniqueSet = set(item for item in pts)
ids = [pts.index(item) for item in uniqueSet]
sl = voronoi.SiteList([voronoi.Site(*i) for i in uniqueSet])
c.triangulate = True
voronoi.voronoi(sl, c)
triangles = c.triangles
feat = QgsFeature()
total = 100.0 / len(triangles) if triangles else 1
for current, triangle in enumerate(triangles):
if feedback.isCanceled():
break
indices = list(triangle)
indices.append(indices[0])
polygon = []
attrs = []
step = 0
for index in indices:
fid, n = ptDict[ids[index]]
request = QgsFeatureRequest().setFilterFid(fid)
inFeat = next(source.getFeatures(request))
geom = QgsGeometry(inFeat.geometry())
if geom.isMultipart():
point = QgsPointXY(geom.asMultiPoint()[n])
else:
point = QgsPointXY(geom.asPoint())
polygon.append(point)
if step <= 3:
attrs.append(ids[index])
step += 1
feat.setAttributes(attrs)
geometry = QgsGeometry().fromPolygonXY([polygon])
feat.setGeometry(geometry)
sink.addFeature(feat, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
return {self.OUTPUT: dest_id}
| gpl-2.0 | 3,523,150,725,151,763,000 | 36.229814 | 146 | 0.52636 | false |
4doemaster/enigma2 | lib/python/Screens/PasswordBox.py | 5 | 3705 | # [iq]
from Screen import Screen
from Screens.MessageBox import MessageBox
from Components.config import ConfigSubsection,ConfigInteger,getConfigListEntry, ConfigSelection, ConfigTextLeft, ConfigPassword, config
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap, NumberActionMap, HelpableActionMap
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from os import system
class PasswordBox(ConfigListScreen, Screen):
skin = """
<screen name="PasswordBox" position="center,center" size="520,250" title="Check Password">
<widget name="state" position="10,30" zPosition="1" size="500,30" font="Regular;20" halign="left" valign="center" />
<widget name="config" position="10,90" size="500,80" scrollbarMode="showOnDemand" />
<widget name="text" position="10,170" zPosition="1" size="500,40" font="Regular;20" halign="center" valign="center" />
<ePixmap alphatest="on" pixmap="%s/buttons/red.png" position="10,210" size="140,40"/>
<ePixmap alphatest="on" pixmap="%s/buttons/blue.png" position="370,210" size="140,40"/>
<widget font="Regular;20" halign="center" position="10,210" render="Label" size="140,40" source="key_red" transparent="1" valign="center" zPosition="1"/>
<widget font="Regular;20" halign="center" position="380,210" render="Label" size="140,40" source="key_blue" transparent="1" valign="center" zPosition="1"/>
</screen>""" % (("ViX_HD_Common" if "ViX" in config.skin.primary_skin.value.split("/")[0] else "skin_default"),
("ViX_HD_Common" if "ViX" in config.skin.primary_skin.value.split("/")[0] else "skin_default"))
#( config.skin.primary_skin.value.split("/")[0], config.skin.primary_skin.value.split("/")[0] )
def __init__(self, session):
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Close"))
self["key_blue"] = StaticText(_("OK"))
self["text"] = Label(_(""))
self["state"] = Label(_(""))
self["actions"] = NumberActionMap(["SetupActions"],
{
"cancel": self.keyCancel,
}, -2)
self["ColorActions"] = HelpableActionMap(self, "ColorActions",
{
"red": (self.close, _("exit")),
"blue": (self.checkGo, _("check password")),
})
self.list = []
ConfigListScreen.__init__(self, self.list)
self.pw = ConfigSubsection()
self.createConfig()
self.createSetup()
self.timeout=8
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
def checkGo(self):
print "#checking password :",self.pw.enteredPassword.value
self.pw.enteredPassword.value=self.pw.enteredPassword.value.rstrip()
if self.checkPassword("root", self.pw.enteredPassword.value):
self.close(True)
else:
msg=_("Wrong password ('%s')" % (self.pw.enteredPassword.value))
self.session.open(MessageBox, msg, MessageBox.TYPE_INFO, self.timeout)
self.createConfig()
self.createSetup()
def keyCancel(self):
self.close(False)
def createConfig(self):
self.pw.enteredPassword = ConfigTextLeft("", 20)
self.pw.enteredPassword.setUseableChars(u"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
def createSetup(self):
self.list = []
self["text"].setText(_("Press blue button(OK) after entering current password"))
self.list.append(getConfigListEntry(_('Enter password :'), self.pw.enteredPassword))
self["state"].setText(_("Enter password with characters (0-9, a-z, A-Z)"))
self["config"].list = self.list
self["config"].l.setList(self.list)
def checkPassword(self, user, password):
import crypt, pwd
try:
pw1 = pwd.getpwnam(user)[1]
pw2 = crypt.crypt(password, pw1)
return pw1 == pw2
except KeyError:
return 0 # no such user
| gpl-2.0 | -7,666,159,819,359,619,000 | 39.271739 | 158 | 0.705533 | false |
x75/mavlink | pymavlink/examples/sigloss.py | 4 | 2131 | #!/usr/bin/env python
'''
show times when signal is lost
'''
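# Example (illustrative): report every telemetry gap longer than 5 seconds
#   python sigloss.py --deltat 5 flight1.tlog flight2.tlog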
import sys, time, os
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from optparse import OptionParser
parser = OptionParser("sigloss.py [options]")
parser.add_option("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_option("--planner",dest="planner", action='store_true', help="use planner file format")
parser.add_option("--robust",dest="robust", action='store_true', help="Enable robust parsing (skip over bad data)")
parser.add_option("--deltat", type='float', default=1.0, help="loss threshold in seconds")
parser.add_option("--condition",dest="condition", default=None, help="select packets by condition")
parser.add_option("--types", default=None, help="types of messages (comma separated)")
(opts, args) = parser.parse_args()
import mavutil
if len(args) < 1:
print("Usage: sigloss.py [options] <LOGFILE...>")
sys.exit(1)
def sigloss(logfile):
'''work out signal loss times for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename,
planner_format=opts.planner,
notimestamps=opts.notimestamps,
robust_parsing=opts.robust)
last_t = 0
types = opts.types
if types is not None:
types = types.split(',')
while True:
m = mlog.recv_match(condition=opts.condition)
if m is None:
return
if types is not None and m.get_type() not in types:
continue
if opts.notimestamps:
if not 'usec' in m._fieldnames:
continue
t = m.usec / 1.0e6
else:
t = m._timestamp
if last_t != 0:
if t - last_t > opts.deltat:
print("Sig lost for %.1fs at %s" % (t-last_t, time.asctime(time.localtime(t))))
last_t = t
total = 0.0
for filename in args:
sigloss(filename)
| lgpl-3.0 | 1,039,134,100,863,837,800 | 33.370968 | 115 | 0.608165 | false |
wuqize/FluentPython | chapter2/_numetuple.py | 1 | 1681 | # -*- coding: utf-8 -*-
"""
Created on Sun May 07 14:01:20 2017
"""
"""
from collections import namedtuple
ll = namedtuple('ll',['x', 'y'])
ll
Out[50]: __main__.ll
ll._fields
Out[51]: ('x', 'y')
City = namedtuple('City', 'name country population coordinates')
tokyo = City('Tokyo', 'JP', 36.933, (35.689722, 139.691667))
LatLong = namedtuple('LatLong', 'lat long')
delhi_data = ('Delhi NCR', 'IN', 21.935, LatLong(28.613889, 77.208889))
tokyo._asdict
Out[56]: <bound method City._asdict of City(name='Tokyo', country='JP', population=36.933, coordinates=(35.689722, 139.691667))>
tokyo._asdict()
Out[57]:
OrderedDict([('name', 'Tokyo'),
('country', 'JP'),
('population', 36.933),
('coordinates', (35.689722, 139.691667))])
delhi = City._make(delhi_data)
delhi_data = ('Delhi NCR', 'IN', 21.935, LatLong(28.613889, 77.208889))
delhi = City._make(delhi_data)
delhi._asdict()
Out[61]:
OrderedDict([('name', 'Delhi NCR'),
('country', 'IN'),
('population', 21.935),
('coordinates', LatLong(lat=28.613889, long=77.208889))])
"""
metro_areas = [
('Tokyo', 'JP', 36.933, (35.689722, 139.691667)), # <1>
('Delhi NCR', 'IN', 21.935, (28.613889, 77.208889)),
('Mexico City', 'MX', 20.142, (19.433333, -99.133333)),
('New York-Newark', 'US', 20.104, (40.808611, -74.020386)),
('Sao Paulo', 'BR', 19.649, (-23.547778, -46.635833)),
]
print('{:15} | {:^9} | {:^9}'.format('', 'lat.', 'long.'))
fmt = '{:15} | {:9.4f} | {:9.4f}'
for name, cc, pop, (latitude, longitude) in metro_areas: # <2>
if longitude <= 0: # <3>
print(fmt.format(name, latitude, longitude)) | lgpl-3.0 | -6,306,527,420,729,727,000 | 25.28125 | 128 | 0.570494 | false |
zakkum42/Bosch | src/01-features/split_date_columns.py | 1 | 1998 | import numpy as np
import pandas as pd
from datetime import datetime
from bosch_feature_list import numeric_fields, categoric_fields, date_fields, idList
from bosch_date_features import numeric_features_w_dates, date_features_w_numeric
chunksize = 100000
def binarize_for_features(fields_map, input_fname, output_fname1, output_fname2):
i = 0
for key in fields_map:
print i, key,
i += 1
if len(fields_map[key]) == 0:
print 'skipping'
continue
field_list = idList + fields_map[key]
# input_fname = output_fname1 + key + output_fname2
t0 = datetime.now()
use_header = True
output_fname = output_fname1 + key + output_fname2
for chunk in pd.read_csv(input_fname, usecols=field_list, iterator=True, chunksize=chunksize, dtype=object):
print "*",
data = pd.DataFrame()
data = data.append(chunk, ignore_index=True)
Ids = data['Id'].copy()
data[data.notnull()] = 1
data[data.isnull()] = 0
data['Id'] = Ids
data.to_csv(output_fname, mode="a", header=use_header, index=False)
use_header = False
t1 = datetime.now()
print input_fname, "loaded in", t1 - t0
def binarize_for_train_numeric():
binarize_for_features(numeric_features_w_dates, "input/train_numeric.csv", "stations/train/", "_numeric_w_date.csv")
def binarize_for_test_numeric():
binarize_for_features(numeric_features_w_dates, "input/test_numeric.csv", "stations/test/t", "_numeric_w_date.csv")
def binarize_for_train_date():
binarize_for_features(date_features_w_numeric, "input/train_date.csv", "stations/train/", "_date_w_numeric.csv")
def binarize_for_test_date():
binarize_for_features(date_features_w_numeric, "input/test_date.csv", "stations/test/t", "_date_w_numeric.csv")
# binarize_for_train_numeric()
# binarize_for_train_date()
binarize_for_test_numeric()
binarize_for_test_date()
| apache-2.0 | -4,293,318,501,645,923,000 | 37.423077 | 120 | 0.646146 | false |
atosorigin/ansible | lib/ansible/plugins/cache/__init__.py | 6 | 12844 | # (c) 2014, Michael DeHaan <[email protected]>
# (c) 2018, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import os
import time
import errno
from abc import ABCMeta, abstractmethod
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import with_metaclass
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.plugins import AnsiblePlugin
from ansible.plugins.loader import cache_loader
from ansible.utils.collection_loader import resource_from_fqcr
from ansible.utils.display import Display
from ansible.vars.fact_cache import FactCache as RealFactCache
display = Display()
class FactCache(RealFactCache):
"""
This is for backwards compatibility. Will be removed after deprecation. It was removed as it
wasn't actually part of the cache plugin API. It's actually the code to make use of cache
plugins, not the cache plugin itself. Subclassing it wouldn't yield a usable Cache Plugin and
there was no facility to use it as anything else.
"""
def __init__(self, *args, **kwargs):
display.deprecated('ansible.plugins.cache.FactCache has been moved to'
' ansible.vars.fact_cache.FactCache. If you are looking for the class'
' to subclass for a cache plugin, you want'
' ansible.plugins.cache.BaseCacheModule or one of its subclasses.',
version='2.12', collection_name='ansible.builtin')
super(FactCache, self).__init__(*args, **kwargs)
class BaseCacheModule(AnsiblePlugin):
# Backwards compat only. Just import the global display instead
_display = display
def __init__(self, *args, **kwargs):
# Third party code is not using cache_loader to load plugin - fall back to previous behavior
if not hasattr(self, '_load_name'):
display.deprecated('Rather than importing custom CacheModules directly, use ansible.plugins.loader.cache_loader',
version='2.14', collection_name='ansible.builtin')
self._load_name = self.__module__.split('.')[-1]
self._load_name = resource_from_fqcr(self.__module__)
super(BaseCacheModule, self).__init__()
self.set_options(var_options=args, direct=kwargs)
@abstractmethod
def get(self, key):
pass
@abstractmethod
def set(self, key, value):
pass
@abstractmethod
def keys(self):
pass
@abstractmethod
def contains(self, key):
pass
@abstractmethod
def delete(self, key):
pass
@abstractmethod
def flush(self):
pass
@abstractmethod
def copy(self):
pass
class BaseFileCacheModule(BaseCacheModule):
"""
A caching module backed by file based storage.
"""
def __init__(self, *args, **kwargs):
try:
super(BaseFileCacheModule, self).__init__(*args, **kwargs)
self._cache_dir = self._get_cache_connection(self.get_option('_uri'))
self._timeout = float(self.get_option('_timeout'))
except KeyError:
self._cache_dir = self._get_cache_connection(C.CACHE_PLUGIN_CONNECTION)
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self.plugin_name = resource_from_fqcr(self.__module__)
self._cache = {}
self.validate_cache_connection()
def _get_cache_connection(self, source):
if source:
try:
return os.path.expanduser(os.path.expandvars(source))
except TypeError:
pass
def validate_cache_connection(self):
if not self._cache_dir:
raise AnsibleError("error, '%s' cache plugin requires the 'fact_caching_connection' config option "
"to be set (to a writeable directory path)" % self.plugin_name)
if not os.path.exists(self._cache_dir):
try:
os.makedirs(self._cache_dir)
except (OSError, IOError) as e:
raise AnsibleError("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e)))
else:
for x in (os.R_OK, os.W_OK, os.X_OK):
if not os.access(self._cache_dir, x):
raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (
self.plugin_name, self._cache_dir))
def _get_cache_file_name(self, key):
prefix = self.get_option('_prefix')
if prefix:
cachefile = "%s/%s%s" % (self._cache_dir, prefix, key)
else:
cachefile = "%s/%s" % (self._cache_dir, key)
return cachefile
def get(self, key):
""" This checks the in memory cache first as the fact was not expired at 'gather time'
and it would be problematic if the key did expire after some long running tasks and
user gets 'undefined' error in the same play """
if key not in self._cache:
if self.has_expired(key) or key == "":
raise KeyError
cachefile = self._get_cache_file_name(key)
try:
value = self._load(cachefile)
self._cache[key] = value
except ValueError as e:
display.warning("error in '%s' cache plugin while trying to read %s : %s. "
"Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e)))
self.delete(key)
raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
"It has been removed, so you can re-run your command now." % cachefile)
except (OSError, IOError) as e:
display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
raise KeyError
except Exception as e:
raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e)))
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
cachefile = self._get_cache_file_name(key)
try:
self._dump(value, cachefile)
except (OSError, IOError) as e:
display.warning("error in '%s' cache plugin while trying to write to %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
def has_expired(self, key):
if self._timeout == 0:
return False
cachefile = self._get_cache_file_name(key)
try:
st = os.stat(cachefile)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
return False
if time.time() - st.st_mtime <= self._timeout:
return False
if key in self._cache:
del self._cache[key]
return True
def keys(self):
keys = []
for k in os.listdir(self._cache_dir):
if not (k.startswith('.') or self.has_expired(k)):
keys.append(k)
return keys
def contains(self, key):
cachefile = self._get_cache_file_name(key)
if key in self._cache:
return True
if self.has_expired(key):
return False
try:
os.stat(cachefile)
return True
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
def delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
os.remove(self._get_cache_file_name(key))
except (OSError, IOError):
pass # TODO: only pass on non existing?
def flush(self):
self._cache = {}
for key in self.keys():
self.delete(key)
def copy(self):
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
@abstractmethod
def _load(self, filepath):
"""
Read data from a filepath and return it as a value
:arg filepath: The filepath to read from.
:returns: The value stored in the filepath
This method reads from the file on disk and takes care of any parsing
and transformation of the data before returning it. The value
returned should be what Ansible would expect if it were uncached data.
.. note:: Filehandles have advantages but calling code doesn't know
whether this file is text or binary, should be decoded, or accessed via
a library function. Therefore the API uses a filepath and opens
the file inside of the method.
"""
pass
@abstractmethod
def _dump(self, value, filepath):
"""
Write data to a filepath
:arg value: The value to store
:arg filepath: The filepath to store it at
"""
pass
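# Sketch of a concrete subclass wiring up the two abstract hooks (illustrative
# only; it is not one of the shipped cache plugins and assumes the json module
# is imported):
#
#   class JSONFileCacheSketch(BaseFileCacheModule):
#       def _load(self, filepath):
#           with open(filepath) as f:
#               return json.load(f)
#
#       def _dump(self, value, filepath):
#           with open(filepath, 'w') as f:
#               json.dump(value, f)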
class CachePluginAdjudicator(MutableMapping):
"""
Intermediary between a cache dictionary and a CacheModule
"""
def __init__(self, plugin_name='memory', **kwargs):
self._cache = {}
self._retrieved = {}
self._plugin = cache_loader.get(plugin_name, **kwargs)
if not self._plugin:
raise AnsibleError('Unable to load the cache plugin (%s).' % plugin_name)
self._plugin_name = plugin_name
def update_cache_if_changed(self):
if self._retrieved != self._cache:
self.set_cache()
def set_cache(self):
for top_level_cache_key in self._cache.keys():
self._plugin.set(top_level_cache_key, self._cache[top_level_cache_key])
self._retrieved = copy.deepcopy(self._cache)
def load_whole_cache(self):
for key in self._plugin.keys():
self._cache[key] = self._plugin.get(key)
def __repr__(self):
return to_text(self._cache)
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.keys())
def _do_load_key(self, key):
load = False
if all([
key not in self._cache,
key not in self._retrieved,
self._plugin_name != 'memory',
self._plugin.contains(key),
]):
load = True
return load
def __getitem__(self, key):
if self._do_load_key(key):
try:
self._cache[key] = self._plugin.get(key)
except KeyError:
pass
else:
self._retrieved[key] = self._cache[key]
return self._cache[key]
def get(self, key, default=None):
if self._do_load_key(key):
try:
self._cache[key] = self._plugin.get(key)
            except KeyError:
pass
else:
self._retrieved[key] = self._cache[key]
return self._cache.get(key, default)
def items(self):
return self._cache.items()
def values(self):
return self._cache.values()
def keys(self):
return self._cache.keys()
def pop(self, key, *args):
if args:
return self._cache.pop(key, args[0])
return self._cache.pop(key)
def __delitem__(self, key):
del self._cache[key]
def __setitem__(self, key, value):
self._cache[key] = value
def flush(self):
self._plugin.flush()
self._cache = {}
def update(self, value):
self._cache.update(value)
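# Minimal usage sketch (illustrative; 'memory' is the documented default plugin):
#   cache = CachePluginAdjudicator(plugin_name='memory')
#   cache['host1'] = {'ansible_distribution': 'Debian'}
#   cache.update_cache_if_changed()  # persists through the plugin only if dirty
#   cache.flush()                    # clears the plugin and the local dict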
| gpl-3.0 | 821,881,587,876,551,000 | 33.159574 | 156 | 0.587434 | false |
menpo/menpo | menpo/io/output/base.py | 2 | 19837 | import gzip
import warnings
from functools import partial
from pathlib import Path
from .extensions import landmark_types, image_types, pickle_types, video_types
from ..exceptions import OverwriteError
from ..utils import _norm_path, _possible_extensions_from_filepath, _normalize_extension
# an open file handle that uses a small fast level of compression
gzip_open = partial(gzip.open, compresslevel=3)
def export_landmark_file(landmarks_object, fp, extension=None, overwrite=False):
r"""
    Exports a given shape. The ``fp`` argument can be either a `str` or
any Python type that acts like a file. If a file is provided, the
``extension`` kwarg **must** be provided. If no ``extension`` is provided
and a `str` filepath is provided, then the export type is calculated
based on the filepath extension.
Due to the mix in string and file types, an explicit overwrite argument is
used which is ``False`` by default.
Parameters
----------
landmarks_object : dict or :map:`LandmarkManager` or
:map:`PointCloud` or subclass of :map:`PointCloud`
        The landmarks to export. :map:`PointCloud` and its subclasses are
        supported by all exporters, while the remaining types can only be
        exported to the LJSON format.
fp : `Path` or `file`-like object
The Path or file-like object to save the object at/into.
extension : `str` or None, optional
The extension to use, this must match the file path if the file
path is a string. Determines the type of exporter that is used.
overwrite : `bool`, optional
Whether or not to overwrite a file if it already exists.
Raises
------
ValueError
File already exists and ``overwrite`` != ``True``
ValueError
``fp`` is a `str` and the ``extension`` is not ``None``
and the two extensions do not match
ValueError
``fp`` is a `file`-like object and ``extension`` is
``None``
ValueError
The provided extension does not match to an existing exporter type
(the output type is not supported).
ValueError
The provided type for landmarks_object is not supported.
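    Examples
    --------
    A minimal sketch; ``pcloud`` is an existing :map:`PointCloud` and the
    output path is illustrative only.
    >>> import menpo.io as mio
    >>> mio.export_landmark_file(pcloud, 'image.ljson', overwrite=True)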
"""
extension = _normalize_extension(extension)
try:
landmarks_object.n_points
except AttributeError:
# unless this is LJSON, this is not correct.
fp_is_path = isinstance(fp, (str, Path))
if (extension is not None and extension != ".ljson") or (
fp_is_path and Path(fp).suffix != ".ljson"
):
m1 = (
"Only the LJSON format supports multiple "
"keys for exporting. \nIn any other "
"case your input should be a PointCloud or "
"subclass."
)
raise ValueError(m1)
_export(landmarks_object, fp, landmark_types, extension, overwrite)
def export_image(image, fp, extension=None, overwrite=False):
r"""
Exports a given image. The ``fp`` argument can be either
a `Path` or any Python type that acts like a file. If a file is provided,
the ``extension`` kwarg **must** be provided. If no
``extension`` is provided and a `str` filepath is provided, then
the export type is calculated based on the filepath extension.
Due to the mix of string and file types, an explicit overwrite argument is
used which is ``False`` by default.
Parameters
----------
image : :map:`Image`
The image to export.
fp : `Path` or `file`-like object
The Path or file-like object to save the object at/into.
extension : `str` or None, optional
The extension to use, this must match the file path if the file
path is a string. Determines the type of exporter that is used.
overwrite : `bool`, optional
Whether or not to overwrite a file if it already exists.
Raises
------
ValueError
File already exists and ``overwrite`` != ``True``
ValueError
``fp`` is a `str` and the ``extension`` is not ``None``
and the two extensions do not match
ValueError
``fp`` is a `file`-like object and ``extension`` is
``None``
ValueError
The provided extension does not match to an existing exporter type
(the output type is not supported).
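    Examples
    --------
    A minimal sketch; ``img`` is an existing :map:`Image` and the output path
    is illustrative only.
    >>> import menpo.io as mio
    >>> mio.export_image(img, 'output.png', overwrite=True)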
"""
_export(image, fp, image_types, extension, overwrite)
def export_video(images, file_path, overwrite=False, fps=30, **kwargs):
r"""
Exports a given list of images as a video. Ensure that all the images
have the same shape, otherwise you might get unexpected results from
the ffmpeg writer. The ``file_path`` argument is a `Path` representing
the path to save the video to. At this time, it is not possible
to export videos directly to a file buffer.
Due to the mix of string and file types, an explicit overwrite argument is
used which is ``False`` by default.
Note that exporting of GIF images is also supported.
Parameters
----------
images : list of :map:`Image`
The images to export as a video.
file_path : `Path`
The Path to save the video at. File buffers are not supported, unlike
other exporting formats.
overwrite : `bool`, optional
Whether or not to overwrite a file if it already exists.
fps : `int`, optional
The number of frames per second.
**kwargs : `dict`, optional
Extra parameters that are passed through directly to the exporter.
Please see the documentation in the ``menpo.io.output.video`` package
for information about the supported arguments.
Raises
------
ValueError
File already exists and ``overwrite`` != ``True``
ValueError
The input is a buffer and not a valid `Path`
ValueError
The provided extension does not match to an existing exporter type
(the output type is not supported).
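    Examples
    --------
    A minimal sketch; ``frames`` is a list of equally-shaped :map:`Image`
    objects and the output path is illustrative only.
    >>> import menpo.io as mio
    >>> mio.export_video(frames, 'sequence.gif', fps=10, overwrite=True)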
"""
exporter_kwargs = {"fps": fps}
exporter_kwargs.update(kwargs)
file_path = _enforce_only_paths_supported(file_path, "FFMPEG")
_export_paths_only(
images, file_path, video_types, None, overwrite, exporter_kwargs=exporter_kwargs
)
def export_pickle(obj, fp, overwrite=False, protocol=2):
r"""
Exports a given collection of Python objects with Pickle.
The ``fp`` argument can be either a `Path` or any Python type that acts like
a file.
If ``fp`` is a path, it must have the suffix `.pkl` or `.pkl.gz`. If
`.pkl`, the object will be pickled using the selected Pickle protocol.
If `.pkl.gz` the object will be pickled using the selected Pickle
protocol with gzip compression (at a fixed compression level of 3).
Note that a special exception is made for `pathlib.Path` objects - they
are pickled down as a `pathlib.PurePath` so that pickles can be easily
moved between different platforms.
Parameters
----------
obj : ``object``
The object to export.
fp : `Path` or `file`-like object
The string path or file-like object to save the object at/into.
overwrite : `bool`, optional
Whether or not to overwrite a file if it already exists.
protocol : `int`, optional
The Pickle protocol used to serialize the file.
The protocols were introduced in different versions of python, thus
it is recommended to save with the highest protocol version that
your python distribution can support.
The protocol refers to:
========= =========================================================
Protocol Functionality
========= =========================================================
0 Simplest protocol for text mode, backwards compatible.
1 Protocol for binary mode, backwards compatible.
2 Wider support for classes, compatible with python >= 2.3.
3 Support for byte objects, compatible with python >= 3.0.
4 Support for large objects, compatible with python >= 3.4.
========= =========================================================
Raises
------
ValueError
File already exists and ``overwrite`` != ``True``
ValueError
``fp`` is a `file`-like object and ``extension`` is
``None``
ValueError
The provided extension does not match to an existing exporter type
(the output type is not supported).
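    Examples
    --------
    A minimal sketch; the output path is illustrative, and the ``.pkl.gz``
    suffix triggers gzip compression as described above.
    >>> import menpo.io as mio
    >>> mio.export_pickle({'name': 'menpo'}, 'data.pkl.gz', overwrite=True)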
"""
exporter_kwargs = {"protocol": protocol}
if isinstance(fp, str):
fp = Path(fp) # cheeky conversion to Path to reuse existing code
if isinstance(fp, Path):
# user provided a path - if it ended .gz we will compress
path_filepath = _validate_filepath(fp, overwrite)
extension = _parse_and_validate_extension(path_filepath, None, pickle_types)
o = gzip_open if extension[-3:] == ".gz" else open
with o(str(path_filepath), "wb") as f:
# force overwrite as True we've already done the check above
_export(
obj, f, pickle_types, extension, True, exporter_kwargs=exporter_kwargs
)
else:
_export(
obj, fp, pickle_types, ".pkl", overwrite, exporter_kwargs=exporter_kwargs
)
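# Illustrative usage sketch: assumes the ``export_pickle`` behaviour documented
# above; the object and the file names are placeholders only.
def _example_export_pickle(training_data):
    # Plain pickle file.
    export_pickle(training_data, './training_data.pkl', overwrite=True)
    # Same object, gzip-compressed because of the ``.pkl.gz`` suffix.
    export_pickle(training_data, './training_data.pkl.gz', overwrite=True)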
def _extension_to_export_function(extension, extensions_map):
r"""
Simple function that wraps the extensions map indexing and raises
a user friendly ``ValueError``
Parameters
----------
extension : `str`
The string extension with period prefix e.g '.jpg'
extensions_map : `dict` of `str` -> `callable`
The extension map that maps extensions to export callables.
Returns
-------
mapping_callable : `callable`
The callable that performs exporting.
Raises
------
ValueError
If ``extensions_map`` does not contain ``extension``. More friendly
than the ``KeyError`` that would be raised.
"""
# This just ensures that a sensible, user friendly Exception is raised.
try:
return extensions_map[extension]
except KeyError:
raise ValueError(
"The output file extension ({}) provided is not "
"currently supported.".format(extension)
)
def _validate_filepath(fp, overwrite):
r"""
Normalize a given file path and ensure that ``overwrite == True`` if the
file path exists. Normalisation involves things like making the given
path absolute and expanding environment variables and user variables.
Parameters
----------
fp : `Path`
The file path.
overwrite : `bool`
Whether the export method should override an existing file at the
file path.
Returns
-------
normalized_filepath : `Path`
The normalized file path.
Raises
------
OverwriteError
If ``overwrite == False`` and a file already exists at the file path.
"""
path_filepath = _norm_path(fp)
if path_filepath.exists() and not overwrite:
raise OverwriteError(
"File {} already exists. Please set the overwrite "
"kwarg if you wish to overwrite "
"the file.".format(path_filepath.name),
path_filepath,
)
return path_filepath
def _parse_and_validate_extension(filepath, extension, extensions_map):
r"""
If an extension is given, validate that the given file path matches
the given extension.
    If not, parse the file path and return a correct extension. This function
    will handle cases such as file names that contain periods.
Parameters
----------
filepath : `Path`
The file path (normalized).
extension : `str`
The extension provided by the user.
extensions_map : `dict` of `str` -> `callable`
A dictionary mapping extensions to export callables.
Returns
-------
norm_extension : `str`
The correct extension, with leading period.
Raises
------
ValueError
Unknown extension.
ValueError
        File path contains an extension that does not EXACTLY match the
        user's provided extension.
"""
# If an explicit extension is passed, it must match exactly. However, file
# names may contain periods, and therefore we need to try and parse
# a known extension from the given file path.
possible_exts = _possible_extensions_from_filepath(filepath)
known_extension = None
while known_extension is None and possible_exts:
possible_extension = possible_exts.pop(0)
if possible_extension in extensions_map:
known_extension = possible_extension
if known_extension is None:
raise ValueError(
"Unknown file extension passed: {}".format("".join(filepath.suffixes))
)
if extension is not None:
extension = _normalize_extension(extension)
if extension != known_extension:
raise ValueError(
"The file path extension must match the "
"requested file extension: {} != {}".format(extension, known_extension)
)
return known_extension
def _enforce_only_paths_supported(file_path, exporter_name):
r"""
If a given exporter only supports paths rather than open file handles
or buffers then this function can be used to enforce that. If a file
handle is passed then an attempt is made to write to the path of the file
handle.
Parameters
----------
    file_path : `str` or `pathlib.Path` or file-like object
        The file path to write to.
    exporter_name : `str`
        Name of the exporter, used in the warning raised when an open file
        handle is replaced by its underlying path.
Returns
-------
file_path : `str`
        The path of the open file handle or, if a path was passed, the path
        unchanged.
Raises
------
ValueError
If given ``file_path`` is not a string, pathlib.Path or file handle.
"""
if hasattr(file_path, "name") and not isinstance(file_path, Path):
file_path = file_path.name
warnings.warn(
"The {} exporter only supports file paths and not "
"buffers or open file handles - therefore the provided "
"file handle will be ignored and the object will be "
"exported to {}.".format(exporter_name, file_path)
)
if isinstance(file_path, (str, Path)):
return file_path
else:
raise ValueError("Cannot write to unnamed file handles or buffers.")
def _validate_and_get_export_func(
file_path, extensions_map, extension, overwrite, return_extension=False
):
r"""
Given a ``file_path``, ensure that the options chosen are valid with respect
to overwriting and any provided extensions. If this validation is
successful then the exporter function is returned.
Parameters
----------
file_path : `Path`
The path to write to.
extensions_map : `dict` of `str` -> `callable`
The dictionary mapping extensions to export callables.
extension : `str`
User provided extension (required if a file-like ``fp`` is passed).
overwrite : `bool`
If ``True``, overwrite any existing files at the given path.
return_extension : `bool`, optional
If ``True``, return the correct extension as well as the export
callable, as a tuple ``(callable, extension)``.
Returns
-------
exporter_callable : `callable`
The exporter callable.
extension : `str`
The correct extension for the exporter function, if
``return_extension==True``.
"""
if isinstance(file_path, str):
# cheeky conversion to Path to reuse existing code
file_path = Path(file_path)
file_path = _validate_filepath(file_path, overwrite)
extension = _parse_and_validate_extension(file_path, extension, extensions_map)
export_callable = _extension_to_export_function(extension, extensions_map)
if return_extension:
return export_callable, extension
else:
return export_callable
def _export_paths_only(
obj, file_path, extensions_map, extension, overwrite, exporter_kwargs=None
):
r"""
A shared export function handling paths only. This handles the logic
of ensuring that the given ``file_path`` is a ``pathlib.Path``. All exporter
methods that are called from here are defined as receiving a
``pathlib.Path``.
Parameters
----------
obj : `object`
The Python object to export.
file_path : `Path`
The path to write to.
extensions_map : `dict` of `str` -> `callable`
The dictionary mapping extensions to export callables.
extension : `str`
User provided extension (required if a file-like ``fp`` is passed).
overwrite : `bool`
If ``True``, overwrite any existing files at the given path.
    exporter_kwargs : `dict`, optional
Any kwargs to be passed through to the exporter.
"""
if exporter_kwargs is None:
exporter_kwargs = {}
export_function = _validate_and_get_export_func(
file_path, extensions_map, extension, overwrite
)
export_function(obj, file_path, **exporter_kwargs)
def _export(obj, fp, extensions_map, extension, overwrite, exporter_kwargs=None):
r"""
The shared export function. This handles the shared logic of ensuring
that the given ``fp`` is either a ``pathlib.Path`` or a file like
object. All exporter methods are defined as receiving a buffer object,
    regardless of whether a path is provided. If a file-like object is provided
    then the extension must not be ``None``.
Parameters
----------
obj : `object`
The Python object to export.
fp : `Path` or file-like object
The path or file buffer to write to.
extensions_map : `dict` of `str` -> `callable`
The dictionary mapping extensions to export callables.
extension : `str`
User provided extension (required if a file-like ``fp`` is passed).
overwrite : `bool`
If ``True``, overwrite any existing files at the given path.
    exporter_kwargs : `dict`, optional
Any kwargs to be passed through to the exporter.
"""
if exporter_kwargs is None:
exporter_kwargs = {}
if isinstance(fp, str):
fp = Path(fp) # cheeky conversion to Path to reuse existing code
if isinstance(fp, Path):
export_function, extension = _validate_and_get_export_func(
fp, extensions_map, extension, overwrite, return_extension=True
)
with fp.open("wb") as file_handle:
export_function(obj, file_handle, extension=extension, **exporter_kwargs)
else:
# You MUST provide an extension if a file handle is given
if extension is None:
raise ValueError(
"An export file extension must be provided if a "
"file-like object is passed."
)
else:
extension = _normalize_extension(extension)
# Apparently in Python 2.x there is no reliable way to detect something
# that is 'file' like (file handle or a StringIO object or something
# you can read and write to like a file). Therefore, we are going to
# just be really Pythonic about it and just assume we were handed
# a correctly behaving object.
try:
# Follow PIL like behaviour. Check the file handle extension
# and check if matches the given extension
export_function = _validate_and_get_export_func(
Path(fp.name), extensions_map, extension, overwrite
)
except AttributeError:
# Just use the extension to get the export function
export_function = _extension_to_export_function(extension, extensions_map)
export_function(obj, fp, extension=extension, **exporter_kwargs)
| bsd-3-clause | 1,479,423,852,352,776,400 | 36.428302 | 88 | 0.634118 | false |
ceache/treadmill | lib/python/treadmill/sysinfo.py | 2 | 10919 | """Helper module to get system related information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import io
import multiprocessing
import os
import platform
import socket
import psutil
from . import exc
from . import subproc
if os.name == 'posix':
from . import cgroups
from . import cgutils
else:
# Pylint warning unable to import because it is on Windows only
import docker
import win32api # pylint: disable=E0401
import win32con # pylint: disable=E0401
import win32security # pylint: disable=E0401
# Equate "virtual" CPU to 4000 bogomips.
BMIPS_PER_CPU = 4000
_BYTES_IN_MB = 1024 * 1024
def _disk_usage_linux(path):
"""Return disk usage associated with path."""
statvfs = os.statvfs(path)
total = statvfs.f_blocks * statvfs.f_frsize
free = statvfs.f_bavail * statvfs.f_frsize
return namedtuple('usage', 'total free')(total, free)
def _disk_usage_windows(path):
"""Return disk usage associated with path."""
dsk_use = psutil.disk_usage(path)
return namedtuple('usage', 'total free')(dsk_use.total, dsk_use.free)
_MEMINFO = None
def _mem_info_linux():
"""Return total/swap memory info from /proc/meminfo."""
global _MEMINFO # pylint: disable=W0603
if not _MEMINFO:
with io.open('/proc/meminfo') as meminfo:
total = None
swap = None
for line in meminfo.read().splitlines():
line = line[:-1]
if line.find('MemTotal') == 0:
total = int(line.split()[1])
if line.find('SwapTotal') == 0:
swap = int(line.split()[1])
_MEMINFO = namedtuple('memory', 'total swap')(total, swap)
return _MEMINFO
def _mem_info_windows():
"""Return total/swap memory info"""
global _MEMINFO # pylint: disable=W0603
if not _MEMINFO:
total = psutil.virtual_memory().total // 1024
swap = psutil.swap_memory().total // 1024
_MEMINFO = namedtuple('memory', 'total swap')(total, swap)
return _MEMINFO
def _proc_info_linux(pid):
"""Returns process exe filename and start time."""
filename = None
starttime = None
ppid = None
if pid is None:
raise exc.InvalidInputError('/proc', 'pid is undefined.')
with io.open('/proc/%s/stat' % pid, 'r') as stat:
for line in stat.read().splitlines():
fields = line.split()
# Filename is given in (), remove the brackets.
filename = fields[1][1:-1]
ppid = int(fields[3])
starttime = int(fields[21])
return namedtuple('proc', 'filename ppid starttime')(filename,
ppid,
starttime)
def _proc_info_windows(pid):
"""Returns process exe filename and start time."""
try:
process = psutil.Process(pid)
except Exception:
raise exc.InvalidInputError('proc', 'pid is undefined.')
return namedtuple('proc', 'filename ppid starttime')(process.name(),
process.ppid(),
process.create_time())
def cpu_count():
"""Return number of CPUs on the system."""
return multiprocessing.cpu_count()
def _available_cpu_count_linux():
"""Return number of CPUs available for treadmill."""
cores = cgutils.get_cpuset_cores('treadmill')
return len(cores)
def _bogomips_linux(cores):
"""Return sum of bogomips value for cores."""
total = 0
with io.open('/proc/cpuinfo') as cpuinfo:
for cpu in cpuinfo.read().split('\n\n'):
for line in cpu.splitlines():
if line.startswith('processor'):
if int(line.split(':')[1]) not in cores:
break
if line.startswith('bogomips'):
total += float(line.split(':')[1])
return int(total)
def _total_bogomips_linux():
"""Return sum of bogomips value for all CPUs."""
cores = cgutils.get_cpuset_cores('treadmill')
return _bogomips_linux(cores)
def _cpuflags_linux():
"""Return list of cpu flags."""
with io.open('/proc/cpuinfo') as cpuinfo:
for line in cpuinfo.read().splitlines():
if line.startswith('flags'):
flags = line.split(':')[1]
return flags.split()
return []
def _cpuflags_windows():
"""Return list of cpu flags."""
return []
def hostname():
"""Hostname of the server."""
treadmill_hostname = os.environ.get('TREADMILL_HOSTNAME')
if treadmill_hostname:
return treadmill_hostname.lower()
host_name = socket.gethostname()
port = 0
family = 0
socktype = 0
proto = 0
_family, _socktype, _proto, canonname, _sockaddr = socket.getaddrinfo(
host_name,
port,
family,
socktype,
proto,
socket.AI_CANONNAME)[0]
return canonname.lower()
def _port_range_linux():
"""Returns local port range."""
with io.open('/proc/sys/net/ipv4/ip_local_port_range', 'r') as f:
low, high = [int(i) for i in f.read().split()]
return low, high
def _port_range_windows():
"""Returns local port range."""
cmd = 'netsh.exe int ipv4 show dynamicport tcp'
output = subproc.check_output([cmd]).split('\r\n')
low = 0
ports = 0
for line in output:
if line.lower().startswith('start port'):
low = int(line.split(':')[1])
elif line.lower().startswith('number of ports'):
ports = int(line.split(':')[1])
    high = low + ports - 1
return low, high
def _kernel_ver_linux():
"""Returns kernel version as major, minor, patch tuple."""
with io.open('/proc/sys/kernel/osrelease') as f:
kver = f.readline().split('.')[:3]
last = len(kver)
if last == 2:
kver.append('0')
last -= 1
for char in '-_':
kver[last] = kver[last].split(char)[0]
try:
int(kver[last])
except ValueError:
kver[last] = 0
return int(kver[0]), int(kver[1]), int(kver[2])
def _kernel_ver_windows():
"""Returns kernel version as major, minor, patch tuple."""
version = platform.platform().split('-')[2]
kver = version.split('.')
return int(kver[0]), int(kver[1]), int(kver[2])
def _hwmodel_linux():
"""Return hardware model of the host."""
with open('/sys/devices/virtual/dmi/id/product_name') as f:
return f.read().strip()
def _hwmodel_windows():
"""Return hardware model of the host."""
return None
def _get_docker_node_info(info):
"""Gets the node info specific to docker.
"""
cpucapacity = int(cpu_count() * 100)
memcapacity = (psutil.virtual_memory().total * 0.9) // _BYTES_IN_MB
# TODO: manage disk space a little better
client = docker.from_env()
docker_info = client.info()
path = docker_info['DockerRootDir']
diskfree = disk_usage(path).free // _BYTES_IN_MB
info.update({
'memory': '%dM' % memcapacity,
'disk': '%dM' % diskfree,
'cpu': '%d%%' % cpucapacity,
})
return info
def _node_info_linux(tm_env, runtime, cgroup_prefix, **_kwargs):
"""Generate a node information report for the scheduler.
:param tm_env:
Treadmill application environment
:type tm_env:
`appenv.AppEnvironment`
:param runtime:
Treadmill runtime in use
:type tm_env:
`str`
"""
info = {
'up_since': up_since(),
}
if runtime == 'linux':
# Request status information from services (this may wait for the
# services to be up).
localdisk_status = tm_env.svc_localdisk.status(timeout=30)
# FIXME: Memory and CPU available to containers should come from the
# cgroup service.
_cgroup_status = tm_env.svc_cgroup.status(timeout=30)
network_status = tm_env.svc_network.status(timeout=30)
        # We normalize bogomips into logical "cores", each core == 4000 bmips.
# Each virtual "core" is then equated to 100 units.
# The formula is bmips / BMIPS_PER_CPU * 100
apps_group = cgutils.apps_group_name(cgroup_prefix)
app_bogomips = cgutils.get_cpu_shares(apps_group)
cpucapacity = (app_bogomips * 100) // BMIPS_PER_CPU
memcapacity = cgroups.get_value(
'memory',
apps_group,
'memory.limit_in_bytes'
) // _BYTES_IN_MB
diskfree = localdisk_status['size'] // _BYTES_IN_MB
info.update({
'memory': '%dM' % memcapacity,
'disk': '%dM' % diskfree,
'cpu': '%d%%' % cpucapacity,
'network': network_status,
'localdisk': localdisk_status,
})
else:
raise NotImplementedError(
'Runtime {0} is not supported on Linux'.format(runtime)
)
return info
def _node_info_windows(_tm_env, runtime, **_kwargs):
"""Generate a node information report for the scheduler.
:param _tm_env:
Treadmill application environment
:type _tm_env:
`appenv.AppEnvironment`
:param runtime:
Treadmill runtime in use
:type runtime:
`str`
"""
if runtime != 'docker':
# Raising an exception will ensure windows is started with docker
# runtime enabled
raise NotImplementedError(
'Runtime {0} is not supported on Windows'.format(runtime)
)
info = _get_docker_node_info({
'up_since': up_since(),
})
dc_name = win32security.DsGetDcName()
info.update({
'nt.dc': dc_name['DomainControllerName'].replace('\\\\', '').lower(),
'nt.domain': dc_name['DomainName'].lower(),
'nt.dn': win32api.GetComputerObjectName(win32con.NameFullyQualifiedDN)
})
return info
def up_since():
"""Returns time of last reboot."""
return psutil.boot_time()
# pylint: disable=C0103
if os.name == 'nt':
disk_usage = _disk_usage_windows
mem_info = _mem_info_windows
proc_info = _proc_info_windows
cpu_flags = _cpuflags_windows
port_range = _port_range_windows
kernel_ver = _kernel_ver_windows
node_info = _node_info_windows
hwmodel = _hwmodel_windows
else:
disk_usage = _disk_usage_linux
mem_info = _mem_info_linux
proc_info = _proc_info_linux
cpu_flags = _cpuflags_linux
bogomips = _bogomips_linux
total_bogomips = _total_bogomips_linux
port_range = _port_range_linux
kernel_ver = _kernel_ver_linux
node_info = _node_info_linux
hwmodel = _hwmodel_linux
available_cpu_count = _available_cpu_count_linux
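# Illustrative usage sketch: the aliases above resolve to the platform specific
# implementations, so callers use the same names on Linux and Windows. The
# printed layout below is an example only.
def _example_usage():
    total_kb, swap_kb = mem_info()
    print('host: %s' % hostname())
    print('cpus: %d, memory: %d KB, swap: %d KB' % (cpu_count(), total_kb, swap_kb))
    print('local port range: %d-%d' % port_range())
    print('kernel: %d.%d.%d' % kernel_ver())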
| apache-2.0 | -6,205,952,866,836,848,000 | 27.287565 | 79 | 0.586592 | false |
ktsamis/repose | repose/connection.py | 2 | 11725 | import errno
import os
import select
import socket
import sys
import getpass
from traceback import format_exc
import logging
import paramiko
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
logger = logging.getLogger("repose.connection")
class CommandTimeout(Exception):
"""remote command timeout exception
returns timed out remote command as __str__
"""
def __init__(self, command=None):
self.command = command
def __str__(self):
return repr(self.command)
class Connection(object):
"""Manage ssh/sftp connection"""
def __init__(self, hostname, username, port, timeout=120):
""" openSSH channel to the specified host
Tries AuthKey Authentication and falls back to password mode
in case of errors.
If a connection can't be established (host not available, wrong password/key)
        exceptions are reraised from the ssh subsystem and need to be caught
by the caller.
"""
self.username = username
self.hostname = hostname
try:
self.port = int(port)
except Exception:
self.port = 22
self.timeout = timeout
self.client = paramiko.SSHClient()
def __repr__(self):
return "<{} object username={} hostname={} port={}>".format(
self.__class__.__name__, self.username, self.hostname, self.port
)
def __load_keys(self):
self.client.load_system_host_keys()
        # Don't check host keys --> StrictHostKeyChecking no
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
def connect(self):
cfg = paramiko.config.SSHConfig()
self.__load_keys()
try:
with open(os.path.expanduser("~/.ssh/config")) as fd:
cfg.parse(fd)
except IOError as e:
if e.errno != errno.ENOENT:
logger.warning(e)
opts = cfg.lookup(self.hostname)
try:
logger.debug("connecting to {}:{}".format(self.hostname, self.port))
# if this fails, the user most likely has none or an outdated
# hostkey for the specified host. checking back with a manual
# "ssh root@..." invocation helps in most cases.
self.client.connect(
hostname=opts.get("hostname", self.hostname)
if "proxycommand" not in opts
else self.hostname,
port=int(opts.get("port", self.port)),
username=opts.get("user", self.username),
key_filename=opts.get("identityfile", None),
sock=paramiko.ProxyCommand(opts["proxycommand"])
if "proxycommand" in opts
else None,
)
except (paramiko.AuthenticationException, paramiko.BadHostKeyException):
# if public key auth fails, fallback to a password prompt.
# other than ssh, mtui asks only once for a password. this could
# be changed if there is demand for it.
logger.warning(
"Authentication failed on {}: AuthKey missing.".format(self.hostname)
)
logger.warning("Trying manually, please enter the root password")
password = getpass.getpass()
try:
# try again with password auth instead of public/private key
self.client.connect(
hostname=opts.get("hostname", self.hostname)
if "proxycommand" not in opts
else self.hostname,
port=int(opts.get("port", self.port)),
username=opts.get("user", self.username),
password=password,
sock=paramiko.ProxyCommand(opts["proxycommand"])
if "proxycommand" in opts
else None,
)
except paramiko.AuthenticationException:
# if a wrong password was set, don't connect to the host and
                # reraise the exception hoping it's caught somewhere in an
# upper layer.
logger.error(
"Authentication failed on {}: wrong password".format(self.hostname)
)
raise
except paramiko.SSHException:
# unspecified general SSHException. the host/sshd is probably not
# available.
logger.error("SSHException while connecting to {}".format(self.hostname))
raise
except Exception as error:
# general Exception
logger.error("{}: {}".format(self.hostname, error))
raise
def reconnect(self):
if not self.is_active():
logger.debug(
"lost connection to {}:{}, reconnecting".format(
self.hostname, self.port
)
)
self.connect()
assert self.is_active()
def new_session(self):
logger.debug("Creating new session at {}:{}".format(self.hostname, self.port))
try:
transport = self.client.get_transport()
transport.set_keepalive(60)
session = transport.open_session()
session.setblocking(0)
session.settimeout(0)
except paramiko.SSHException:
logger.debug(
"Creating of new session at {}:{} failed".format(
self.hostname, self.port
)
)
if "session" in locals():
session.close()
session = None
return session
@staticmethod
def close_session(session=None):
"""close the current session"""
if session:
try:
session.shutdown(2)
session.close()
except BaseException:
# pass all exceptions since the session is already closed or broken
pass
def __run_command(self, command):
""" open new session and run command in it
parameter: command -> str
        result: Success - session instance with running command
Fail - False
"""
try:
session = self.new_session()
session.exec_command(command)
except (AttributeError, paramiko.ChannelException, paramiko.SSHException):
if "session" in locals():
if isinstance(session, paramiko.channel.Channel):
self.close_session(session)
return False
return session
def run(self, command, lock=None):
"""run command over SSH channel
Blocks until command terminates. returncode of issued command is returned.
In case of errors, -1 is returned.
If the connection hits the timeout limit, the user is asked to wait or
cancel the current command.
Keyword arguments:
command -- the command to run
lock -- lock object for write on stdout
"""
stdout = b""
stderr = b""
session = self.__run_command(command)
while not session:
self.reconnect()
session = self.__run_command(command)
while True:
buf = b""
# wait for data to be transmitted. if the timeout is hit,
            # ask the user how to proceed
if select.select([session], [], [], self.timeout) == ([], [], []):
assert session
# writing on stdout needs locking as all run threads could
# write at the same time to stdout
if lock:
lock.acquire()
try:
if input(
'command "%s" timed out on %s. wait? (y/N) '
% (command, self.hostname)
).lower() in ["y", "yes"]:
continue
else:
                        # if the user doesn't want to wait, raise CommandTimeout
                        # and proceed
raise CommandTimeout
finally:
# release lock to allow other command threads to write to
# stdout
if lock:
lock.release()
try:
# wait for data on the session's stdout/stderr. if debug is enabled,
# print the received data
if session.recv_ready():
buf = session.recv(1024)
stdout += buf
for line in buf.decode("utf-8", "ignore").split("\n"):
if line:
logger.debug(line)
if session.recv_stderr_ready():
buf = session.recv_stderr(1024)
stderr += buf
for line in buf.decode("utf-8", "ignore").split("\n"):
if line:
logger.debug(line)
if not buf:
break
except socket.timeout:
select.select([], [], [], 1)
# save the exitcode of the last command and return it
exitcode = session.recv_exit_status()
self.close_session(session)
return (stdout.decode(), stderr.decode(), exitcode)
def __sftp_open(self):
try:
sftp = self.client.open_sftp()
except (AttributeError, paramiko.ChannelException, paramiko.SSHException):
if "sftp" in locals():
if isinstance(sftp, paramiko.sftp_client.SFTPClient):
sftp.close()
return False
return sftp
def __sftp_reconnect(self):
sftp = self.__sftp_open()
while not sftp:
self.reconnect()
sftp = self.__sftp_open()
return sftp
def listdir(self, path="."):
"""get directory listing of the remote host
Keyword arguments:
path -- remote directory path to list
"""
logger.debug(
"getting {!s}:{!s}:{!s} listing".format(self.hostname, self.port, path)
)
sftp = self.__sftp_reconnect()
listdir = sftp.listdir(path)
sftp.close()
return listdir
def open(self, filename, mode="r", bufsize=-1):
"""open remote file
default mode is reading
can be used as context manager
"""
logger.debug("{0} open({1}, {2})".format(repr(self), filename, mode))
logger.debug(" -> self.client.open_sftp")
sftp = self.__sftp_reconnect()
logger.debug(" -> sftp.open")
try:
ofile = sftp.open(filename, mode, bufsize)
except BaseException:
logger.debug(format_exc())
# TODO: recheck if is needed
if "sftp" in locals():
if isinstance(sftp, paramiko.sftp_client.SFTPClient):
sftp.close()
raise
return ofile
def readlink(self, path):
""" Return the target of a symbolic link (shortcut)."""
logger.debug("read link {}:{}:{}".format(self.hostname, self.port, path))
sftp = self.__sftp_reconnect()
link = sftp.readlink(path)
sftp.close()
return link
def is_active(self):
return self.client._transport and self.client._transport.is_active()
def close(self):
"""closes SSH channel to host and disconnects
Keyword arguments: None
"""
logger.debug("closing connection to {}:{}".format(self.hostname, self.port))
self.client.close()
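# Illustrative usage sketch: the host, user and command below are placeholders;
# run() returns (stdout, stderr, exitcode) as implemented above.
def _example_usage():
    conn = Connection('testhost.example.com', 'root', 22, timeout=60)
    conn.connect()
    out, err, exitcode = conn.run('uname -r')
    if exitcode != 0:
        logger.error('command failed: %s', err)
    conn.close()
    return out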
| gpl-3.0 | -2,305,095,095,845,652,200 | 31.935393 | 87 | 0.532111 | false |
duncan-brown/pycbc | pycbc/fft/backend_cpu.py | 12 | 1283 | # Copyright (C) 2014 Josh Willis
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
from .core import _list_available
_backend_dict = {'fftw' : 'fftw',
'mkl' : 'mkl',
'numpy' : 'npfft'}
_backend_list = ['fftw','mkl','numpy']
_alist, _adict = _list_available(_backend_list, _backend_dict)
cpu_backend = None
def set_backend(backend_list):
global cpu_backend
for backend in backend_list:
if backend in _alist:
cpu_backend = backend
break
def get_backend():
return _adict[cpu_backend]
set_backend(_backend_list)
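# Illustrative usage sketch: set_backend() keeps the first backend from the
# given preference list that is actually available, and get_backend() returns
# the corresponding module. The preference order below is an example only.
def _example_prefer_mkl():
    set_backend(['mkl', 'fftw', 'numpy'])
    return get_backend()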
| gpl-3.0 | -2,760,232,491,197,314,600 | 31.075 | 71 | 0.685113 | false |
renard/ansible-modules-core | files/copy.py | 7 | 11951 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import time
DOCUMENTATION = '''
---
module: copy
version_added: "historical"
short_description: Copies files to remote locations.
description:
- The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box.
options:
src:
description:
- Local path to a file to copy to the remote server; can be absolute or relative.
If path is a directory, it is copied recursively. In this case, if path ends
with "/", only inside contents of that directory are copied to destination.
Otherwise, if it does not end with "/", the directory itself with all contents
is copied. This behavior is similar to Rsync.
required: false
default: null
aliases: []
content:
version_added: "1.1"
description:
- When used instead of 'src', sets the contents of a file directly to the specified value.
This is for simple values, for anything complex or with formatting please switch to the template module.
required: false
default: null
dest:
description:
- Remote absolute path where the file should be copied to. If src is a directory,
this must be a directory too.
required: true
default: null
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
version_added: "0.7"
required: false
choices: [ "yes", "no" ]
default: "no"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
version_added: "1.1"
required: false
choices: [ "yes", "no" ]
default: "yes"
aliases: [ "thirsty" ]
directory_mode:
description:
- When doing a recursive copy set the mode for the directories. If this is not set we will use the system
defaults. The mode is only set on directories which are newly created, and will not affect those that
already existed.
required: false
version_added: "1.5"
extends_documentation_fragment: files
extends_documentation_fragment: validate
author:
- "Ansible Core Team"
- "Michael DeHaan"
notes:
- The "copy" module recursively copy facility does not scale to lots (>hundreds) of files.
For alternative, see synchronize module, which is a wrapper around rsync.
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode=0644
# The same example as above, but using a symbolic mode equivalent to 0644
- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u=rw,g=r,o=r"
# Another symbolic mode example, adding some permissions and removing others
- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u+rw,g-wx,o-rwx"
# Copy a new "ntp.conf file into place, backing up the original if it differs from the copied version
- copy: src=/mine/ntp.conf dest=/etc/ntp.conf owner=root group=root mode=644 backup=yes
# Copy a new "sudoers" file into place, after passing validation with visudo
- copy: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
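# Illustrative example (placeholder path and value, not from the original docs): write inline
# text to a remote file using the "content" option documented above instead of a local src file
- copy: content="environment=production" dest=/etc/myapp/env.conf owner=root group=root mode=0600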
'''
RETURN = '''
dest:
description: destination file/path
returned: success
type: string
sample: "/path/to/file.txt"
src:
description: source file used for the copy on the target machine
returned: changed
type: string
sample: "/home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source"
md5sum:
description: md5 checksum of the file after running copy
returned: when supported
type: string
sample: "2a5aeecc61dc98c4d780b14b330e3282"
checksum:
description: checksum of the file after running copy
returned: success
type: string
sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827"
backup_file:
description: name of backup file created
returned: changed and if backup=yes
type: string
sample: "/path/to/file.txt.2015-02-12@22:09~"
gid:
description: group id of the file, after execution
returned: success
type: int
sample: 100
group:
description: group of the file, after execution
returned: success
type: string
sample: "httpd"
owner:
description: owner of the file, after execution
returned: success
type: string
sample: "httpd"
uid:
description: owner id of the file, after execution
returned: success
type: int
sample: 100
mode:
description: permissions of the target, after execution
returned: success
type: string
sample: "0644"
size:
description: size of the target, after execution
returned: success
type: int
sample: 1220
state:
    description: state of the target, after execution
returned: success
type: string
sample: "file"
'''
def split_pre_existing_dir(dirname):
'''
Return the first pre-existing directory and a list of the new directories that will be created.
'''
head, tail = os.path.split(dirname)
if not os.path.exists(head):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
else:
return (head, [ tail ])
new_directory_list.append(tail)
return (pre_existing_dir, new_directory_list)
def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
'''
Walk the new directories list and make sure that permissions are as we would expect
'''
if len(new_directory_list) > 0:
working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
directory_args['path'] = working_dir
changed = module.set_fs_attributes_if_different(directory_args, changed)
changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
return changed
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = dict(
src = dict(required=False),
original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack
content = dict(required=False, no_log=True),
dest = dict(required=True),
backup = dict(default=False, type='bool'),
force = dict(default=True, aliases=['thirsty'], type='bool'),
validate = dict(required=False, type='str'),
directory_mode = dict(required=False)
),
add_file_common_args=True,
supports_check_mode=True,
)
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
backup = module.params['backup']
force = module.params['force']
original_basename = module.params.get('original_basename',None)
validate = module.params.get('validate',None)
follow = module.params['follow']
if not os.path.exists(src):
module.fail_json(msg="Source %s failed to transfer" % (src))
if not os.access(src, os.R_OK):
module.fail_json(msg="Source %s not readable" % (src))
checksum_src = module.sha1(src)
checksum_dest = None
# Backwards compat only. This will be None in FIPS mode
try:
md5sum_src = module.md5(src)
except ValueError:
md5sum_src = None
changed = False
# Special handling for recursive copy - create intermediate dirs
if original_basename and dest.endswith(os.sep):
dest = os.path.join(dest, original_basename)
dirname = os.path.dirname(dest)
if not os.path.exists(dirname) and os.path.isabs(dirname):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
os.makedirs(dirname)
directory_args = module.load_file_common_arguments(module.params)
directory_mode = module.params["directory_mode"]
if directory_mode is not None:
directory_args['mode'] = directory_mode
else:
directory_args['mode'] = None
adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
if os.path.exists(dest):
if os.path.islink(dest) and follow:
dest = os.path.realpath(dest)
if not force:
module.exit_json(msg="file already exists", src=src, dest=dest, changed=False)
if (os.path.isdir(dest)):
basename = os.path.basename(src)
if original_basename:
basename = original_basename
dest = os.path.join(dest, basename)
if os.access(dest, os.R_OK):
checksum_dest = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(dest)):
try:
# os.path.exists() can return false in some
# circumstances where the directory does not have
# the execute bit for the current user set, in
# which case the stat() call will raise an OSError
os.stat(os.path.dirname(dest))
except OSError, e:
if "permission denied" in str(e).lower():
module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest)))
module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest)))
if not os.access(os.path.dirname(dest), os.W_OK):
module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
backup_file = None
if checksum_src != checksum_dest or os.path.islink(dest):
try:
if backup:
if os.path.exists(dest):
backup_file = module.backup_local(dest)
# allow for conversion from symlink.
if os.path.islink(dest):
os.unlink(dest)
open(dest, 'w').close()
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc,out,err) = module.run_command(validate % src)
if rc != 0:
module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc,err))
module.atomic_move(src, dest)
except IOError:
module.fail_json(msg="failed to copy: %s to %s" % (src, dest))
changed = True
else:
changed = False
res_args = dict(
dest = dest, src = src, md5sum = md5sum_src, checksum = checksum_src, changed = changed
)
if backup_file:
res_args['backup_file'] = backup_file
module.params['dest'] = dest
file_args = module.load_file_common_arguments(module.params)
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
module.exit_json(**res_args)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 7,187,783,210,496,133,000 | 36.700315 | 155 | 0.651912 | false |
stefan-caraiman/cloudbase-init | cloudbaseinit/tests/plugins/windows/test_winrmlistener.py | 1 | 7113 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit.plugins.common import base
from cloudbaseinit.tests import testutils
CONF = cloudbaseinit_conf.CONF
class ConfigWinRMListenerPluginTests(unittest.TestCase):
def setUp(self):
self._mock_wintypes = mock.MagicMock()
self._mock_pywintypes = mock.MagicMock()
self._mock_win32 = mock.MagicMock()
self._moves_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict(
'sys.modules',
{'ctypes': self._mock_wintypes,
'ctypes.wintypes': self._mock_wintypes,
'pywintypes': self._mock_pywintypes,
'win32com': self._mock_win32,
'six.moves': self._moves_mock})
self._module_patcher.start()
self._winreg_mock = self._moves_mock.winreg
winrmlistener = importlib.import_module('cloudbaseinit.plugins.'
'windows.winrmlistener')
self._winrmlistener = winrmlistener.ConfigWinRMListenerPlugin()
def tearDown(self):
self._module_patcher.stop()
def _test_check_winrm_service(self, service_exists):
mock_osutils = mock.MagicMock()
mock_osutils.check_service_exists.return_value = service_exists
mock_osutils.SERVICE_START_MODE_MANUAL = 'fake start'
mock_osutils.SERVICE_START_MODE_DISABLED = 'fake start'
mock_osutils.SERVICE_STATUS_STOPPED = 'fake status'
mock_osutils.get_service_start_mode.return_value = 'fake start'
mock_osutils.get_service_status.return_value = 'fake status'
with testutils.LogSnatcher('cloudbaseinit.plugins.windows.'
'winrmlistener') as snatcher:
response = self._winrmlistener._check_winrm_service(mock_osutils)
if not service_exists:
expected_logging = [
"Cannot configure the WinRM listener as the service "
"is not available"
]
self.assertEqual(expected_logging, snatcher.output)
self.assertFalse(response)
else:
mock_osutils.get_service_start_mode.assert_called_once_with(
self._winrmlistener._winrm_service_name)
mock_osutils.get_service_start_mode.assert_called_once_with(
self._winrmlistener._winrm_service_name)
mock_osutils.set_service_start_mode.assert_called_once_with(
self._winrmlistener._winrm_service_name,
                mock_osutils.SERVICE_START_MODE_AUTOMATIC)
mock_osutils.get_service_status.assert_called_once_with(
self._winrmlistener._winrm_service_name)
mock_osutils.start_service.assert_called_once_with(
self._winrmlistener._winrm_service_name)
self.assertTrue(response)
def test_check_winrm_service(self):
self._test_check_winrm_service(service_exists=True)
def test_check_winrm_service_no_service(self):
self._test_check_winrm_service(service_exists=False)
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin._check_winrm_service')
@mock.patch('cloudbaseinit.utils.windows.winrmconfig.WinRMConfig')
@mock.patch('cloudbaseinit.utils.windows.x509.CryptoAPICertManager'
'.create_self_signed_cert')
@mock.patch('cloudbaseinit.utils.windows.security.WindowsSecurityUtils'
'.set_uac_remote_restrictions')
@mock.patch('cloudbaseinit.utils.windows.security.WindowsSecurityUtils'
'.get_uac_remote_restrictions')
def _test_execute(self, get_uac_rs, set_uac_rs, mock_create_cert,
mock_WinRMConfig,
mock_check_winrm_service, mock_get_os_utils,
service_status):
mock_service = mock.MagicMock()
mock_listener_config = mock.MagicMock()
mock_cert_thumbprint = mock.MagicMock()
shared_data = 'fake data'
mock_osutils = mock.MagicMock()
mock_get_os_utils.return_value = mock_osutils
mock_check_winrm_service.return_value = service_status
mock_create_cert.return_value = mock_cert_thumbprint
mock_WinRMConfig().get_listener.return_value = mock_listener_config
mock_listener_config.get.return_value = 9999
mock_osutils.check_os_version.side_effect = [True, False]
get_uac_rs.return_value = True
expected_check_version_calls = [mock.call(6, 0), mock.call(6, 2)]
expected_set_token_calls = [mock.call(enable=False),
mock.call(enable=True)]
response = self._winrmlistener.execute(mock_service, shared_data)
mock_get_os_utils.assert_called_once_with()
mock_check_winrm_service.assert_called_once_with(mock_osutils)
if not service_status:
self.assertEqual((base.PLUGIN_EXECUTE_ON_NEXT_BOOT,
service_status), response)
else:
self.assertEqual(expected_check_version_calls,
mock_osutils.check_os_version.call_args_list)
self.assertEqual(expected_set_token_calls,
set_uac_rs.call_args_list)
mock_WinRMConfig().set_auth_config.assert_called_once_with(
basic=CONF.winrm_enable_basic_auth)
mock_create_cert.assert_called_once_with(
self._winrmlistener._cert_subject)
mock_WinRMConfig().get_listener.assert_called_with(
protocol="HTTPS")
mock_WinRMConfig().delete_listener.assert_called_once_with(
protocol="HTTPS")
mock_WinRMConfig().create_listener.assert_called_once_with(
protocol="HTTPS", cert_thumbprint=mock_cert_thumbprint)
mock_listener_config.get.assert_called_once_with("Port")
mock_osutils.firewall_create_rule.assert_called_once_with(
"WinRM HTTPS", 9999, mock_osutils.PROTOCOL_TCP)
self.assertEqual((base.PLUGIN_EXECUTION_DONE, False), response)
def test_execute(self):
self._test_execute(service_status=True)
def test_execute_service_status_is_false(self):
self._test_execute(service_status=False)
| apache-2.0 | -434,534,336,273,615,700 | 43.180124 | 78 | 0.640799 | false |
elba7r/builder | frappe/integrations/doctype/dropbox_settings/dropbox_settings.py | 2 | 10584 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import os
from frappe import _
from frappe.utils.backups import new_backup
from frappe.utils.background_jobs import enqueue
from frappe.utils import (cint, split_emails, get_request_site_address, cstr,
get_files_path, get_backups_path, encode)
from frappe.integration_broker.doctype.integration_service.integration_service import IntegrationService
ignore_list = [".DS_Store"]
class DropboxSettings(IntegrationService):
scheduler_events = {
"daily_long": [
"frappe.integrations.doctype.dropbox_settings.dropbox_settings.take_backups_daily"
],
"weekly_long": [
"frappe.integrations.doctype.dropbox_settings.dropbox_settings.take_backups_weekly"
]
}
def onload(self):
if not self.app_access_key and frappe.conf.dropbox_access_key:
self.dropbox_setup_via_site_config = 1
def validate(self):
if not self.flags.ignore_mandatory:
self.validate_dropbox_credentails()
def on_update(self):
pass
def enable(self):
""" enable service """
if not self.flags.ignore_mandatory:
self.validate_dropbox_credentails()
def validate_dropbox_credentails(self):
try:
self.get_dropbox_session()
except Exception, e:
frappe.throw(e.message)
def get_dropbox_session(self):
try:
from dropbox import session
except:
raise Exception(_("Please install dropbox python module"))
app_access_key = self.app_access_key or frappe.conf.dropbox_access_key
app_secret_key = self.get_password(fieldname="app_secret_key",
raise_exception=False) if self.app_secret_key else frappe.conf.dropbox_secret_key
if not (app_access_key or app_secret_key):
raise Exception(_("Please set Dropbox access keys in your site config"))
sess = session.DropboxSession(app_access_key, app_secret_key, "app_folder")
return sess
@frappe.whitelist()
def get_service_details():
return """
<div>
Steps to enable dropbox backup service:
<ol>
<li> Create a dropbox app then get App Key and App Secret,
<a href="https://www.dropbox.com/developers/apps" target="_blank">
https://www.dropbox.com/developers/apps
</a>
</li>
<br>
<li> Setup credentials on Dropbox Settings doctype.
Click on
<button class="btn btn-default btn-xs disabled"> Dropbox Settings </button>
top right corner
</li>
<br>
			<li> After setting up App key and App Secret, generate access token
<button class="btn btn-default btn-xs disabled"> Allow Dropbox Access </button>
</li>
<br>
<li>
After saving settings,
<label>
<span class="input-area">
<input type="checkbox" class="input-with-feedback" checked disabled>
</span>
<span class="label-area small">Enable</span>
</label>
Dropbox Integration Service and Save a document.
</li>
</ol>
<p>
		After enabling the service, the system will take a backup of files and the database on a daily or weekly basis
		as configured on the Dropbox Settings page and upload it to your Dropbox.
</p>
</div>
"""
#get auth token
@frappe.whitelist()
def get_dropbox_authorize_url():
doc = frappe.get_doc("Dropbox Settings")
sess = doc.get_dropbox_session()
request_token = sess.obtain_request_token()
doc.update({
"dropbox_access_key": request_token.key,
"dropbox_access_secret": request_token.secret
})
doc.save(ignore_permissions=False)
return_address = get_request_site_address(True) \
+ "?cmd=frappe.integrations.doctype.dropbox_settings.dropbox_settings.dropbox_callback"
url = sess.build_authorize_url(request_token, return_address)
return {
"url": url,
"dropbox_access_key": request_token.key,
"dropbox_access_secret": request_token.secret
}
@frappe.whitelist(allow_guest=True)
def dropbox_callback(oauth_token=None, not_approved=False):
doc = frappe.get_doc("Dropbox Settings")
close = '<p class="text-muted">' + _('Please close this window') + '</p>'
if not not_approved:
if doc.get_password(fieldname="dropbox_access_key", raise_exception=False)==oauth_token:
sess = doc.get_dropbox_session()
sess.set_request_token(doc.get_password(fieldname="dropbox_access_key", raise_exception=False),
doc.get_password(fieldname="dropbox_access_secret", raise_exception=False))
access_token = sess.obtain_access_token()
frappe.db.set_value("Dropbox Settings", None, "dropbox_access_key", access_token.key)
frappe.db.set_value("Dropbox Settings", None, "dropbox_access_secret", access_token.secret)
frappe.db.commit()
else:
frappe.respond_as_web_page(_("Dropbox Setup"),
_("Illegal Access Token. Please try again") + close,
success=False, http_status_code=frappe.AuthenticationError.http_status_code)
else:
frappe.respond_as_web_page(_("Dropbox Setup"),
_("You did not apporve Dropbox Access.") + close,
success=False, http_status_code=frappe.AuthenticationError.http_status_code)
	frappe.respond_as_web_page(_("Dropbox Setup"),
		_("Dropbox access is approved!") + close,
		success=True)
# backup process
@frappe.whitelist()
def take_backup():
"Enqueue longjob for taking backup to dropbox"
enqueue("frappe.integrations.doctype.dropbox_settings.dropbox_settings.take_backup_to_dropbox", queue='long')
frappe.msgprint(_("Queued for backup. It may take a few minutes to an hour."))
def take_backups_daily():
take_backups_if("Daily")
def take_backups_weekly():
take_backups_if("Weekly")
def take_backups_if(freq):
if frappe.db.get_value("Dropbox Settings", None, "backup_frequency") == freq:
take_backup_to_dropbox()
def take_backup_to_dropbox():
did_not_upload, error_log = [], []
try:
if cint(frappe.db.get_value("Integration Service", "Dropbox", "enabled")):
did_not_upload, error_log = backup_to_dropbox()
if did_not_upload: raise Exception
send_email(True, "Dropbox")
except Exception:
file_and_error = [" - ".join(f) for f in zip(did_not_upload, error_log)]
error_message = ("\n".join(file_and_error) + "\n" + frappe.get_traceback())
frappe.errprint(error_message)
send_email(False, "Dropbox", error_message)
def send_email(success, service_name, error_status=None):
if success:
subject = "Backup Upload Successful"
message ="""<h3>Backup Uploaded Successfully</h3><p>Hi there, this is just to inform you
that your backup was successfully uploaded to your %s account. So relax!</p>
""" % service_name
else:
subject = "[Warning] Backup Upload Failed"
message ="""<h3>Backup Upload Failed</h3><p>Oops, your automated backup to %s
failed.</p>
<p>Error message: <br>
<pre><code>%s</code></pre>
</p>
<p>Please contact your system manager for more information.</p>
""" % (service_name, error_status)
if not frappe.db:
frappe.connect()
recipients = split_emails(frappe.db.get_value("Dropbox Settings", None, "send_notifications_to"))
frappe.sendmail(recipients=recipients, subject=subject, message=message)
def backup_to_dropbox():
if not frappe.db:
frappe.connect()
dropbox_client = get_dropbox_client()
# upload database
backup = new_backup(ignore_files=True)
filename = os.path.join(get_backups_path(), os.path.basename(backup.backup_path_db))
dropbox_client = upload_file_to_dropbox(filename, "/database", dropbox_client)
frappe.db.close()
# upload files to files folder
did_not_upload = []
error_log = []
dropbox_client = upload_from_folder(get_files_path(), "/files", dropbox_client, did_not_upload, error_log)
dropbox_client = upload_from_folder(get_files_path(is_private=1), "/private/files", dropbox_client, did_not_upload, error_log)
frappe.connect()
return did_not_upload, list(set(error_log))
def get_dropbox_client(previous_dropbox_client=None):
from dropbox import client
doc = frappe.get_doc("Dropbox Settings")
sess = doc.get_dropbox_session()
sess.set_token(doc.get_password(fieldname="dropbox_access_key", raise_exception=False),
doc.get_password(fieldname="dropbox_access_secret", raise_exception=False))
dropbox_client = client.DropboxClient(sess)
# upgrade to oauth2
token = dropbox_client.create_oauth2_access_token()
dropbox_client = client.DropboxClient(token)
if previous_dropbox_client:
dropbox_client.connection_reset_count = previous_dropbox_client.connection_reset_count + 1
else:
dropbox_client.connection_reset_count = 0
return dropbox_client
def upload_file_to_dropbox(filename, folder, dropbox_client):
from dropbox import rest
size = os.stat(encode(filename)).st_size
with open(filename, 'r') as f:
# if max packet size reached, use chunked uploader
max_packet_size = 4194304
if size > max_packet_size:
uploader = dropbox_client.get_chunked_uploader(f, size)
while uploader.offset < size:
try:
uploader.upload_chunked()
uploader.finish(folder + "/" + os.path.basename(filename), overwrite=True)
except rest.ErrorResponse, e:
# if "[401] u'Access token not found.'",
# it means that the user needs to again allow dropbox backup from the UI
# so re-raise
exc_message = cstr(e)
if (exc_message.startswith("[401]")
and dropbox_client.connection_reset_count < 10
and exc_message != "[401] u'Access token not found.'"):
# session expired, so get a new connection!
# [401] u"The given OAuth 2 access token doesn't exist or has expired."
dropbox_client = get_dropbox_client(dropbox_client)
else:
raise
else:
dropbox_client.put_file(folder + "/" + os.path.basename(filename), f, overwrite=True)
return dropbox_client
def upload_from_folder(path, dropbox_folder, dropbox_client, did_not_upload, error_log):
import dropbox.rest
if not os.path.exists(path):
return
try:
response = dropbox_client.metadata(dropbox_folder)
except dropbox.rest.ErrorResponse, e:
# folder not found
if e.status==404:
response = {"contents": []}
else:
raise
for filename in os.listdir(path):
filename = cstr(filename)
if filename in ignore_list:
continue
found = False
filepath = os.path.join(path, filename)
for file_metadata in response["contents"]:
if (os.path.basename(filepath) == os.path.basename(file_metadata["path"])
and os.stat(encode(filepath)).st_size == int(file_metadata["bytes"])):
found = True
break
if not found:
try:
dropbox_client = upload_file_to_dropbox(filepath, dropbox_folder, dropbox_client)
except Exception:
did_not_upload.append(filename)
error_log.append(frappe.get_traceback())
return dropbox_client
| mit | 1,453,086,745,566,155,500 | 31.170213 | 127 | 0.713813 | false |
joshuaohana/StarTrekDeck | main.py | 1 | 2010 | #TODO is import os a thing?
import card_data
from Player import Player
from Deck import Deck
from Hand import Hand
from CardTypes.shipcard import shipcard
from Discardpile import Discardpile
Basicship = shipcard('Nick\'s Ship', 0, 0, 0, 8)
NicksDeck = Deck('Nick\'s Badass Mothafuckin Deck',[card_data.cards['Ensign'], card_data.cards['Ensign'], card_data.cards['Ensign'], card_data.cards['Ensign'],
card_data.cards['Ensign'], card_data.cards['Ensign'], card_data.cards['Commander'], card_data.cards['William Riker'],card_data.cards['Lieutenant'],card_data.cards['Ensign']], Basicship)
PlayerNick = Player('Player Nick', NicksDeck, Basicship)
MaxinesDeck = Deck('Maxine\'s Wooftastic Deck', [card_data.cards['Duras'], card_data.cards['Jean Luc Picard'], card_data.cards['Commander'], card_data.cards['Jean Luc Picard'],
card_data.cards['Warp Speed'], card_data.cards['Fire All Weapons'], card_data.cards['Commander'], card_data.cards['William Riker'],card_data.cards['Lieutenant'],card_data.cards['Ensign']], Basicship)
# MaxinesHand = Hand('Maxine\'s first hand', MaxinesDeck, Basicship)
# MaxinesDiscard = Discardpile(MaxinesHand)
PlayerMaxine = Player('Player Maxine', MaxinesDeck, Basicship)
print(NicksDeck.drawhand())
# NicksDiscard.addhand(None)
# print(NicksDiscard)
# print(card_data.cards['Jean Luc Picard'].flavortext)
# NicksDeck.printdeck()
# print(NicksDeck.countdeck())
# NicksDeck.changedeckname('Nick\'s REALLY Badass Mothafuckin Deck')
# NicksHand.calcstats()
# print(NicksHand.calcstats())
#TODO look for instances of self in drawcard
# NicksDeck.addcard(Ensign)
# print(NicksDeck.count_cards())
# # fuck that ensign he sucks
# # you wanna use the name of the cards,
# # since down the road you will no longer have a reference to the card item itself, but you'll always know its name
# print(NicksDeck.trashcard('Ensign', count='all'))
# print(NicksDeck.count_cards())
# print(NicksDeck.trashcard('Lieutenant', count=1))
# print(NicksDeck.count_cards()) | gpl-3.0 | 8,055,695,909,640,823,000 | 44.704545 | 215 | 0.736318 | false |
daineseh/linkit-smart-7688-duo-example | servo_motor_FS90R/control_FS90R.py | 1 | 1129 | #!/usr/bin/env python
import serial
import sys
s = None
def setup():
global s
# open serial COM port to /dev/ttyS0, which maps to UART0(D0/D1)
# the baudrate is set to 57600 and should be the same as the one
# specified in the Arduino sketch uploaded to ATMega32U4.
s = serial.Serial("/dev/ttyS0", 57600)
def is_invalid_value(str_value):
if not str_value.isdigit():
return True
value = int(str_value)
if value == 0:
return False
elif value == 1:
return False
elif ((value >= 700) and (value <= 2300)):
return False
else:
return True
# Value Definition
# 0: Stop the servo
# 1: Start the servo
# 700~1400: CW
# 1400~2300: CCW
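# Illustrative invocations (not part of the original script); the values follow
# the definition above:
#   python control_FS90R.py 1       # start the servo
#   python control_FS90R.py 1000    # rotate clockwise (700~1400)
#   python control_FS90R.py 1800    # rotate counter-clockwise (1400~2300)
#   python control_FS90R.py 0       # stop the servo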
def main():
    if len(sys.argv) < 2 or is_invalid_value(sys.argv[1]):
        print("$control_FS90R.py [0 | 1 | 700~2300]")
print("Value Definition:")
print("="*18)
print(" 0: Stop the servo")
print(" 1: Start the servo")
print(" 700~1400: CW")
print(" 1400~2300: CCW")
return
s.write(sys.argv[1])
if __name__ == '__main__':
setup()
main()
| mit | -7,314,979,898,884,196,000 | 20.711538 | 68 | 0.571302 | false |
Andrew-McNab-UK/DIRAC | Core/Base/Client.py | 2 | 3300 | """ Base class for DIRAC Client """
__RCSID__ = "$Id$"
from DIRAC.Core.DISET.RPCClient import RPCClient
class Client( object ):
""" Simple class to redirect unknown actions directly to the server. Arguments
to the constructor are passed to the RPCClient constructor as they are.
Some of them can however be overwritten at each call (url and timeout).
This class is not thread safe !
- The self.serverURL member should be set by the inheriting class
"""
def __init__( self, **kwargs ):
""" C'tor.
:param kwargs: just stored as an attribute and passed when creating
the RPCClient
"""
self.serverURL = None
self.call = None # I suppose it is initialized here to make pylint happy
self.__kwargs = kwargs
def setServer( self, url ):
""" Set the server URL used by default
:param url: url of the service
"""
self.serverURL = url
def setTimeout( self, timeout ):
""" Specify the timeout of the call. Forwarded to RPCClient
:param timeout: guess...
"""
self.__kwargs['timeout'] = timeout
def getServer( self ):
""" Getter for the server url. Useful ?
"""
return self.serverURL
def __getattr__( self, name ):
""" Store the attribute asked and call executeRPC.
This means that Client should not be shared between threads !
"""
# This allows the dir() method to work as well as tab completion in ipython
if name == '__dir__':
return super( Client, self ).__getattr__() #pylint: disable=no-member
self.call = name
return self.executeRPC
def executeRPC( self, *parms, **kws ):
""" This method extracts some parameters from kwargs that
        are used as parameters of the RPCClient constructor.
Unfortunately, only a few of all the available
parameters of BaseClient are exposed.
:param rpc: if an RPC client is passed, use that one
        :param timeout: we can change the timeout on a per-call basis. Default 120 s
:param url: We can specify which url to use
"""
toExecute = self.call
# Check whether 'rpc' keyword is specified
rpc = False
if kws.has_key( 'rpc' ):
rpc = kws['rpc']
del kws['rpc']
# Check whether the 'timeout' keyword is specified
timeout = 120
if kws.has_key( 'timeout' ):
timeout = kws['timeout']
del kws['timeout']
# Check whether the 'url' keyword is specified
url = ''
if kws.has_key( 'url' ):
url = kws['url']
del kws['url']
# Create the RPCClient
rpcClient = self._getRPC( rpc, url, timeout )
# Execute the method
return getattr( rpcClient, toExecute )( *parms )
# evalString = "rpcClient.%s(*parms,**kws)" % toExecute
# return eval( evalString )
def _getRPC( self, rpc = None, url = '', timeout = 600 ):
""" Return an RPCClient object constructed following the attributes.
:param rpc: if set, returns this object
:param url: url of the service. If not set, use self.serverURL
:param timeout: timeout of the call
"""
if not rpc:
if not url:
url = self.serverURL
self.__kwargs.setdefault( 'timeout', timeout )
rpc = RPCClient( url, **self.__kwargs )
return rpc
| gpl-3.0 | 7,096,099,077,450,960,000 | 32.333333 | 84 | 0.625455 | false |
glennhickey/hal | benchmarks/benchMark.py | 1 | 2163 | #!/usr/bin/env python3
#Copyright (C) 2012 by Glenn Hickey
# Copyright (C) 2012-2019 by UCSC Computational Genomics Lab
#
#Released under the MIT license, see LICENSE.txt
import argparse
import os
import sys
import traceback
import time
import random
import resource
import psutil
from sonLib.bioio import getTempDirectory
from sonLib.bioio import getTempFile
from sonLib.bioio import popenCatch
from sonLib.bioio import system
def runHalGen(preset, seed, hdf5Chunk, hdf5Compression, outPath):
system("halRandGen --preset %s --seed %d --hdf5Chunk %d\
--hdf5Compression %d %s" % (preset, seed, hdf5Chunk, hdf5Compression, outPath))
def runHalCons(halPath, outputPath):
system("halCons %s > outputPath" % halPath)
def main(argv=None):
if argv is None:
argv = sys.argv
seed = random.randint(0, 2**31)
parser = argparse.ArgumentParser(description='Run little hal test')
parser.add_argument('--preset', type=str,
help='halGenRandom preset to use [small, medium, big, large]', default='small')
args = parser.parse_args()
rval = 0
print("chunk, comp, time(gen), time(cons), fsize(k)")
try:
for chunkSize in [10000, 100000, 1000000, 10000000]:
for compression in [0, 2, 5, 7, 9]:
try:
tempDir = getTempDirectory(rootDir="./")
tempFile = getTempFile(suffix=".h5", rootDir=tempDir)
except:
traceback.print_exc(file=sys.stdout)
return 1
                t = time.time()
                runHalGen(args.preset, seed, chunkSize, compression, tempFile)
                fsize = os.path.getsize(tempFile)
                th = time.time() - t
                runHalCons(tempFile, getTempFile(rootDir=tempDir))
                tc = time.time() - th - t
                print("%d, %d, %.3f, %.3f, %.2f" % (
                    chunkSize, compression, th, tc, fsize / 1024.))
except:
traceback.print_exc(file=sys.stdout)
return 1
system("rm -rf %s" % tempDir)
return rval
if __name__ == "__main__":
sys.exit(main())
| mit | 5,595,900,775,344,440,000 | 30.347826 | 103 | 0.598243 | false |
ppries/tensorflow | tensorflow/contrib/bayesflow/python/kernel_tests/entropy_test.py | 12 | 12557 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Monte Carlo Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
distributions = tf.contrib.distributions
layers = tf.contrib.layers
entropy = tf.contrib.bayesflow.entropy
class NormalNoEntropy(distributions.Normal): # pylint: disable=no-init
"""Normal distribution without a `.entropy` method."""
def entropy(self):
return NotImplementedError('Entropy removed by gremlins')
def get_train_op(scalar_loss, optimizer='SGD', learning_rate=1.0, decay=0.0):
global_step = tf.Variable(0)
def decay_fn(rate, t):
return rate * (1 + tf.to_float(t))**(-decay)
train_op = layers.optimize_loss(
scalar_loss,
global_step,
learning_rate,
optimizer,
learning_rate_decay_fn=decay_fn)
return train_op
def _assert_monotonic_decreasing(array, atol=1e-5):
array = np.asarray(array)
_assert_monotonic_increasing(-array, atol=atol)
def _assert_monotonic_increasing(array, atol=1e-5):
array = np.asarray(array)
diff = np.diff(array.ravel())
np.testing.assert_array_less(-1 * atol, diff)
class ElboRatioTest(tf.test.TestCase):
"""Show sampling converges to true KL values."""
def setUp(self):
self._rng = np.random.RandomState(0)
def test_convergence_to_kl_using_sample_form_on_3dim_normal(self):
# Test that the sample mean KL is the same as analytic when we use samples
# to estimate every part of the KL divergence ratio.
vector_shape = (2, 3)
n_samples = 5000
with self.test_session():
q = distributions.MultivariateNormalDiag(
mu=self._rng.rand(*vector_shape),
diag_stdev=self._rng.rand(*vector_shape))
p = distributions.MultivariateNormalDiag(
mu=self._rng.rand(*vector_shape),
diag_stdev=self._rng.rand(*vector_shape))
# In this case, the log_ratio is the KL.
sample_kl = -1 * entropy.elbo_ratio(
log_p=p.log_prob,
q=q,
n=n_samples,
form=entropy.ELBOForms.sample,
seed=42)
actual_kl = distributions.kl(q, p)
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
self.assertEqual((2,), sample_kl.get_shape())
self.assertAllClose(actual_kl.eval(), sample_kl.eval(), rtol=0.03)
def test_convergence_to_kl_using_analytic_entropy_form_on_3dim_normal(self):
# Test that the sample mean KL is the same as analytic when we use an
# analytic entropy combined with sampled cross-entropy.
n_samples = 5000
vector_shape = (2, 3)
with self.test_session():
q = distributions.MultivariateNormalDiag(
mu=self._rng.rand(*vector_shape),
diag_stdev=self._rng.rand(*vector_shape))
p = distributions.MultivariateNormalDiag(
mu=self._rng.rand(*vector_shape),
diag_stdev=self._rng.rand(*vector_shape))
# In this case, the log_ratio is the KL.
sample_kl = -1 * entropy.elbo_ratio(
log_p=p.log_prob,
q=q,
n=n_samples,
form=entropy.ELBOForms.analytic_entropy,
seed=42)
actual_kl = distributions.kl(q, p)
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
self.assertEqual((2,), sample_kl.get_shape())
self.assertAllClose(actual_kl.eval(), sample_kl.eval(), rtol=0.05)
def test_sample_kl_zero_when_p_and_q_are_the_same_distribution(self):
n_samples = 50
vector_shape = (2, 3)
with self.test_session():
q = distributions.MultivariateNormalDiag(
mu=self._rng.rand(*vector_shape),
diag_stdev=self._rng.rand(*vector_shape))
# In this case, the log_ratio is the KL.
sample_kl = -1 * entropy.elbo_ratio(
log_p=q.log_prob,
q=q,
n=n_samples,
form=entropy.ELBOForms.sample,
seed=42)
self.assertEqual((2,), sample_kl.get_shape())
self.assertAllClose(np.zeros(2), sample_kl.eval())
class EntropyShannonTest(tf.test.TestCase):
def test_normal_entropy_default_form_uses_exact_entropy(self):
with self.test_session():
dist = distributions.Normal(mu=1.11, sigma=2.22)
mc_entropy = entropy.entropy_shannon(dist, n=11)
exact_entropy = dist.entropy()
self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())
self.assertAllClose(exact_entropy.eval(), mc_entropy.eval())
def test_normal_entropy_analytic_form_uses_exact_entropy(self):
with self.test_session():
dist = distributions.Normal(mu=1.11, sigma=2.22)
mc_entropy = entropy.entropy_shannon(
dist, form=entropy.ELBOForms.analytic_entropy)
exact_entropy = dist.entropy()
self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())
self.assertAllClose(exact_entropy.eval(), mc_entropy.eval())
def test_normal_entropy_sample_form_gets_approximate_answer(self):
# Tested by showing we get a good answer that is not exact.
with self.test_session():
dist = distributions.Normal(mu=1.11, sigma=2.22)
mc_entropy = entropy.entropy_shannon(
dist, n=1000, form=entropy.ELBOForms.sample, seed=0)
exact_entropy = dist.entropy()
self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)
# Make sure there is some error, proving we used samples
self.assertLess(0.0001, tf.abs(exact_entropy - mc_entropy).eval())
def test_default_entropy_falls_back_on_sample_if_analytic_not_available(self):
# Tested by showing we get a good answer that is not exact.
with self.test_session():
# NormalNoEntropy is like a Normal, but does not have .entropy method, so
# we are forced to fall back on sample entropy.
dist_no_entropy = NormalNoEntropy(mu=1.11, sigma=2.22)
dist_yes_entropy = distributions.Normal(mu=1.11, sigma=2.22)
mc_entropy = entropy.entropy_shannon(
dist_no_entropy, n=1000, form=entropy.ELBOForms.sample, seed=0)
exact_entropy = dist_yes_entropy.entropy()
self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)
# Make sure there is some error, proving we used samples
self.assertLess(0.0001, tf.abs(exact_entropy - mc_entropy).eval())
class RenyiRatioTest(tf.test.TestCase):
"""Show renyi_ratio is minimized when the distributions match."""
def setUp(self):
self._rng = np.random.RandomState(0)
def test_fitting_two_dimensional_normal_n_equals_1000(self):
    # Minimizing Renyi divergence should allow us to make one normal match
# another one exactly.
n = 1000
mu_true = np.array([1.0, -1.0], dtype=np.float64)
chol_true = np.array([[2.0, 0.0], [0.5, 1.0]], dtype=np.float64)
with self.test_session() as sess:
target = distributions.MultivariateNormalCholesky(mu_true, chol_true)
# Set up q distribution by defining mean/covariance as Variables
mu = tf.Variable(np.zeros(mu_true.shape), dtype=mu_true.dtype, name='mu')
mat = tf.Variable(
np.zeros(chol_true.shape), dtype=chol_true.dtype, name='mat')
chol = distributions.matrix_diag_transform(mat, transform=tf.nn.softplus)
q = distributions.MultivariateNormalCholesky(mu, chol)
for alpha in [0.25, 0.75]:
negative_renyi_divergence = entropy.renyi_ratio(
log_p=target.log_prob, q=q, n=n, alpha=alpha, seed=0)
train_op = get_train_op(
tf.reduce_mean(-negative_renyi_divergence),
optimizer='SGD',
learning_rate=0.5,
decay=0.1)
tf.global_variables_initializer().run()
renyis = []
for step in range(1000):
sess.run(train_op)
if step in [1, 5, 100]:
renyis.append(negative_renyi_divergence.eval())
# This optimization should maximize the renyi divergence.
_assert_monotonic_increasing(renyis, atol=0)
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
self.assertAllClose(target.mu.eval(), q.mu.eval(), rtol=0.06)
self.assertAllClose(target.sigma.eval(), q.sigma.eval(), rtol=0.02)
def test_divergence_between_identical_distributions_is_zero(self):
n = 1000
vector_shape = (2, 3)
with self.test_session():
q = distributions.MultivariateNormalDiag(
mu=self._rng.rand(*vector_shape),
diag_stdev=self._rng.rand(*vector_shape))
for alpha in [0.25, 0.75]:
negative_renyi_divergence = entropy.renyi_ratio(
log_p=q.log_prob, q=q, n=n, alpha=alpha, seed=0)
self.assertEqual((2,), negative_renyi_divergence.get_shape())
self.assertAllClose(np.zeros(2), negative_renyi_divergence.eval())
class RenyiAlphaTest(tf.test.TestCase):
def test_with_three_alphas(self):
with self.test_session():
for dtype in (tf.float32, tf.float64):
alpha_min = tf.constant(0.0, dtype=dtype)
alpha_max = 0.5
decay_time = 3
alpha_0 = entropy.renyi_alpha(
0, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
alpha_1 = entropy.renyi_alpha(
1, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
alpha_2 = entropy.renyi_alpha(
2, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
alpha_3 = entropy.renyi_alpha(
3, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
# Alpha should start at alpha_max.
self.assertAllClose(alpha_max, alpha_0.eval(), atol=1e-5)
# Alpha should finish at alpha_min.
self.assertAllClose(alpha_min.eval(), alpha_3.eval(), atol=1e-5)
# In between, alpha should be monotonically decreasing.
_assert_monotonic_decreasing(
[alpha_0.eval(), alpha_1.eval(), alpha_2.eval(), alpha_3.eval()])
def test_non_scalar_input_raises(self):
with self.test_session():
# Good values here
step = 0
alpha_min = 0.0
alpha_max = 0.5
decay_time = 3
# Use one bad value inside each check.
# The "bad" value is always the non-scalar one.
with self.assertRaisesRegexp(ValueError, 'must be scalar'):
entropy.renyi_alpha(
[step], decay_time, alpha_min=alpha_min, alpha_max=alpha_max).eval()
with self.assertRaisesRegexp(ValueError, 'must be scalar'):
entropy.renyi_alpha(
step, [decay_time], alpha_min=alpha_min, alpha_max=alpha_max).eval()
with self.assertRaisesRegexp(ValueError, 'must be scalar'):
entropy.renyi_alpha(
step, decay_time, alpha_min=[alpha_min], alpha_max=alpha_max).eval()
with self.assertRaisesRegexp(ValueError, 'must be scalar'):
entropy.renyi_alpha(
step, decay_time, alpha_min=alpha_min, alpha_max=[alpha_max]).eval()
def test_input_with_wrong_sign_raises(self):
with self.test_session():
# Good values here
step = 0
alpha_min = 0.0
alpha_max = 0.5
decay_time = 3
# Use one bad value inside each check.
# The "bad" value is always the non-scalar one.
with self.assertRaisesOpError('decay_time must be positive'):
entropy.renyi_alpha(
step, 0.0, alpha_min=alpha_min, alpha_max=alpha_max).eval()
with self.assertRaisesOpError('step must be non-negative'):
entropy.renyi_alpha(
-1, decay_time, alpha_min=alpha_min, alpha_max=alpha_max).eval()
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -4,692,664,063,794,712,000 | 36.261128 | 80 | 0.648244 | false |
RMMoreton/drawwrite | drawwritesite/drawwrite/admin.py | 1 | 1408 | """Configure the drawwrite admin page."""
from django.contrib import admin
from .models import Chain, DrawLink, Game, Player, WriteLink
def get_game_name_from_chain(chain):
"""Return the name of the game that chain belongs to."""
return chain.player.game.name
get_game_name_from_chain.short_description = 'Game Name'
class DrawLinkInline(admin.StackedInline):
"""Configure the inline DrawLinks."""
model = DrawLink
extra = 0
class WriteLinkInline(admin.StackedInline):
"""Configure the inline WriteLinks view."""
model = WriteLink
extra = 0
class ChainAdmin(admin.ModelAdmin):
"""Configure the top level Chain view."""
inlines = [WriteLinkInline, DrawLinkInline]
list_display = (
'pk',
get_game_name_from_chain,
'player',
'next_link_position',
'time_created',
)
readonly_fields = ('pk',)
class PlayerAdmin(admin.ModelAdmin):
"""Configure the top level Player view."""
readonly_fields = ('pk',)
class PlayerInline(admin.StackedInline):
"""Configure the inline Player view."""
model = Player
extra = 0
readonly_fields = ('pk',)
class GameAdmin(admin.ModelAdmin):
"""Configure the top level Game view."""
inlines = [PlayerInline]
readonly_fields = ('pk',)
admin.site.register(Game, GameAdmin)
admin.site.register(Player, PlayerAdmin)
admin.site.register(Chain, ChainAdmin)
| mit | 3,072,840,755,034,112,500 | 26.607843 | 60 | 0.677557 | false |
robbiet480/home-assistant | tests/components/cover/test_device_action.py | 5 | 15362 | """The tests for Cover device actions."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.cover import DOMAIN
from homeassistant.const import CONF_PLATFORM
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
async def test_get_actions(hass, device_reg, entity_reg):
"""Test we get the expected actions from a cover."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
ent = platform.ENTITIES[0]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_actions = [
{
"domain": DOMAIN,
"type": "open",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
{
"domain": DOMAIN,
"type": "close",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_get_actions_tilt(hass, device_reg, entity_reg):
"""Test we get the expected actions from a cover."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
ent = platform.ENTITIES[3]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_actions = [
{
"domain": DOMAIN,
"type": "open",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
{
"domain": DOMAIN,
"type": "close",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
{
"domain": DOMAIN,
"type": "open_tilt",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
{
"domain": DOMAIN,
"type": "close_tilt",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_get_actions_set_pos(hass, device_reg, entity_reg):
"""Test we get the expected actions from a cover."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
ent = platform.ENTITIES[1]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_actions = [
{
"domain": DOMAIN,
"type": "set_position",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_get_actions_set_tilt_pos(hass, device_reg, entity_reg):
"""Test we get the expected actions from a cover."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
ent = platform.ENTITIES[2]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_actions = [
{
"domain": DOMAIN,
"type": "open",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
{
"domain": DOMAIN,
"type": "close",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
{
"domain": DOMAIN,
"type": "set_tilt_position",
"device_id": device_entry.id,
"entity_id": ent.entity_id,
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_get_action_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a cover action."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
ent = platform.ENTITIES[0]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert len(actions) == 2 # open, close
for action in actions:
capabilities = await async_get_device_automation_capabilities(
hass, "action", action
)
assert capabilities == {"extra_fields": []}
async def test_get_action_capabilities_set_pos(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a cover action."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
ent = platform.ENTITIES[1]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_capabilities = {
"extra_fields": [
{
"name": "position",
"optional": True,
"type": "integer",
"default": 0,
"valueMax": 100,
"valueMin": 0,
}
]
}
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert len(actions) == 1 # set_position
for action in actions:
capabilities = await async_get_device_automation_capabilities(
hass, "action", action
)
if action["type"] == "set_position":
assert capabilities == expected_capabilities
else:
assert capabilities == {"extra_fields": []}
async def test_get_action_capabilities_set_tilt_pos(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a cover action."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
ent = platform.ENTITIES[2]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_capabilities = {
"extra_fields": [
{
"name": "position",
"optional": True,
"type": "integer",
"default": 0,
"valueMax": 100,
"valueMin": 0,
}
]
}
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert len(actions) == 3 # open, close, set_tilt_position
for action in actions:
capabilities = await async_get_device_automation_capabilities(
hass, "action", action
)
if action["type"] == "set_tilt_position":
assert capabilities == expected_capabilities
else:
assert capabilities == {"extra_fields": []}
async def test_action(hass):
"""Test for cover actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event_open"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "open",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event_close"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "close",
},
},
]
},
)
open_calls = async_mock_service(hass, "cover", "open_cover")
close_calls = async_mock_service(hass, "cover", "close_cover")
hass.bus.async_fire("test_event_open")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 0
hass.bus.async_fire("test_event_close")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 1
hass.bus.async_fire("test_event_stop")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 1
async def test_action_tilt(hass):
"""Test for cover tilt actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event_open"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "open_tilt",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event_close"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "close_tilt",
},
},
]
},
)
open_calls = async_mock_service(hass, "cover", "open_cover_tilt")
close_calls = async_mock_service(hass, "cover", "close_cover_tilt")
hass.bus.async_fire("test_event_open")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 0
hass.bus.async_fire("test_event_close")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 1
hass.bus.async_fire("test_event_stop")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 1
async def test_action_set_position(hass):
"""Test for cover set position actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "event",
"event_type": "test_event_set_pos",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "set_position",
"position": 25,
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_set_tilt_pos",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "set_tilt_position",
"position": 75,
},
},
]
},
)
cover_pos_calls = async_mock_service(hass, "cover", "set_cover_position")
tilt_pos_calls = async_mock_service(hass, "cover", "set_cover_tilt_position")
hass.bus.async_fire("test_event_set_pos")
await hass.async_block_till_done()
assert len(cover_pos_calls) == 1
assert cover_pos_calls[0].data["position"] == 25
assert len(tilt_pos_calls) == 0
hass.bus.async_fire("test_event_set_tilt_pos")
await hass.async_block_till_done()
assert len(cover_pos_calls) == 1
assert len(tilt_pos_calls) == 1
assert tilt_pos_calls[0].data["tilt_position"] == 75
| apache-2.0 | 6,677,853,787,852,848,000 | 32.837004 | 87 | 0.55455 | false |
johndlong/walrus | walrus/search/porter.py | 4 | 12745 | """\
Original Copyright and Information
Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It may be be regarded
as canonical, in that it follows the algorithm presented in
Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
no. 3, pp 130-137,
only differing from it at the points marked --DEPARTURE-- below.
See also http://www.tartarus.org/~martin/PorterStemmer
The algorithm as described in the paper could be exactly replicated
by adjusting the points of DEPARTURE, but this is barely necessary,
because (a) the points of DEPARTURE are definitely improvements, and
(b) no encoding of the Porter stemmer I have seen is anything like
as exact as this version, even with the points of DEPARTURE!
Vivake Gupta ([email protected])
Release 1: January 2001
Further adjustments by Santiago Bruno ([email protected])
to allow word input not restricted to one word per line, leading
to:
release 2: July 2008
Modified and adapted to work with stdnet.
"""
import sys
class PorterStemmer(object):
def __init__(self):
"""The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[k0],
b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
readjusted downwards as the stemming progresses. Zero termination is
not in fact used in the algorithm.
Note that only lower case sequences are stemmed. Forcing to lower case
should be done before stem(...) is called.
"""
self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string
def cons(self, i):
"""cons(i) is TRUE <=> b[i] is a consonant."""
if (self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or
self.b[i] == 'o' or self.b[i] == 'u'):
return 0
if self.b[i] == 'y':
if i == self.k0:
return 1
else:
return (not self.cons(i - 1))
return 1
def m(self):
"""m() measures the number of consonant sequences between k0 and j.
if c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
"""
n = 0
i = self.k0
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
while 1:
while 1:
if i > self.j:
return n
if self.cons(i):
break
i = i + 1
i = i + 1
n = n + 1
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
def vowelinstem(self):
"""vowelinstem() is TRUE <=> k0,...j contains a vowel"""
for i in range(self.k0, self.j + 1):
if not self.cons(i):
return 1
return 0
def doublec(self, j):
"""doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
if j < (self.k0 + 1):
return 0
if (self.b[j] != self.b[j-1]):
return 0
return self.cons(j)
def cvc(self, i):
"""cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel -
consonant and also if the second c is not w, x or y.
        This is used when trying to restore an e at the end of a short word, e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
if (i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or
not self.cons(i-2)):
return 0
ch = self.b[i]
if ch == 'w' or ch == 'x' or ch == 'y':
return 0
return 1
def ends(self, s):
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
return 0
if length > (self.k - self.k0 + 1):
return 0
if self.b[self.k-length+1:self.k+1] != s:
return 0
self.j = self.k - length
return 1
def setto(self, s):
"""setto(s) sets (j+1),...k to the characters in the string s,
readjusting k."""
length = len(s)
self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
self.k = self.j + length
def r(self, s):
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
def step1ab(self):
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == 's':
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
self.setto("i")
elif self.b[self.k - 1] != 's':
self.k = self.k - 1
if self.ends("eed"):
if self.m() > 0:
self.k = self.k - 1
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
self.k = self.j
if self.ends("at"): self.setto("ate")
elif self.ends("bl"): self.setto("ble")
elif self.ends("iz"): self.setto("ize")
elif self.doublec(self.k):
self.k = self.k - 1
ch = self.b[self.k]
if ch == 'l' or ch == 's' or ch == 'z':
self.k = self.k + 1
elif (self.m() == 1 and self.cvc(self.k)):
self.setto("e")
def step1c(self):
"""step1c() turns terminal y to i when there is another vowel in
the stem."""
if (self.ends("y") and self.vowelinstem()):
self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]
def step2(self):
"""step2() maps double suffices to single ones.
so -ization ( = -ize plus -ation) maps to -ize etc. note that the
string before the suffix must give m() > 0.
"""
if self.b[self.k - 1] == 'a':
if self.ends("ational"): self.r("ate")
elif self.ends("tional"): self.r("tion")
elif self.b[self.k - 1] == 'c':
if self.ends("enci"): self.r("ence")
elif self.ends("anci"): self.r("ance")
elif self.b[self.k - 1] == 'e':
if self.ends("izer"): self.r("ize")
elif self.b[self.k - 1] == 'l':
if self.ends("bli"): self.r("ble") # --DEPARTURE--
# To match the published algorithm, replace this phrase with
# if self.ends("abli"): self.r("able")
elif self.ends("alli"): self.r("al")
elif self.ends("entli"): self.r("ent")
elif self.ends("eli"): self.r("e")
elif self.ends("ousli"): self.r("ous")
elif self.b[self.k - 1] == 'o':
if self.ends("ization"): self.r("ize")
elif self.ends("ation"): self.r("ate")
elif self.ends("ator"): self.r("ate")
elif self.b[self.k - 1] == 's':
if self.ends("alism"): self.r("al")
elif self.ends("iveness"): self.r("ive")
elif self.ends("fulness"): self.r("ful")
elif self.ends("ousness"): self.r("ous")
elif self.b[self.k - 1] == 't':
if self.ends("aliti"): self.r("al")
elif self.ends("iviti"): self.r("ive")
elif self.ends("biliti"): self.r("ble")
elif self.b[self.k - 1] == 'g': # --DEPARTURE--
if self.ends("logi"): self.r("log")
# To match the published algorithm, delete this phrase
def step3(self):
"""step3() dels with -ic-, -full, -ness etc. similar strategy
to step2."""
if self.b[self.k] == 'e':
if self.ends("icate"): self.r("ic")
elif self.ends("ative"): self.r("")
elif self.ends("alize"): self.r("al")
elif self.b[self.k] == 'i':
if self.ends("iciti"): self.r("ic")
elif self.b[self.k] == 'l':
if self.ends("ical"): self.r("ic")
elif self.ends("ful"): self.r("")
elif self.b[self.k] == 's':
if self.ends("ness"): self.r("")
def step4(self):
"""step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
if self.b[self.k - 1] == 'a':
if self.ends("al"): pass
else: return
elif self.b[self.k - 1] == 'c':
if self.ends("ance"): pass
elif self.ends("ence"): pass
else: return
elif self.b[self.k - 1] == 'e':
if self.ends("er"): pass
else: return
elif self.b[self.k - 1] == 'i':
if self.ends("ic"): pass
else: return
elif self.b[self.k - 1] == 'l':
if self.ends("able"): pass
elif self.ends("ible"): pass
else: return
elif self.b[self.k - 1] == 'n':
if self.ends("ant"): pass
elif self.ends("ement"): pass
elif self.ends("ment"): pass
elif self.ends("ent"): pass
else: return
elif self.b[self.k - 1] == 'o':
if (self.ends("ion") and
(self.b[self.j] == 's' or self.b[self.j] == 't')):
pass
elif self.ends("ou"):
pass
# takes care of -ous
else:
return
elif self.b[self.k - 1] == 's':
if self.ends("ism"): pass
else: return
elif self.b[self.k - 1] == 't':
if self.ends("ate"): pass
elif self.ends("iti"): pass
else: return
elif self.b[self.k - 1] == 'u':
if self.ends("ous"): pass
else: return
elif self.b[self.k - 1] == 'v':
if self.ends("ive"): pass
else: return
elif self.b[self.k - 1] == 'z':
if self.ends("ize"): pass
else: return
else:
return
if self.m() > 1:
self.k = self.j
def step5(self):
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
self.j = self.k
if self.b[self.k] == 'e':
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k-1)):
self.k = self.k - 1
if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
self.k = self.k -1
def stem(self, p, i, j):
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
stemmer adjusts the characters p[i] ... p[j] and returns the new
end-point of the string, k. Stemming never increases word length, so
i <= k <= j. To turn the stemmer into a module, declare 'stem' as
extern, and delete the remainder of this file.
"""
# copy the parameters into statics
self.b = p
self.k = j
self.k0 = i
if self.k <= self.k0 + 1:
return self.b # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.step1ab()
self.step1c()
self.step2()
self.step3()
self.step4()
self.step5()
return self.b[self.k0:self.k+1]
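# Minimal usage sketch (illustrative, not part of the original module); the
# expected stems follow the examples listed in step1ab() above.
if __name__ == '__main__':  # pragma: no cover
    _stemmer = PorterStemmer()
    for _word in ('caresses', 'ponies', 'meetings'):
        # -> caress, poni, meet
        print(_stemmer.stem(_word, 0, len(_word) - 1))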
| mit | 4,402,611,869,401,863,000 | 34.104816 | 78 | 0.468419 | false |
aospx-kitkat/platform_external_chromium_org | ppapi/generators/idl_propertynode.py | 40 | 5465 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Hierarchical property system for IDL AST """
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_option import GetOption, Option, ParseOptions
#
# IDLPropertyNode
#
# A property node is a hierarchically aware system for mapping
# keys to values, such that a local dictionary is searched first,
# followed by parent dictionaries in order.
#
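# A short illustration of that lookup order (not part of the original file):
#
#   parent = IDLPropertyNode()
#   parent.SetProperty('NAME', 'Top')
#   child = IDLPropertyNode()
#   child.AddParent(parent)
#   child.GetProperty('NAME')   # -> 'Top', resolved through the parent
#   child.SetProperty('NAME', 'Child')
#   child.GetProperty('NAME')   # -> 'Child', the local map now wins
#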
class IDLPropertyNode(object):
def __init__(self):
self.parents = []
self.property_map = {}
def Error(self, msg):
name = self.GetProperty('NAME', 'Unknown')
parents = [parent.GetProperty('NAME', '???') for parent in self.parents]
ErrOut.Log('%s [%s] : %s' % (name, ' '.join(parents), msg))
def AddParent(self, parent):
assert parent
self.parents.append(parent)
def SetProperty(self, name, val):
self.property_map[name] = val
def _GetProperty_(self, name):
# Check locally for the property, and return it if found.
prop = self.property_map.get(name, None)
if prop is not None: return prop
    # If not, search parents in order
for parent in self.parents:
prop = parent.GetProperty(name)
if prop is not None: return prop
# Otherwise, it can not be found.
return None
def GetProperty(self, name, default=None):
prop = self._GetProperty_(name)
if prop is None:
return default
else:
return prop
def GetPropertyLocal(self, name, default=None):
# Search for the property, but only locally, returning the
# default if not found.
prop = self.property_map.get(name, default)
return prop
# Regular expression to parse property keys in a string such that a string
# "My string $NAME$" will find the key "NAME".
regex_var = re.compile('(?P<src>[^\\$]+)|(?P<key>\\$\\w+\\$)')
def GetPropertyList(self):
return self.property_map.keys()
# Recursively expands text keys in the form of $KEY$ with the value
# of the property of the same name. Since this is done recursively
# one property can be defined in terms of another.
def Replace(self, text):
itr = IDLPropertyNode.regex_var.finditer(text)
out = ''
for m in itr:
(start, stop) = m.span()
if m.lastgroup == 'src':
out += text[start:stop]
if m.lastgroup == 'key':
key = text[start+1:stop-1]
val = self.GetProperty(key, None)
if not val:
self.Error('No property "%s"' % key)
out += self.Replace(str(val))
return out
#
# Testing functions
#
# Build a property node, setting the properties including a name, and
# associate the children with this new node.
#
def BuildNode(name, props, children=[], parents=[]):
node = IDLPropertyNode()
node.SetProperty('NAME', name)
for prop in props:
toks = prop.split('=')
node.SetProperty(toks[0], toks[1])
for child in children:
child.AddParent(node)
for parent in parents:
node.AddParent(parent)
return node
def ExpectProp(node, name, val):
found = node.GetProperty(name)
if found != val:
ErrOut.Log('Got property %s expecting %s' % (found, val))
return 1
return 0
#
# Verify property inheritance
#
def PropertyTest():
errors = 0
left = BuildNode('Left', ['Left=Left'])
right = BuildNode('Right', ['Right=Right'])
top = BuildNode('Top', ['Left=Top', 'Right=Top'], [left, right])
errors += ExpectProp(top, 'Left', 'Top')
errors += ExpectProp(top, 'Right', 'Top')
errors += ExpectProp(left, 'Left', 'Left')
errors += ExpectProp(left, 'Right', 'Top')
errors += ExpectProp(right, 'Left', 'Top')
errors += ExpectProp(right, 'Right', 'Right')
if not errors: InfoOut.Log('Passed PropertyTest')
return errors
def ExpectText(node, text, val):
found = node.Replace(text)
if found != val:
ErrOut.Log('Got replacement %s expecting %s' % (found, val))
return 1
return 0
#
# Verify text replacement
#
def ReplaceTest():
errors = 0
left = BuildNode('Left', ['Left=Left'])
right = BuildNode('Right', ['Right=Right'])
top = BuildNode('Top', ['Left=Top', 'Right=Top'], [left, right])
errors += ExpectText(top, '$Left$', 'Top')
errors += ExpectText(top, '$Right$', 'Top')
errors += ExpectText(left, '$Left$', 'Left')
errors += ExpectText(left, '$Right$', 'Top')
errors += ExpectText(right, '$Left$', 'Top')
errors += ExpectText(right, '$Right$', 'Right')
if not errors: InfoOut.Log('Passed ReplaceTest')
return errors
def MultiParentTest():
errors = 0
parent1 = BuildNode('parent1', ['PARENT1=parent1', 'TOPMOST=$TOP$'])
parent2 = BuildNode('parent2', ['PARENT1=parent2', 'PARENT2=parent2'])
child = BuildNode('child', ['CHILD=child'], parents=[parent1, parent2])
BuildNode('top', ['TOP=top'], children=[parent1])
errors += ExpectText(child, '$CHILD$', 'child')
errors += ExpectText(child, '$PARENT1$', 'parent1')
errors += ExpectText(child, '$PARENT2$', 'parent2')
# Verify recursive resolution
errors += ExpectText(child, '$TOPMOST$', 'top')
if not errors: InfoOut.Log('Passed MultiParentTest')
return errors
def Main():
errors = 0
errors += PropertyTest()
errors += ReplaceTest()
errors += MultiParentTest()
if errors:
ErrOut.Log('IDLNode failed with %d errors.' % errors)
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause | 2,883,256,284,417,654,000 | 26.741117 | 76 | 0.652333 | false |
jdilallo/jdilallo-test | examples/adwords/v201309/misc/get_all_images_and_videos.py | 1 | 2315 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all images and videos.
To upload an image, run upload_image.py. To upload video, see:
http://adwords.google.com/support/aw/bin/answer.py?hl=en&answer=39454.
Tags: MediaService.get
"""
__author__ = ('[email protected] (Kevin Winter), '
              'Joseph DiLallo')
from googleads import adwords
PAGE_SIZE = 500
def main(client):
# Initialize appropriate service.
media_service = client.GetService('MediaService', version='v201309')
# Construct selector and get all images.
offset = 0
selector = {
'fields': ['MediaId', 'Type', 'Width', 'Height', 'MimeType'],
'predicates': [{
'field': 'Type',
'operator': 'IN',
'values': ['IMAGE', 'VIDEO']
}],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
more_pages = True
while more_pages:
page = media_service.get(selector)
# Display results.
if 'entries' in page:
for image in page['entries']:
dimensions = dict([(entry['key'], entry['value'])
for entry in image['dimensions']])
print ('Media with id \'%s\', dimensions \'%sx%s\', and MimeType \'%s\''
' was found.' % (image['mediaId'], dimensions['FULL']['height'],
dimensions['FULL']['width'], image['mimeType']))
else:
print 'No images/videos were found.'
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
| apache-2.0 | 4,196,729,325,004,015,000 | 29.866667 | 80 | 0.633261 | false |
sharoonthomas/nereid | nereid/routing.py | 9 | 2610 | # -*- coding: utf-8 -*-
"""
The host matching URL Map seems to be matching hosts well but fails in
    generating/building URLs when there are identical endpoints.
This patch makes strict host matching to ensure nothing skips host
matching.
Also see: https://github.com/mitsuhiko/werkzeug/issues/488
:copyright: (c) 2014 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
from werkzeug import routing
from nereid import request
class Map(routing.Map):
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(endpoint, values, self.default_method,
append_unknown)
if rv is not None:
return rv
host = self.map.host_matching and self.server_name or self.subdomain
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method, host):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
class Rule(routing.Rule):
def __init__(self, *args, **kwargs):
self.readonly = kwargs.pop('readonly', None)
self.is_csrf_exempt = kwargs.pop('exempt_csrf', False)
super(Rule, self).__init__(*args, **kwargs)
def empty(self):
"""Return an unbound copy of this rule. This can be useful if you
want to reuse an already bound URL for another map.
Ref: https://github.com/mitsuhiko/werkzeug/pull/645
"""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return self.__class__(
self.rule, defaults, self.subdomain, self.methods,
self.build_only, self.endpoint, self.strict_slashes,
self.redirect_to, self.alias, self.host
)
@property
def is_readonly(self):
if self.readonly is not None:
# If a value that is not None is explicitly set for the URL,
# then return that.
return self.readonly
# By default GET and HEAD requests are allocated a readonly cursor
return request.method in ('HEAD', 'GET')
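# Illustrative sketch (not part of the original module) of the extra keyword
# arguments this Rule subclass accepts; the '/status' path and endpoint name
# are hypothetical:
#
#   url_map = Map([
#       Rule('/status', endpoint='status', readonly=True, exempt_csrf=True),
#   ], host_matching=True)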
| gpl-3.0 | -8,672,897,463,554,729,000 | 35.25 | 76 | 0.61341 | false |
rubendura/django-rest-framework | tests/test_renderers.py | 53 | 17136 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re
from collections import MutableMapping
from django.conf.urls import include, url
from django.core.cache import cache
from django.db import models
from django.test import TestCase
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from rest_framework import permissions, serializers, status
from rest_framework.compat import OrderedDict
from rest_framework.renderers import (
BaseRenderer, BrowsableAPIRenderer, HTMLFormRenderer, JSONRenderer
)
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory
from rest_framework.views import APIView
DUMMYSTATUS = status.HTTP_200_OK
DUMMYCONTENT = 'dummycontent'
def RENDERER_A_SERIALIZER(x):
return ('Renderer A: %s' % x).encode('ascii')
def RENDERER_B_SERIALIZER(x):
return ('Renderer B: %s' % x).encode('ascii')
expected_results = [
((elem for elem in [1, 2, 3]), JSONRenderer, b'[1,2,3]') # Generator
]
class DummyTestModel(models.Model):
name = models.CharField(max_length=42, default='')
class BasicRendererTests(TestCase):
def test_expected_results(self):
for value, renderer_cls, expected in expected_results:
output = renderer_cls().render(value)
self.assertEqual(output, expected)
class RendererA(BaseRenderer):
media_type = 'mock/renderera'
format = "formata"
def render(self, data, media_type=None, renderer_context=None):
return RENDERER_A_SERIALIZER(data)
class RendererB(BaseRenderer):
media_type = 'mock/rendererb'
format = "formatb"
def render(self, data, media_type=None, renderer_context=None):
return RENDERER_B_SERIALIZER(data)
class MockView(APIView):
renderer_classes = (RendererA, RendererB)
def get(self, request, **kwargs):
response = Response(DUMMYCONTENT, status=DUMMYSTATUS)
return response
class MockGETView(APIView):
def get(self, request, **kwargs):
return Response({'foo': ['bar', 'baz']})
class MockPOSTView(APIView):
def post(self, request, **kwargs):
return Response({'foo': request.data})
class EmptyGETView(APIView):
renderer_classes = (JSONRenderer,)
def get(self, request, **kwargs):
return Response(status=status.HTTP_204_NO_CONTENT)
class HTMLView(APIView):
renderer_classes = (BrowsableAPIRenderer, )
def get(self, request, **kwargs):
return Response('text')
class HTMLView1(APIView):
renderer_classes = (BrowsableAPIRenderer, JSONRenderer)
def get(self, request, **kwargs):
return Response('text')
urlpatterns = [
url(r'^.*\.(?P<format>.+)$', MockView.as_view(renderer_classes=[RendererA, RendererB])),
url(r'^$', MockView.as_view(renderer_classes=[RendererA, RendererB])),
url(r'^cache$', MockGETView.as_view()),
url(r'^parseerror$', MockPOSTView.as_view(renderer_classes=[JSONRenderer, BrowsableAPIRenderer])),
url(r'^html$', HTMLView.as_view()),
url(r'^html1$', HTMLView1.as_view()),
url(r'^empty$', EmptyGETView.as_view()),
url(r'^api', include('rest_framework.urls', namespace='rest_framework'))
]
class POSTDeniedPermission(permissions.BasePermission):
def has_permission(self, request, view):
return request.method != 'POST'
class POSTDeniedView(APIView):
renderer_classes = (BrowsableAPIRenderer,)
permission_classes = (POSTDeniedPermission,)
def get(self, request):
return Response()
def post(self, request):
return Response()
def put(self, request):
return Response()
def patch(self, request):
return Response()
class DocumentingRendererTests(TestCase):
def test_only_permitted_forms_are_displayed(self):
view = POSTDeniedView.as_view()
request = APIRequestFactory().get('/')
response = view(request).render()
self.assertNotContains(response, '>POST<')
self.assertContains(response, '>PUT<')
self.assertContains(response, '>PATCH<')
class RendererEndToEndTests(TestCase):
"""
End-to-end testing of renderers using an RendererMixin on a generic view.
"""
urls = 'tests.test_renderers'
def test_default_renderer_serializes_content(self):
"""If the Accept header is not set the default renderer should serialize the response."""
resp = self.client.get('/')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_head_method_serializes_no_content(self):
"""No response must be included in HEAD requests."""
resp = self.client.head('/')
self.assertEqual(resp.status_code, DUMMYSTATUS)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, six.b(''))
def test_default_renderer_serializes_content_on_accept_any(self):
"""If the Accept header is set to */* the default renderer should serialize the response."""
resp = self.client.get('/', HTTP_ACCEPT='*/*')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for the default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_non_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for a non-default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_accept_query(self):
"""The '_accept' query string should behave in the same way as the Accept header."""
param = '?%s=%s' % (
api_settings.URL_ACCEPT_OVERRIDE,
RendererB.media_type
)
resp = self.client.get('/' + param)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_unsatisfiable_accept_header_on_request_returns_406_status(self):
"""If the Accept header is unsatisfiable we should return a 406 Not Acceptable response."""
resp = self.client.get('/', HTTP_ACCEPT='foo/bar')
self.assertEqual(resp.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_specified_renderer_serializes_content_on_format_query(self):
"""If a 'format' query is specified, the renderer with the matching
format attribute should serialize the response."""
param = '?%s=%s' % (
api_settings.URL_FORMAT_OVERRIDE,
RendererB.format
)
resp = self.client.get('/' + param)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_kwargs(self):
"""If a 'format' keyword arg is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/something.formatb')
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
"""If both a 'format' query and a matching Accept header specified,
the renderer with the matching format attribute should serialize the response."""
param = '?%s=%s' % (
api_settings.URL_FORMAT_OVERRIDE,
RendererB.format
)
resp = self.client.get('/' + param,
HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_parse_error_renderers_browsable_api(self):
"""Invalid data should still render the browsable API correctly."""
resp = self.client.post('/parseerror', data='foobar', content_type='application/json', HTTP_ACCEPT='text/html')
self.assertEqual(resp['Content-Type'], 'text/html; charset=utf-8')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_204_no_content_responses_have_no_content_type_set(self):
"""
Regression test for #1196
https://github.com/tomchristie/django-rest-framework/issues/1196
"""
resp = self.client.get('/empty')
self.assertEqual(resp.get('Content-Type', None), None)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
def test_contains_headers_of_api_response(self):
"""
Issue #1437
Test that we display the headers of the API response and not those of the
HTML response.
"""
resp = self.client.get('/html1')
self.assertContains(resp, '>GET, HEAD, OPTIONS<')
self.assertContains(resp, '>application/json<')
self.assertNotContains(resp, '>text/html; charset=utf-8<')
_flat_repr = '{"foo":["bar","baz"]}'
_indented_repr = '{\n "foo": [\n "bar",\n "baz"\n ]\n}'
def strip_trailing_whitespace(content):
"""
There seem to be some inconsistencies regarding trailing whitespace across
different versions of the json lib.
"""
return re.sub(' +\n', '\n', content)
class JSONRendererTests(TestCase):
"""
Tests specific to the JSON Renderer
"""
def test_render_lazy_strings(self):
"""
JSONRenderer should deal with lazy translated strings.
"""
ret = JSONRenderer().render(_('test'))
self.assertEqual(ret, b'"test"')
def test_render_queryset_values(self):
o = DummyTestModel.objects.create(name='dummy')
qs = DummyTestModel.objects.values('id', 'name')
ret = JSONRenderer().render(qs)
data = json.loads(ret.decode('utf-8'))
self.assertEqual(data, [{'id': o.id, 'name': o.name}])
def test_render_queryset_values_list(self):
o = DummyTestModel.objects.create(name='dummy')
qs = DummyTestModel.objects.values_list('id', 'name')
ret = JSONRenderer().render(qs)
data = json.loads(ret.decode('utf-8'))
self.assertEqual(data, [[o.id, o.name]])
def test_render_dict_abc_obj(self):
class Dict(MutableMapping):
def __init__(self):
self._dict = dict()
def __getitem__(self, key):
return self._dict.__getitem__(key)
def __setitem__(self, key, value):
return self._dict.__setitem__(key, value)
def __delitem__(self, key):
return self._dict.__delitem__(key)
def __iter__(self):
return self._dict.__iter__()
def __len__(self):
return self._dict.__len__()
def keys(self):
return self._dict.keys()
x = Dict()
x['key'] = 'string value'
x[2] = 3
ret = JSONRenderer().render(x)
data = json.loads(ret.decode('utf-8'))
self.assertEqual(data, {'key': 'string value', '2': 3})
def test_render_obj_with_getitem(self):
class DictLike(object):
def __init__(self):
self._dict = {}
def set(self, value):
self._dict = dict(value)
def __getitem__(self, key):
return self._dict[key]
x = DictLike()
x.set({'a': 1, 'b': 'string'})
with self.assertRaises(TypeError):
JSONRenderer().render(x)
def test_without_content_type_args(self):
"""
Test basic JSON rendering.
"""
obj = {'foo': ['bar', 'baz']}
renderer = JSONRenderer()
content = renderer.render(obj, 'application/json')
# Fix failing test case which depends on version of JSON library.
self.assertEqual(content.decode('utf-8'), _flat_repr)
def test_with_content_type_args(self):
"""
Test JSON rendering with additional content type arguments supplied.
"""
obj = {'foo': ['bar', 'baz']}
renderer = JSONRenderer()
content = renderer.render(obj, 'application/json; indent=2')
self.assertEqual(strip_trailing_whitespace(content.decode('utf-8')), _indented_repr)
class UnicodeJSONRendererTests(TestCase):
"""
Tests specific to the Unicode JSON Renderer
"""
def test_proper_encoding(self):
obj = {'countries': ['United Kingdom', 'France', 'España']}
renderer = JSONRenderer()
content = renderer.render(obj, 'application/json')
self.assertEqual(content, '{"countries":["United Kingdom","France","España"]}'.encode('utf-8'))
def test_u2028_u2029(self):
# The \u2028 and \u2029 characters should be escaped,
# even when the non-escaping unicode representation is used.
# Regression test for #2169
obj = {'should_escape': '\u2028\u2029'}
renderer = JSONRenderer()
content = renderer.render(obj, 'application/json')
self.assertEqual(content, '{"should_escape":"\\u2028\\u2029"}'.encode('utf-8'))
class AsciiJSONRendererTests(TestCase):
"""
Tests specific to the ASCII JSON Renderer
"""
def test_proper_encoding(self):
class AsciiJSONRenderer(JSONRenderer):
ensure_ascii = True
obj = {'countries': ['United Kingdom', 'France', 'España']}
renderer = AsciiJSONRenderer()
content = renderer.render(obj, 'application/json')
self.assertEqual(content, '{"countries":["United Kingdom","France","Espa\\u00f1a"]}'.encode('utf-8'))
# Tests for caching issue, #346
class CacheRenderTest(TestCase):
"""
Tests specific to caching responses
"""
urls = 'tests.test_renderers'
def test_head_caching(self):
"""
Test caching of HEAD requests
"""
response = self.client.head('/cache')
cache.set('key', response)
cached_response = cache.get('key')
assert isinstance(cached_response, Response)
assert cached_response.content == response.content
assert cached_response.status_code == response.status_code
def test_get_caching(self):
"""
Test caching of GET requests
"""
response = self.client.get('/cache')
cache.set('key', response)
cached_response = cache.get('key')
assert isinstance(cached_response, Response)
assert cached_response.content == response.content
assert cached_response.status_code == response.status_code
class TestJSONIndentationStyles:
def test_compact(self):
renderer = JSONRenderer()
data = OrderedDict([('a', 1), ('b', 2)])
assert renderer.render(data) == b'{"a":1,"b":2}'
def test_indented(self):
renderer = JSONRenderer()
data = OrderedDict([('a', 1), ('b', 2)])
context = {'indent': 4}
assert (
renderer.render(data, renderer_context=context) ==
b'{\n "a": 1,\n "b": 2\n}'
)
def test_long_form(self):
renderer = JSONRenderer()
renderer.compact = False
data = OrderedDict([('a', 1), ('b', 2)])
assert renderer.render(data) == b'{"a": 1, "b": 2}'
class TestHiddenFieldHTMLFormRenderer(TestCase):
def test_hidden_field_rendering(self):
class TestSerializer(serializers.Serializer):
published = serializers.HiddenField(default=True)
serializer = TestSerializer(data={})
serializer.is_valid()
renderer = HTMLFormRenderer()
field = serializer['published']
rendered = renderer.render_field(field, {})
assert rendered == ''
| bsd-2-clause | 8,635,560,888,795,971,000 | 35.221987 | 119 | 0.6376 | false |
Medigate/cutiuta-server | cutiuta-server/env/lib/python3.4/site-packages/django/core/cache/backends/memcached.py | 41 | 6954 | "Memcached cache backend"
import pickle
import time
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils import six
from django.utils.encoding import force_str
from django.utils.functional import cached_property
class BaseMemcachedCache(BaseCache):
def __init__(self, server, params, library, value_not_found_exception):
super(BaseMemcachedCache, self).__init__(params)
if isinstance(server, six.string_types):
self._servers = server.split(';')
else:
self._servers = server
# The exception type to catch from the underlying library for a key
# that was not found. This is a ValueError for python-memcache,
# pylibmc.NotFound for pylibmc, and cmemcache will return None without
# raising an exception.
self.LibraryValueNotFoundException = value_not_found_exception
self._lib = library
self._options = params.get('OPTIONS')
@property
def _cache(self):
"""
Implements transparent thread-safe access to a memcached client.
"""
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers)
return self._client
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Memcached deals with long (> 30 days) timeouts in a special
way. Call this function to obtain a safe value for your timeout.
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
if timeout is None:
# Using 0 in memcache sets a non-expiring timeout.
return 0
elif int(timeout) == 0:
# Other cache backends treat 0 as set-and-expire. To achieve this
# in memcache backends, a negative timeout must be passed.
timeout = -1
if timeout > 2592000: # 60*60*24*30, 30 days
# See https://github.com/memcached/memcached/wiki/Programming#expiration
# "Expiration times can be set from 0, meaning "never expire", to
# 30 days. Any time higher than 30 days is interpreted as a Unix
# timestamp date. If you want to expire an object on January 1st of
# next year, this is how you do that."
#
# This means that we have to switch to absolute timestamps.
timeout += int(time.time())
return int(timeout)
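# Illustrative sketch (assumes a fixed clock where time.time() returns
# 1000000): a timeout longer than 30 days is converted to an absolute Unix
# timestamp, while a shorter timeout is passed through as a relative offset.
#
#   get_backend_timeout(60 * 60 * 24 * 45)  # 45 days, above the 30-day limit
#   # -> 1000000 + 3888000 == 4888000 (absolute timestamp)
#
#   get_backend_timeout(600)                # 10 minutes
#   # -> 600 (relative offset)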
def make_key(self, key, version=None):
# Python 2 memcache requires the key to be a byte string.
return force_str(super(BaseMemcachedCache, self).make_key(key, version))
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
val = self._cache.get(key)
if val is None:
return default
return val
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
if not self._cache.set(key, value, self.get_backend_timeout(timeout)):
# make sure the key doesn't keep its old value in case of failure to set (memcached's 1MB limit)
self._cache.delete(key)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self._cache.delete(key)
def get_many(self, keys, version=None):
new_keys = [self.make_key(x, version=version) for x in keys]
ret = self._cache.get_multi(new_keys)
if ret:
_ = {}
m = dict(zip(new_keys, keys))
for k, v in ret.items():
_[m[k]] = v
ret = _
return ret
def close(self, **kwargs):
self._cache.disconnect_all()
def incr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.decr(key, -delta)
try:
val = self._cache.incr(key, delta)
# python-memcache responds to incr on non-existent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def decr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.incr(key, -delta)
try:
val = self._cache.decr(key, delta)
# python-memcache responds to decr on non-existent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
safe_data = {}
for key, value in data.items():
key = self.make_key(key, version=version)
safe_data[key] = value
self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))
def delete_many(self, keys, version=None):
self._cache.delete_multi(self.make_key(key, version=version) for key in keys)
def clear(self):
self._cache.flush_all()
class MemcachedCache(BaseMemcachedCache):
"An implementation of a cache binding using python-memcached"
def __init__(self, server, params):
import memcache
super(MemcachedCache, self).__init__(server, params,
library=memcache,
value_not_found_exception=ValueError)
@property
def _cache(self):
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers, pickleProtocol=pickle.HIGHEST_PROTOCOL)
return self._client
class PyLibMCCache(BaseMemcachedCache):
"An implementation of a cache binding using pylibmc"
def __init__(self, server, params):
import pylibmc
super(PyLibMCCache, self).__init__(server, params,
library=pylibmc,
value_not_found_exception=pylibmc.NotFound)
@cached_property
def _cache(self):
client = self._lib.Client(self._servers)
if self._options:
client.behaviors = self._options
return client
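# Usage sketch: these backends are normally selected through Django's CACHES
# setting rather than instantiated directly. A minimal configuration for the
# python-memcached binding might look like the following; the server address
# is an assumption, not something this module mandates.
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#           'LOCATION': '127.0.0.1:11211',
#       }
#   }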
| gpl-3.0 | -5,305,973,762,145,933,000 | 36.793478 | 108 | 0.605982 | false |
rghe/ansible | lib/ansible/modules/packaging/os/apt_repository.py | 19 | 19322 | #!/usr/bin/python
# encoding: utf-8
# Copyright: (c) 2012, Matt Wright <[email protected]>
# Copyright: (c) 2013, Alexander Saltanov <[email protected]>
# Copyright: (c) 2014, Rutger Spiertz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: apt_repository
short_description: Add and remove APT repositories
description:
- Add or remove an APT repository in Ubuntu and Debian.
notes:
- This module works on Debian, Ubuntu and their derivatives.
- This module supports Debian Squeeze (version 6) as well as its successors.
options:
repo:
description:
- A source string for the repository.
required: true
state:
description:
- A source string state.
choices: [ absent, present ]
default: "present"
mode:
description:
- The octal mode for newly created files in sources.list.d
default: '0644'
version_added: "1.6"
update_cache:
description:
- Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
type: bool
default: "yes"
validate_certs:
description:
- If C(no), SSL certificates for the target repo will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
version_added: '1.8'
filename:
description:
- Sets the name of the source list file in sources.list.d.
Defaults to a file name based on the repository source url.
The .list extension will be automatically added.
version_added: '2.1'
codename:
description:
- Override the distribution codename to use for PPA repositories.
Should usually only be set when working with a PPA on a non-Ubuntu target (e.g. Debian or Mint)
version_added: '2.3'
author:
- Alexander Saltanov (@sashka)
version_added: "0.7"
requirements:
- python-apt (python 2)
- python3-apt (python 3)
'''
EXAMPLES = '''
# Add specified repository into sources list.
- apt_repository:
repo: deb http://archive.canonical.com/ubuntu hardy partner
state: present
# Add specified repository into sources list using specified filename.
- apt_repository:
repo: deb http://dl.google.com/linux/chrome/deb/ stable main
state: present
filename: google-chrome
# Add source repository into sources list.
- apt_repository:
repo: deb-src http://archive.canonical.com/ubuntu hardy partner
state: present
# Remove specified repository from sources list.
- apt_repository:
repo: deb http://archive.canonical.com/ubuntu hardy partner
state: absent
# Add nginx stable repository from PPA and install its signing key.
# On Ubuntu target:
- apt_repository:
repo: ppa:nginx/stable
# On Debian target
- apt_repository:
repo: 'ppa:nginx/stable'
codename: trusty
'''
import glob
import json
import os
import re
import sys
import tempfile
try:
import apt
import apt_pkg
import aptsources.distro as aptsources_distro
distro = aptsources_distro.get_distro()
HAVE_PYTHON_APT = True
except ImportError:
distro = None
HAVE_PYTHON_APT = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
if sys.version_info[0] < 3:
PYTHON_APT = 'python-apt'
else:
PYTHON_APT = 'python3-apt'
DEFAULT_SOURCES_PERM = 0o0644
VALID_SOURCE_TYPES = ('deb', 'deb-src')
def install_python_apt(module):
if not module.check_mode:
apt_get_path = module.get_bin_path('apt-get')
if apt_get_path:
rc, so, se = module.run_command([apt_get_path, 'update'])
if rc != 0:
module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
rc, so, se = module.run_command([apt_get_path, 'install', PYTHON_APT, '-y', '-q'])
if rc == 0:
global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT
import apt
import apt_pkg
import aptsources.distro as aptsources_distro
distro = aptsources_distro.get_distro()
HAVE_PYTHON_APT = True
else:
module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
else:
module.fail_json(msg="%s must be installed to use check mode" % PYTHON_APT)
class InvalidSource(Exception):
pass
# Simple version of aptsources.sourceslist.SourcesList.
# No advanced logic and no backups inside.
class SourcesList(object):
def __init__(self, module):
self.module = module
self.files = {} # group sources by file
# Repositories that we're adding -- used to implement mode param
self.new_repos = set()
self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
# read sources.list if it exists
if os.path.isfile(self.default_file):
self.load(self.default_file)
# read sources.list.d
for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
self.load(file)
def __iter__(self):
'''Simple iterator to go over all sources. Empty, non-source, and other invalid lines will be skipped.'''
for file, sources in self.files.items():
for n, valid, enabled, source, comment in sources:
if valid:
yield file, n, enabled, source, comment
def _expand_path(self, filename):
if '/' in filename:
return filename
else:
return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
def _suggest_filename(self, line):
def _cleanup_filename(s):
filename = self.module.params['filename']
if filename is not None:
return filename
return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
def _strip_username_password(s):
if '@' in s:
s = s.split('@', 1)
s = s[-1]
return s
# Drop options and protocols.
line = re.sub(r'\[[^\]]+\]', '', line)
line = re.sub(r'\w+://', '', line)
# split line into valid keywords
parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
# Drop usernames and passwords
parts[0] = _strip_username_password(parts[0])
return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
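# Sketch of the resulting name, assuming no explicit 'filename' module
# parameter was supplied (the URL below is purely illustrative):
#
#   _suggest_filename('deb http://dl.google.com/linux/chrome/deb/ stable main')
#   # -> 'dl_google_com_linux_chrome_deb.list'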
def _parse(self, line, raise_if_invalid_or_disabled=False):
valid = False
enabled = True
source = ''
comment = ''
line = line.strip()
if line.startswith('#'):
enabled = False
line = line[1:]
# Check for another "#" in the line and treat a part after it as a comment.
i = line.find('#')
if i > 0:
comment = line[i + 1:].strip()
line = line[:i]
# Split a source into substring to make sure that it is source spec.
# Duplicated whitespaces in a valid source spec will be removed.
source = line.strip()
if source:
chunks = source.split()
if chunks[0] in VALID_SOURCE_TYPES:
valid = True
source = ' '.join(chunks)
if raise_if_invalid_or_disabled and (not valid or not enabled):
raise InvalidSource(line)
return valid, enabled, source, comment
@staticmethod
def _apt_cfg_file(filespec):
'''
Wrapper for `apt_pkg` module for running with Python 2.5
'''
try:
result = apt_pkg.config.find_file(filespec)
except AttributeError:
result = apt_pkg.Config.FindFile(filespec)
return result
@staticmethod
def _apt_cfg_dir(dirspec):
'''
Wrapper for `apt_pkg` module for running with Python 2.5
'''
try:
result = apt_pkg.config.find_dir(dirspec)
except AttributeError:
result = apt_pkg.Config.FindDir(dirspec)
return result
def load(self, file):
group = []
f = open(file, 'r')
for n, line in enumerate(f):
valid, enabled, source, comment = self._parse(line)
group.append((n, valid, enabled, source, comment))
self.files[file] = group
def save(self):
for filename, sources in list(self.files.items()):
if sources:
d, fn = os.path.split(filename)
fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
f = os.fdopen(fd, 'w')
for n, valid, enabled, source, comment in sources:
chunks = []
if not enabled:
chunks.append('# ')
chunks.append(source)
if comment:
chunks.append(' # ')
chunks.append(comment)
chunks.append('\n')
line = ''.join(chunks)
try:
f.write(line)
except IOError as err:
self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(err)))
self.module.atomic_move(tmp_path, filename)
# allow the user to override the default mode
if filename in self.new_repos:
this_mode = self.module.params.get('mode', DEFAULT_SOURCES_PERM)
self.module.set_mode_if_different(filename, this_mode, False)
else:
del self.files[filename]
if os.path.exists(filename):
os.remove(filename)
def dump(self):
dumpstruct = {}
for filename, sources in self.files.items():
if sources:
lines = []
for n, valid, enabled, source, comment in sources:
chunks = []
if not enabled:
chunks.append('# ')
chunks.append(source)
if comment:
chunks.append(' # ')
chunks.append(comment)
chunks.append('\n')
lines.append(''.join(chunks))
dumpstruct[filename] = ''.join(lines)
return dumpstruct
def _choice(self, new, old):
if new is None:
return old
return new
def modify(self, file, n, enabled=None, source=None, comment=None):
'''
This function to be used with iterator, so we don't care of invalid sources.
If source, enabled, or comment is None, original value from line ``n`` will be preserved.
'''
valid, enabled_old, source_old, comment_old = self.files[file][n][1:]
self.files[file][n] = (n, valid, self._choice(enabled, enabled_old), self._choice(source, source_old), self._choice(comment, comment_old))
def _add_valid_source(self, source_new, comment_new, file):
# We'll try to reuse disabled source if we have it.
# If we have more than one entry, we will enable them all - no advanced logic, remember.
found = False
for filename, n, enabled, source, comment in self:
if source == source_new:
self.modify(filename, n, enabled=True)
found = True
if not found:
if file is None:
file = self.default_file
else:
file = self._expand_path(file)
if file not in self.files:
self.files[file] = []
files = self.files[file]
files.append((len(files), True, True, source_new, comment_new))
self.new_repos.add(file)
def add_source(self, line, comment='', file=None):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
# Prefer separate files for new sources.
self._add_valid_source(source, comment, file=file or self._suggest_filename(source))
def _remove_valid_source(self, source):
# If we have more than one entry, we will remove them all (not comment, remove!)
for filename, n, enabled, src, comment in self:
if source == src and enabled:
self.files[filename].pop(n)
def remove_source(self, line):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)
class UbuntuSourcesList(SourcesList):
LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'
def __init__(self, module, add_ppa_signing_keys_callback=None):
self.module = module
self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback
self.codename = module.params['codename'] or distro.codename
super(UbuntuSourcesList, self).__init__(module)
def _get_ppa_info(self, owner_name, ppa_name):
lp_api = self.LP_API % (owner_name, ppa_name)
headers = dict(Accept='application/json')
response, info = fetch_url(self.module, lp_api, headers=headers)
if info['status'] != 200:
self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
return json.loads(to_native(response.read()))
def _expand_ppa(self, path):
ppa = path.split(':')[1]
ppa_owner = ppa.split('/')[0]
try:
ppa_name = ppa.split('/')[1]
except IndexError:
ppa_name = 'ppa'
line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename)
return line, ppa_owner, ppa_name
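# For example, assuming the codename resolves to 'trusty', _expand_ppa
# turns 'ppa:nginx/stable' into:
#
#   ('deb http://ppa.launchpad.net/nginx/stable/ubuntu trusty main',
#    'nginx', 'stable')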
def _key_already_exists(self, key_fingerprint):
rc, out, err = self.module.run_command('apt-key export %s' % key_fingerprint, check_rc=True)
return len(err) == 0
def add_source(self, line, comment='', file=None):
if line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(line)
if source in self.repos_urls:
# repository already exists
return
if self.add_ppa_signing_keys_callback is not None:
info = self._get_ppa_info(ppa_owner, ppa_name)
if not self._key_already_exists(info['signing_key_fingerprint']):
command = ['apt-key', 'adv', '--recv-keys', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']]
self.add_ppa_signing_keys_callback(command)
file = file or self._suggest_filename('%s_%s' % (line, self.codename))
else:
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
file = file or self._suggest_filename(source)
self._add_valid_source(source, comment, file)
def remove_source(self, line):
if line.startswith('ppa:'):
source = self._expand_ppa(line)[0]
else:
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)
@property
def repos_urls(self):
_repositories = []
for parsed_repos in self.files.values():
for parsed_repo in parsed_repos:
valid = parsed_repo[1]
enabled = parsed_repo[2]
source_line = parsed_repo[3]
if not valid or not enabled:
continue
if source_line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(source_line)
_repositories.append(source)
else:
_repositories.append(source_line)
return _repositories
def get_add_ppa_signing_key_callback(module):
def _run_command(command):
module.run_command(command, check_rc=True)
if module.check_mode:
return None
else:
return _run_command
def main():
module = AnsibleModule(
argument_spec=dict(
repo=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
mode=dict(type='raw'),
update_cache=dict(type='bool', default=True, aliases=['update-cache']),
filename=dict(type='str'),
# This should not be needed, but exists as a failsafe
install_python_apt=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
codename=dict(type='str'),
),
supports_check_mode=True,
)
params = module.params
repo = module.params['repo']
state = module.params['state']
update_cache = module.params['update_cache']
# Note: mode is referenced in SourcesList class via the passed in module (self here)
sourceslist = None
if not HAVE_PYTHON_APT:
if params['install_python_apt']:
install_python_apt(module)
else:
module.fail_json(msg='%s is not installed, and install_python_apt is False' % PYTHON_APT)
if not repo:
module.fail_json(msg='Please set argument \'repo\' to a non-empty value')
if isinstance(distro, aptsources_distro.Distribution):
sourceslist = UbuntuSourcesList(module, add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
else:
module.fail_json(msg='Module apt_repository is not supported on target.')
sources_before = sourceslist.dump()
try:
if state == 'present':
sourceslist.add_source(repo)
elif state == 'absent':
sourceslist.remove_source(repo)
except InvalidSource as err:
module.fail_json(msg='Invalid repository string: %s' % to_native(err))
sources_after = sourceslist.dump()
changed = sources_before != sources_after
if changed and module._diff:
diff = []
for filename in set(sources_before.keys()).union(sources_after.keys()):
diff.append({'before': sources_before.get(filename, ''),
'after': sources_after.get(filename, ''),
'before_header': (filename, '/dev/null')[filename not in sources_before],
'after_header': (filename, '/dev/null')[filename not in sources_after]})
else:
diff = {}
if changed and not module.check_mode:
try:
sourceslist.save()
if update_cache:
cache = apt.Cache()
cache.update()
except OSError as err:
module.fail_json(msg=to_native(err))
module.exit_json(changed=changed, repo=repo, state=state, diff=diff)
if __name__ == '__main__':
main()
| gpl-3.0 | 4,007,677,621,501,388,300 | 34.130909 | 146 | 0.577321 | false |
mattcen/alloc | bin/alloccli/timesheets.py | 1 | 6113 | """alloccli subcommand for viewing a list of timesheets."""
from alloc import alloc
class timesheets(alloc):
"""Print a list of time sheets."""
# Setup the options that this cli can accept
ops = []
ops.append(('', 'help ', 'Show this help.'))
ops.append(('', 'csv=[WHEN] ', 'Return the results in CSV format. WHEN can be "auto",\n'
'"never" or "always". If WHEN is omitted, assume "always".'))
ops.append(('q', 'quiet ', 'Run with no output except errors.'))
ops.append(('i', 'items ', 'Show time sheet\'s items.'))
ops.append(('p:', 'project=ID|NAME', 'A project ID, or a fuzzy match for a project name.'))
ops.append(('', 'task=ID|NAME ', 'A task ID, or a fuzzy match for a task name.'))
ops.append(('s:', 'status=STATUS ', 'The time sheets\' status. Can accept multiple values.\n'
'(eg: "edit,manager,admin,invoiced,finished,rejected" or "all". Default: edit)'))
ops.append(('a:', 'account=TF ', 'The time sheets\' TF name.'))
ops.append(('c:', 'creator=NICK ', 'The time sheets\' creator username.'))
ops.append(('t:', 'tsid=ID ', 'A time sheet ID.'))
ops.append(('h.', 'hours=NUM ', 'The time sheets must have this many hours recorded.\n'
'(eg: "7" eg: ">7 AND <10 OR =4 AND !=8")'))
ops.append(('d.', 'date=YYYY-MM-DD', 'If --items is specified, then match against the items\' date.\n'
'Else match against the date of the time sheet\'s earliest item.'))
ops.append(('o:', 'order=NAME ', 'The order the Time Sheets or Items are displayed in.\n'
'Time sheets default: -o From -o ID, Default for items: -o Date -o Item\\ ID'))
ops.append(('f:', 'fields=LIST ', 'The list of fields you would like printed.\n'
'(eg: -f all eg: -f ID -f Item\\ ID -o Task\\ ID -o Comment)'))
ops.append(('', 'possible-fields', 'List of possible fields.'))
# Specify some header and footer text for the help text
help_text = "Usage: %s [OPTIONS]\n"
help_text += __doc__
help_text += '''\n\n%s
If run without arguments this program will display all of your editable time sheets.
Examples:
alloc timesheets --hours "2" --date 2010-01-01
alloc timesheets --hours ">2 AND <10 OR >20 AND <=100"
alloc timesheets --status finished --hours ">=7" --date "<=1 week ago"
alloc timesheets --date "2010-10-10"
alloc timesheets --date "<=2010-10-10"
alloc timesheets --date ">=2010-10-10" --items'''
def run(self, command_list):
"""Execute subcommand."""
# Get the command line arguments into a dictionary
o, remainder_ = self.get_args(command_list, self.ops, self.help_text)
# Got this far, then authenticate
self.authenticate()
if o['possible-fields']:
alloc().possible_fields("timeSheet")
# Initialize some variables
self.quiet = o['quiet']
personID = self.get_my_personID()
projectID = ""
taskID = ""
timeSheetID = ""
order_ts = ["From", "ID"]
order_tsi = ["Date", "Item ID"]
status = "edit"
if o['date']:
o['date'] = self.parse_date(o['date'])
# Get a projectID either passed via command line, or figured out from a
# project name
if self.is_num(o['project']):
projectID = o['project']
elif o['project']:
projectID = self.search_for_project(
o['project'], personID, die=False)
if self.is_num(o['task']):
taskID = o['task']
elif o['task']:
taskID = self.search_for_task(
{'taskName': o['task'], 'taskView': 'prioritised'})
if self.is_num(o['tsid']):
timeSheetID = o['tsid']
if 'all' in o['status']:
status = [
'edit', 'manager', 'admin', 'invoiced', 'finished', 'rejected']
elif o['status']:
status = o['status']
if o['order']:
order = o['order']
elif o['items']:
order = order_tsi
else:
order = order_ts
ops = {}
if timeSheetID:
ops['timeSheetID'] = timeSheetID
else:
ops['status'] = status
if 'account' in o and o['account']:
tfargs = {}
tfargs['method'] = 'get_tfID'
tfargs['options'] = o['account']
ops['tfID'] = self.make_request(tfargs)
elif o['creator']:
ops['personID'] = self.get_my_personID(o['creator'])
else:
ops['personID'] = personID
if projectID:
ops['projectID'] = projectID
if taskID:
ops['taskID'] = taskID
if o['hours']:
ops['timeSheetItemHours'] = o['hours']
if o['items']:
timeSheets = self.get_list("timeSheet", ops)
if timeSheets:
tids = []
for i, t_ in timeSheets.items():
tids.append(i)
if tids:
ops["timeSheetID"] = tids
if o['date']:
# >=
ops['date'], ops[
'dateComparator'] = self.parse_date_comparator(o['date'])
timeSheetItems = self.get_list("timeSheetItem", ops)
self.print_table("timeSheetItem", timeSheetItems, o[
"fields"] or self.row_timeSheetItem, sort=order)
else:
if o['date']:
# <=
ops['dateFrom'], ops[
'dateFromComparator'] = self.parse_date_comparator(o['date'])
timeSheets = self.get_list("timeSheet", ops)
self.print_table(
"timeSheet", timeSheets, o["fields"] or self.row_timeSheet, sort=order)
| agpl-3.0 | 2,668,750,069,352,206,000 | 39.217105 | 122 | 0.506462 | false |
Vaidyanath/tempest | tempest/api/image/v1/test_images.py | 4 | 11832 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cStringIO as StringIO
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class CreateRegisterImagesTest(base.BaseV1ImageTest):
"""Here we test the registration and creation of images."""
@test.attr(type='gate')
def test_register_then_upload(self):
# Register, then upload an image
properties = {'prop1': 'val1'}
body = self.create_image(name='New Name',
container_format='bare',
disk_format='raw',
is_public=False,
properties=properties)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Name', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
# Now try uploading an image file
image_file = StringIO.StringIO(data_utils.random_bytes())
body = self.client.update_image(image_id, data=image_file)
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@test.attr(type='gate')
def test_register_remote_image(self):
# Register a new remote image
body = self.create_image(name='New Remote Image',
container_format='bare',
disk_format='raw', is_public=False,
location=CONF.image.http_image,
properties={'key1': 'value1',
'key2': 'value2'})
self.assertIn('id', body)
self.assertEqual('New Remote Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('active', body.get('status'))
properties = body.get('properties')
self.assertEqual(properties['key1'], 'value1')
self.assertEqual(properties['key2'], 'value2')
@test.attr(type='gate')
def test_register_http_image(self):
body = self.create_image(name='New Http Image',
container_format='bare',
disk_format='raw', is_public=False,
copy_from=CONF.image.http_image)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Http Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.client.wait_for_image_status(image_id, 'active')
self.client.get_image(image_id)
@test.attr(type='gate')
def test_register_image_with_min_ram(self):
# Register an image with min ram
properties = {'prop1': 'val1'}
body = self.create_image(name='New_image_with_min_ram',
container_format='bare',
disk_format='raw',
is_public=False,
min_ram=40,
properties=properties)
self.assertIn('id', body)
self.assertEqual('New_image_with_min_ram', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
self.assertEqual(40, body.get('min_ram'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
self.client.delete_image(body['id'])
class ListImagesTest(base.BaseV1ImageTest):
"""
Here we test the listing of image information
"""
@classmethod
def resource_setup(cls):
super(ListImagesTest, cls).resource_setup()
# We add a few images here to test the listing functionality of
# the images API
img1 = cls._create_remote_image('one', 'bare', 'raw')
img2 = cls._create_remote_image('two', 'ami', 'ami')
img3 = cls._create_remote_image('dup', 'bare', 'raw')
img4 = cls._create_remote_image('dup', 'bare', 'raw')
img5 = cls._create_standard_image('1', 'ami', 'ami', 42)
img6 = cls._create_standard_image('2', 'ami', 'ami', 142)
img7 = cls._create_standard_image('33', 'bare', 'raw', 142)
img8 = cls._create_standard_image('33', 'bare', 'raw', 142)
cls.created_set = set(cls.created_images)
# 4x remote images, 4x standard images
cls.remote_set = set((img1, img2, img3, img4))
cls.standard_set = set((img5, img6, img7, img8))
# 5x bare, 3x ami
cls.bare_set = set((img1, img3, img4, img7, img8))
cls.ami_set = set((img2, img5, img6))
# 1x with size 42
cls.size42_set = set((img5,))
# 3x with size 142
cls.size142_set = set((img6, img7, img8,))
# dup named
cls.dup_set = set((img3, img4))
@classmethod
def _create_remote_image(cls, name, container_format, disk_format):
"""
Create a new remote image and return the ID of the newly-registered
image
"""
name = 'New Remote Image %s' % name
location = CONF.image.http_image
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False,
location=location)
image_id = image['id']
return image_id
@classmethod
def _create_standard_image(cls, name, container_format,
disk_format, size):
"""
Create a new standard image of the given size and return the ID of the
newly-registered image.
"""
image_file = StringIO.StringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False, data=image_file)
image_id = image['id']
return image_id
@test.attr(type='gate')
def test_index_no_params(self):
# Simple test to see all fixture images returned
images_list = self.client.image_list()
image_list = map(lambda x: x['id'], images_list)
for image_id in self.created_images:
self.assertIn(image_id, image_list)
@test.attr(type='gate')
def test_index_disk_format(self):
images_list = self.client.image_list(disk_format='ami')
for image in images_list:
self.assertEqual(image['disk_format'], 'ami')
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.ami_set <= result_set)
self.assertFalse(self.created_set - self.ami_set <= result_set)
@test.attr(type='gate')
def test_index_container_format(self):
images_list = self.client.image_list(container_format='bare')
for image in images_list:
self.assertEqual(image['container_format'], 'bare')
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.bare_set <= result_set)
self.assertFalse(self.created_set - self.bare_set <= result_set)
@test.attr(type='gate')
def test_index_max_size(self):
images_list = self.client.image_list(size_max=42)
for image in images_list:
self.assertTrue(image['size'] <= 42)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size42_set <= result_set)
self.assertFalse(self.created_set - self.size42_set <= result_set)
@test.attr(type='gate')
def test_index_min_size(self):
images_list = self.client.image_list(size_min=142)
for image in images_list:
self.assertTrue(image['size'] >= 142)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size142_set <= result_set)
self.assertFalse(self.size42_set <= result_set)
@test.attr(type='gate')
def test_index_status_active_detail(self):
images_list = self.client.image_list_detail(status='active',
sort_key='size',
sort_dir='desc')
top_size = images_list[0]['size'] # We have non-zero sized images
for image in images_list:
size = image['size']
self.assertTrue(size <= top_size)
top_size = size
self.assertEqual(image['status'], 'active')
@test.attr(type='gate')
def test_index_name(self):
images_list = self.client.image_list_detail(
name='New Remote Image dup')
result_set = set(map(lambda x: x['id'], images_list))
for image in images_list:
self.assertEqual(image['name'], 'New Remote Image dup')
self.assertTrue(self.dup_set <= result_set)
self.assertFalse(self.created_set - self.dup_set <= result_set)
class UpdateImageMetaTest(base.BaseV1ImageTest):
@classmethod
def resource_setup(cls):
super(UpdateImageMetaTest, cls).resource_setup()
cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)
@classmethod
def _create_standard_image(cls, name, container_format,
disk_format, size):
"""
Create a new standard image and return the ID of the newly-registered
image.
"""
image_file = StringIO.StringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False, data=image_file,
properties={'key1': 'value1'})
image_id = image['id']
return image_id
@test.attr(type='gate')
def test_list_image_metadata(self):
# All metadata key/value pairs for an image should be returned
resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'value1'}
self.assertEqual(expected, resp_metadata['properties'])
@test.attr(type='gate')
def test_update_image_metadata(self):
# The metadata for the image should match the updated values
req_metadata = {'key1': 'alt1', 'key2': 'value2'}
metadata = self.client.get_image_meta(self.image_id)
self.assertEqual(metadata['properties'], {'key1': 'value1'})
metadata['properties'].update(req_metadata)
metadata = self.client.update_image(
self.image_id, properties=metadata['properties'])
resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'alt1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata['properties'])
| apache-2.0 | -8,566,382,824,186,845,000 | 41.714801 | 78 | 0.573614 | false |
mhnatiuk/phd_sociology_of_religion | scrapper/lib/python2.7/site-packages/scrapy/tests/test_crawl.py | 10 | 8408 | import json
import socket
import mock
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.utils.test import docrawl, get_testlog
from scrapy.tests.spiders import FollowAllSpider, DelaySpider, SimpleSpider, \
BrokenStartRequestsSpider, SingleRequestSpider, DuplicateStartRequestsSpider
from scrapy.tests.mockserver import MockServer
from scrapy.http import Request
class CrawlTestCase(TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
def tearDown(self):
self.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def test_follow_all(self):
spider = FollowAllSpider()
yield docrawl(spider)
self.assertEqual(len(spider.urls_visited), 11) # 10 + start_url
@defer.inlineCallbacks
def test_delay(self):
# short to long delays
yield self._test_delay(0.2, False)
yield self._test_delay(1, False)
# randoms
yield self._test_delay(0.2, True)
yield self._test_delay(1, True)
@defer.inlineCallbacks
def _test_delay(self, delay, randomize):
settings = {"DOWNLOAD_DELAY": delay, 'RANDOMIZE_DOWNLOAD_DELAY': randomize}
spider = FollowAllSpider(maxlatency=delay * 2)
yield docrawl(spider, settings)
t = spider.times
totaltime = t[-1] - t[0]
avgd = totaltime / (len(t) - 1)
tolerance = 0.6 if randomize else 0.2
self.assertTrue(avgd > delay * (1 - tolerance),
"download delay too small: %s" % avgd)
@defer.inlineCallbacks
def test_timeout_success(self):
spider = DelaySpider(n=0.5)
yield docrawl(spider)
self.assertTrue(spider.t1 > 0)
self.assertTrue(spider.t2 > 0)
self.assertTrue(spider.t2 > spider.t1)
@defer.inlineCallbacks
def test_timeout_failure(self):
spider = DelaySpider(n=0.5)
yield docrawl(spider, {"DOWNLOAD_TIMEOUT": 0.35})
self.assertTrue(spider.t1 > 0)
self.assertTrue(spider.t2 == 0)
self.assertTrue(spider.t2_err > 0)
self.assertTrue(spider.t2_err > spider.t1)
# server hangs after receiving response headers
spider = DelaySpider(n=0.5, b=1)
yield docrawl(spider, {"DOWNLOAD_TIMEOUT": 0.35})
self.assertTrue(spider.t1 > 0)
self.assertTrue(spider.t2 == 0)
self.assertTrue(spider.t2_err > 0)
self.assertTrue(spider.t2_err > spider.t1)
@defer.inlineCallbacks
def test_retry_503(self):
spider = SimpleSpider("http://localhost:8998/status?n=503")
yield docrawl(spider)
self._assert_retried()
@defer.inlineCallbacks
def test_retry_conn_failed(self):
spider = SimpleSpider("http://localhost:65432/status?n=503")
yield docrawl(spider)
self._assert_retried()
@defer.inlineCallbacks
def test_retry_dns_error(self):
with mock.patch('socket.gethostbyname',
side_effect=socket.gaierror(-5, 'No address associated with hostname')):
spider = SimpleSpider("http://example.com/")
yield docrawl(spider)
self._assert_retried()
@defer.inlineCallbacks
def test_start_requests_bug_before_yield(self):
spider = BrokenStartRequestsSpider(fail_before_yield=1)
yield docrawl(spider)
errors = self.flushLoggedErrors(ZeroDivisionError)
self.assertEqual(len(errors), 1)
@defer.inlineCallbacks
def test_start_requests_bug_yielding(self):
spider = BrokenStartRequestsSpider(fail_yielding=1)
yield docrawl(spider)
errors = self.flushLoggedErrors(ZeroDivisionError)
self.assertEqual(len(errors), 1)
@defer.inlineCallbacks
def test_start_requests_lazyness(self):
settings = {"CONCURRENT_REQUESTS": 1}
spider = BrokenStartRequestsSpider()
yield docrawl(spider, settings)
#self.assertTrue(False, spider.seedsseen)
#self.assertTrue(spider.seedsseen.index(None) < spider.seedsseen.index(99),
# spider.seedsseen)
@defer.inlineCallbacks
def test_start_requests_dupes(self):
settings = {"CONCURRENT_REQUESTS": 1}
spider = DuplicateStartRequestsSpider(dont_filter=True,
distinct_urls=2,
dupe_factor=3)
yield docrawl(spider, settings)
self.assertEqual(spider.visited, 6)
spider = DuplicateStartRequestsSpider(dont_filter=False,
distinct_urls=3,
dupe_factor=4)
yield docrawl(spider, settings)
self.assertEqual(spider.visited, 3)
@defer.inlineCallbacks
def test_unbounded_response(self):
# Completeness of responses without Content-Length or Transfer-Encoding
# cannot be determined; we treat them as valid but flag them as "partial"
from urllib import urlencode
query = urlencode({'raw': '''\
HTTP/1.1 200 OK
Server: Apache-Coyote/1.1
X-Powered-By: Servlet 2.4; JBoss-4.2.3.GA (build: SVNTag=JBoss_4_2_3_GA date=200807181417)/JBossWeb-2.0
Set-Cookie: JSESSIONID=08515F572832D0E659FD2B0D8031D75F; Path=/
Pragma: no-cache
Expires: Thu, 01 Jan 1970 00:00:00 GMT
Cache-Control: no-cache
Cache-Control: no-store
Content-Type: text/html;charset=UTF-8
Content-Language: en
Date: Tue, 27 Aug 2013 13:05:05 GMT
Connection: close
foo body
with multiples lines
'''})
spider = SimpleSpider("http://localhost:8998/raw?{0}".format(query))
yield docrawl(spider)
log = get_testlog()
self.assertEqual(log.count("Got response 200"), 1)
@defer.inlineCallbacks
def test_retry_conn_lost(self):
# connection lost after receiving data
spider = SimpleSpider("http://localhost:8998/drop?abort=0")
yield docrawl(spider)
self._assert_retried()
@defer.inlineCallbacks
def test_retry_conn_aborted(self):
# connection lost before receiving data
spider = SimpleSpider("http://localhost:8998/drop?abort=1")
yield docrawl(spider)
self._assert_retried()
def _assert_retried(self):
log = get_testlog()
self.assertEqual(log.count("Retrying"), 2)
self.assertEqual(log.count("Gave up retrying"), 1)
@defer.inlineCallbacks
def test_referer_header(self):
"""Referer header is set by RefererMiddleware unless it is already set"""
req0 = Request('http://localhost:8998/echo?headers=1&body=0', dont_filter=1)
req1 = req0.replace()
req2 = req0.replace(headers={'Referer': None})
req3 = req0.replace(headers={'Referer': 'http://example.com'})
req0.meta['next'] = req1
req1.meta['next'] = req2
req2.meta['next'] = req3
spider = SingleRequestSpider(seed=req0)
yield docrawl(spider)
# basic asserts in case of weird communication errors
self.assertIn('responses', spider.meta)
self.assertNotIn('failures', spider.meta)
# start requests don't set the Referer header
echo0 = json.loads(spider.meta['responses'][0].body)
self.assertNotIn('Referer', echo0['headers'])
# following request sets Referer to start request url
echo1 = json.loads(spider.meta['responses'][1].body)
self.assertEqual(echo1['headers'].get('Referer'), [req0.url])
# next request avoids Referer header
echo2 = json.loads(spider.meta['responses'][2].body)
self.assertNotIn('Referer', echo2['headers'])
# last request explicitly sets a Referer header
echo3 = json.loads(spider.meta['responses'][3].body)
self.assertEqual(echo3['headers'].get('Referer'), ['http://example.com'])
@defer.inlineCallbacks
def test_engine_status(self):
from scrapy.utils.engine import get_engine_status
est = []
def cb(response):
est.append(get_engine_status(spider.crawler.engine))
spider = SingleRequestSpider(seed='http://localhost:8998/', callback_func=cb)
yield docrawl(spider)
self.assertEqual(len(est), 1, est)
s = dict(est[0])
self.assertEqual(s['engine.spider.name'], spider.name)
self.assertEqual(s['len(engine.scraper.slot.active)'], 1)
| gpl-2.0 | -2,349,612,145,538,740,000 | 37.568807 | 103 | 0.639748 | false |
alisaifee/holmium.core | versioneer.py | 11 | 24303 | #! /usr/bin/python
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.8+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
modify your __init__.py to define __version__ (by calling a function
from _version.py)
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
import os, sys, re
from setuptools import Command
from setuptools.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = "git"
LONG_VERSION_PY = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.8+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False, hide_stderr=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
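# For example (hypothetical values), an expanded refnames string of
# " (HEAD -> master, tag: 1.2.0, origin/master)" yields
# refs == {'HEAD -> master', 'tag: 1.2.0', 'origin/master'} and, under
# git >= 1.8.3, tags == {'1.2.0'}.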
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' variables were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded variables.
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
except NameError:
return default
return (versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
import subprocess
import sys
def run_command(args, cwd=None, verbose=False, hide_stderr=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' variables were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
import os.path
import sys
# os.path.relpath only appeared in Python-2.6 . Define it here for 2.5.
def os_path_relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x]
path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
def do_vcs_install(versionfile_source, ipy):
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
files = [versionfile_source, ipy]
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os_path_relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command([GIT, "add", "--"] + files)
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.8+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
f = open(filename)
except EnvironmentError:
return versions
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
f.close()
return versions
def write_to_version_file(filename, versions):
f = open(filename, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
print("set %s to '%s'" % (filename, versions["version"]))
def get_versions(default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
try:
root = os.path.dirname(os.path.abspath(__file__))
except NameError:
root = os.path.dirname(os.path.abspath(sys.argv[0]))
versionfile_abs = os.path.join(root, versionfile_source)
# extract version from first of _version.py, 'git describe', parentdir.
# This is meant to work for developers using a source checkout, for users
# of a tarball created by 'setup.py sdist', and for users of a
# tarball/zipball created by 'git archive' or github's download-from-tag
# feature.
variables = get_expanded_variables(versionfile_abs)
if variables:
ver = versions_from_expanded_variables(variables, tag_prefix)
if ver:
if verbose: print("got version from expanded variable %s" % ver)
return ver
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return ver
ver = versions_from_vcs(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from git %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % default)
return default
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
f.close()
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "modify __init__.py and create _version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
print(" creating %s" % versionfile_source)
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
try:
old = open(ipy, "r").read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
f = open(ipy, "a")
f.write(INIT_PY_SNIPPET)
f.close()
else:
print(" %s unmodified" % ipy)
do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
return {'version': cmd_version,
'update_files': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
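# --- Hedged usage sketch, not part of the original versioneer.py ---
# A project's setup.py typically wires versioneer in roughly as below. The
# package name, tag prefix and file paths are illustrative assumptions only.
#
#   import versioneer
#   versioneer.versionfile_source = "src/mypackage/_version.py"
#   versioneer.versionfile_build = "mypackage/_version.py"
#   versioneer.tag_prefix = "mypackage-"        # tags look like "mypackage-1.2"
#   versioneer.parentdir_prefix = "mypackage-"  # sdists unpack to "mypackage-1.2/"
#
#   from distutils.core import setup
#   setup(name="mypackage",
#         version=versioneer.get_version(),
#         cmdclass=versioneer.get_cmdclass(),
#         packages=["mypackage"])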
| mit | -6,332,344,183,225,210,000 | 37.57619 | 91 | 0.625314 | false |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32com/client/gencache.py | 4 | 23679 | """Manages the cache of generated Python code.
Description
This file manages the cache of generated Python code. When run from the
command line, it also provides a number of options for managing that cache.
Implementation
Each typelib is generated into a filename of format "{guid}x{lcid}x{major}x{minor}.py"
An external persistent dictionary maps from all known IIDs in all known type libraries
to the type library itself.
Thus, whenever Python code knows the IID of an object, it can find the IID, LCID and version of
the type library which supports it. Given this information, it can find the Python module
with the support.
If necessary, this support can be generated on the fly.
Hacks, to do, etc
Currently just uses a pickled dictionary, but should use some sort of indexed file.
Maybe an OLE2 compound file, or a bsddb file?
"""
import pywintypes, os, sys
import pythoncom
import win32com, win32com.client
import glob
import traceback
from . import CLSIDToClass
import operator
try:
from imp import reload # exported by the imp module in py3k.
except:
pass # a builtin on py2k.
bForDemandDefault = 0 # Default value of bForDemand - toggle this to change the world - see also makepy.py
# The global dictionary
clsidToTypelib = {}
# If we have a different version of the typelib generated, this
# maps the "requested version" to the "generated version".
versionRedirectMap = {}
# There is no reason we *must* be readonly in a .zip, but we are now,
# Rather than check for ".zip" or other tricks, PEP302 defines
# a "__loader__" attribute, so we use that.
# (Later, it may become necessary to check if the __loader__ can update files,
# as a .zip loader potentially could - but punt all that until a need arises)
is_readonly = is_zip = hasattr(win32com, "__loader__") and hasattr(win32com.__loader__, "archive")
# A dictionary of ITypeLibrary objects for demand generation explicitly handed to us
# Keyed by usual clsid, lcid, major, minor
demandGeneratedTypeLibraries = {}
import pickle as pickle
def __init__():
# Initialize the module. Called once explicitly at module import below.
try:
_LoadDicts()
except IOError:
Rebuild()
pickleVersion = 1
def _SaveDicts():
if is_readonly:
raise RuntimeError("Trying to write to a readonly gencache ('%s')!" \
% win32com.__gen_path__)
f = open(os.path.join(GetGeneratePath(), "dicts.dat"), "wb")
try:
p = pickle.Pickler(f)
p.dump(pickleVersion)
p.dump(clsidToTypelib)
finally:
f.close()
def _LoadDicts():
# Load the dictionary from a .zip file if that is where we live.
if is_zip:
import io as io
loader = win32com.__loader__
arc_path = loader.archive
dicts_path = os.path.join(win32com.__gen_path__, "dicts.dat")
if dicts_path.startswith(arc_path):
dicts_path = dicts_path[len(arc_path)+1:]
else:
# Hm. See below.
return
try:
data = loader.get_data(dicts_path)
except AttributeError:
# The __loader__ has no get_data method. See below.
return
except IOError:
# Our gencache is in a .zip file (and almost certainly readonly)
# but no dicts file. That actually needn't be fatal for a frozen
# application. Assuming they call "EnsureModule" with the same
# typelib IDs they have been frozen with, that EnsureModule will
# correctly re-build the dicts on the fly. However, objects that
# rely on the gencache but have not done an EnsureModule will
# fail (but their apps are likely to fail running from source
# with a clean gencache anyway, as then they would be getting
# Dynamic objects until the cache is built - so the best answer
# for these apps is to call EnsureModule, rather than freezing
# the dict)
return
f = io.StringIO(data)
else:
# NOTE: IOError on file open must be caught by caller.
f = open(os.path.join(win32com.__gen_path__, "dicts.dat"), "rb")
try:
p = pickle.Unpickler(f)
version = p.load()
global clsidToTypelib
clsidToTypelib = p.load()
versionRedirectMap.clear()
finally:
f.close()
def GetGeneratedFileName(clsid, lcid, major, minor):
"""Given the clsid, lcid, major and minor for a type lib, return
the file name (no extension) providing this support.
"""
return str(clsid).upper()[1:-1] + "x%sx%sx%s" % (lcid, major, minor)
def SplitGeneratedFileName(fname):
"""Reverse of GetGeneratedFileName()
"""
return tuple(fname.split('x',4))
def GetGeneratePath():
"""Returns the name of the path to generate to.
Checks the directory is OK.
"""
assert not is_readonly, "Why do you want the genpath for a readonly store?"
try:
os.makedirs(win32com.__gen_path__)
#os.mkdir(win32com.__gen_path__)
except os.error:
pass
try:
fname = os.path.join(win32com.__gen_path__, "__init__.py")
os.stat(fname)
except os.error:
f = open(fname,"w")
f.write('# Generated file - this directory may be deleted to reset the COM cache...\n')
f.write('import win32com\n')
f.write('if __path__[:-1] != win32com.__gen_path__: __path__.append(win32com.__gen_path__)\n')
f.close()
return win32com.__gen_path__
#
# The helpers for win32com.client.Dispatch and OCX clients.
#
def GetClassForProgID(progid):
"""Get a Python class for a Program ID
Given a Program ID, return a Python class which wraps the COM object
Returns the Python class, or None if no module is available.
Params
progid -- A COM ProgramID or IID (eg, "Word.Application")
"""
clsid = pywintypes.IID(progid) # This auto-converts named to IDs.
return GetClassForCLSID(clsid)
def GetClassForCLSID(clsid):
"""Get a Python class for a CLSID
Given a CLSID, return a Python class which wraps the COM object
Returns the Python class, or None if no module is available.
Params
clsid -- A COM CLSID (or string repr of one)
"""
# first, take a short-cut - we may already have generated support ready-to-roll.
clsid = str(clsid)
if CLSIDToClass.HasClass(clsid):
return CLSIDToClass.GetClass(clsid)
mod = GetModuleForCLSID(clsid)
if mod is None:
return None
try:
return CLSIDToClass.GetClass(clsid)
except KeyError:
return None
def GetModuleForProgID(progid):
"""Get a Python module for a Program ID
Given a Program ID, return a Python module which contains the
class which wraps the COM object.
Returns the Python module, or None if no module is available.
Params
progid -- A COM ProgramID or IID (eg, "Word.Application")
"""
try:
iid = pywintypes.IID(progid)
except pywintypes.com_error:
return None
return GetModuleForCLSID(iid)
def GetModuleForCLSID(clsid):
"""Get a Python module for a CLSID
Given a CLSID, return a Python module which contains the
class which wraps the COM object.
Returns the Python module, or None if no module is available.
Params
progid -- A COM CLSID (ie, not the description)
"""
clsid_str = str(clsid)
try:
typelibCLSID, lcid, major, minor = clsidToTypelib[clsid_str]
except KeyError:
return None
try:
mod = GetModuleForTypelib(typelibCLSID, lcid, major, minor)
except ImportError:
mod = None
if mod is not None:
sub_mod = mod.CLSIDToPackageMap.get(clsid_str)
if sub_mod is None:
sub_mod = mod.VTablesToPackageMap.get(clsid_str)
if sub_mod is not None:
sub_mod_name = mod.__name__ + "." + sub_mod
try:
__import__(sub_mod_name)
except ImportError:
info = typelibCLSID, lcid, major, minor
# Force the generation. If this typelibrary has explicitly been added,
# use it (it may not be registered, causing a lookup by clsid to fail)
if info in demandGeneratedTypeLibraries:
info = demandGeneratedTypeLibraries[info]
from . import makepy
makepy.GenerateChildFromTypeLibSpec(sub_mod, info)
# Generate does an import...
mod = sys.modules[sub_mod_name]
return mod
def GetModuleForTypelib(typelibCLSID, lcid, major, minor):
"""Get a Python module for a type library ID
Given the CLSID of a typelibrary, return an imported Python module,
else None
Params
typelibCLSID -- IID of the type library.
major -- Integer major version.
minor -- Integer minor version
lcid -- Integer LCID for the library.
"""
modName = GetGeneratedFileName(typelibCLSID, lcid, major, minor)
mod = _GetModule(modName)
# If the import worked, it doesn't mean we have actually added this
# module to our cache though - check that here.
if "_in_gencache_" not in mod.__dict__:
AddModuleToCache(typelibCLSID, lcid, major, minor)
assert "_in_gencache_" in mod.__dict__
return mod
def MakeModuleForTypelib(typelibCLSID, lcid, major, minor, progressInstance = None, bForDemand = bForDemandDefault, bBuildHidden = 1):
"""Generate support for a type library.
Given the IID, LCID and version information for a type library, generate
and import the necessary support files.
Returns the Python module. No exceptions are caught.
Params
typelibCLSID -- IID of the type library.
major -- Integer major version.
minor -- Integer minor version.
lcid -- Integer LCID for the library.
progressInstance -- Instance to use as progress indicator, or None to
use the GUI progress bar.
"""
from . import makepy
makepy.GenerateFromTypeLibSpec( (typelibCLSID, lcid, major, minor), progressInstance=progressInstance, bForDemand = bForDemand, bBuildHidden = bBuildHidden)
return GetModuleForTypelib(typelibCLSID, lcid, major, minor)
def MakeModuleForTypelibInterface(typelib_ob, progressInstance = None, bForDemand = bForDemandDefault, bBuildHidden = 1):
"""Generate support for a type library.
Given a PyITypeLib interface generate and import the necessary support files. This is useful
for getting makepy support for a typelibrary that is not registered - the caller can locate
and load the type library itself, rather than relying on COM to find it.
Returns the Python module.
Params
typelib_ob -- The type library itself
progressInstance -- Instance to use as progress indicator, or None to
use the GUI progress bar.
"""
from . import makepy
try:
makepy.GenerateFromTypeLibSpec( typelib_ob, progressInstance=progressInstance, bForDemand = bForDemandDefault, bBuildHidden = bBuildHidden)
except pywintypes.com_error:
return None
tla = typelib_ob.GetLibAttr()
guid = tla[0]
lcid = tla[1]
major = tla[3]
minor = tla[4]
return GetModuleForTypelib(guid, lcid, major, minor)
def EnsureModuleForTypelibInterface(typelib_ob, progressInstance = None, bForDemand = bForDemandDefault, bBuildHidden = 1):
"""Check we have support for a type library, generating if not.
Given a PyITypeLib interface generate and import the necessary
support files if necessary. This is useful for getting makepy support
for a typelibrary that is not registered - the caller can locate and
load the type library itself, rather than relying on COM to find it.
Returns the Python module.
Params
typelib_ob -- The type library itself
progressInstance -- Instance to use as progress indicator, or None to
use the GUI progress bar.
"""
tla = typelib_ob.GetLibAttr()
guid = tla[0]
lcid = tla[1]
major = tla[3]
minor = tla[4]
#If demand generated, save the typelib interface away for later use
if bForDemand:
demandGeneratedTypeLibraries[(str(guid), lcid, major, minor)] = typelib_ob
try:
return GetModuleForTypelib(guid, lcid, major, minor)
except ImportError:
pass
# Generate it.
return MakeModuleForTypelibInterface(typelib_ob, progressInstance, bForDemand, bBuildHidden)
def ForgetAboutTypelibInterface(typelib_ob):
"""Drop any references to a typelib previously added with EnsureModuleForTypelibInterface and forDemand"""
tla = typelib_ob.GetLibAttr()
guid = tla[0]
lcid = tla[1]
major = tla[3]
minor = tla[4]
info = str(guid), lcid, major, minor
try:
del demandGeneratedTypeLibraries[info]
except KeyError:
# Not worth raising an exception - maybe they don't know we only remember for demand generated, etc.
print("ForgetAboutTypelibInterface:: Warning - type library with info %s is not being remembered!" % (info,))
# and drop any version redirects to it
for key, val in list(versionRedirectMap.items()):
if val==info:
del versionRedirectMap[key]
def EnsureModule(typelibCLSID, lcid, major, minor, progressInstance = None, bValidateFile=not is_readonly, bForDemand = bForDemandDefault, bBuildHidden = 1):
"""Ensure Python support is loaded for a type library, generating if necessary.
Given the IID, LCID and version information for a type library, check and if
necessary (re)generate, then import the necessary support files. If we regenerate the file, there
is no way to totally snuff out all instances of the old module in Python, and thus we will regenerate the file more than necessary,
unless makepy/genpy is modified accordingly.
Returns the Python module. No exceptions are caught during the generate process.
Params
typelibCLSID -- IID of the type library.
major -- Integer major version.
minor -- Integer minor version
lcid -- Integer LCID for the library.
progressInstance -- Instance to use as progress indicator, or None to
use the GUI progress bar.
bValidateFile -- Whether or not to perform cache validation
bForDemand -- Should a complete generation happen now, or on demand?
bBuildHidden -- Should hidden members/attributes etc be generated?
"""
bReloadNeeded = 0
try:
try:
module = GetModuleForTypelib(typelibCLSID, lcid, major, minor)
except ImportError:
# If we get an ImportError
# We may still find a valid cache file under a different MinorVersion #
# (which windows will search out for us)
#print "Loading reg typelib", typelibCLSID, major, minor, lcid
module = None
try:
tlbAttr = pythoncom.LoadRegTypeLib(typelibCLSID, major, minor, lcid).GetLibAttr()
# if the above line doesn't throw a pythoncom.com_error, check if
# it is actually a different lib than we requested, and if so, suck it in
if tlbAttr[1] != lcid or tlbAttr[4]!=minor:
#print "Trying 2nd minor #", tlbAttr[1], tlbAttr[3], tlbAttr[4]
try:
module = GetModuleForTypelib(typelibCLSID, tlbAttr[1], tlbAttr[3], tlbAttr[4])
except ImportError:
# We don't have a module, but we do have a better minor
# version - remember that.
minor = tlbAttr[4]
# else module remains None
except pythoncom.com_error:
# couldn't load any typelib - mod remains None
pass
if module is not None and bValidateFile:
assert not is_readonly, "Can't validate in a read-only gencache"
try:
typLibPath = pythoncom.QueryPathOfRegTypeLib(typelibCLSID, major, minor, lcid)
# windows seems to add an extra \0 (via the underlying BSTR)
# The mainwin toolkit does not add this erroneous \0
if typLibPath[-1]=='\0':
typLibPath=typLibPath[:-1]
suf = getattr(os.path, "supports_unicode_filenames", 0)
if not suf:
# can't pass unicode filenames directly - convert
try:
typLibPath=typLibPath.encode(sys.getfilesystemencoding())
except AttributeError: # no sys.getfilesystemencoding
typLibPath=str(typLibPath)
tlbAttributes = pythoncom.LoadRegTypeLib(typelibCLSID, major, minor, lcid).GetLibAttr()
except pythoncom.com_error:
# We have a module, but no type lib - we should still
# run with what we have though - the typelib may not be
# deployed here.
bValidateFile = 0
if module is not None and bValidateFile:
assert not is_readonly, "Can't validate in a read-only gencache"
filePathPrefix = "%s\\%s" % (GetGeneratePath(), GetGeneratedFileName(typelibCLSID, lcid, major, minor))
filePath = filePathPrefix + ".py"
filePathPyc = filePathPrefix + ".py"
if __debug__:
filePathPyc = filePathPyc + "c"
else:
filePathPyc = filePathPyc + "o"
# Verify that type library is up to date.
# If we have a differing MinorVersion or genpy has bumped versions, update the file
from . import genpy
if module.MinorVersion != tlbAttributes[4] or genpy.makepy_version != module.makepy_version:
#print "Version skew: %d, %d" % (module.MinorVersion, tlbAttributes[4])
# try to erase the bad file from the cache
try:
os.unlink(filePath)
except os.error:
pass
try:
os.unlink(filePathPyc)
except os.error:
pass
if os.path.isdir(filePathPrefix):
import shutil
shutil.rmtree(filePathPrefix)
minor = tlbAttributes[4]
module = None
bReloadNeeded = 1
else:
minor = module.MinorVersion
filePathPrefix = "%s\\%s" % (GetGeneratePath(), GetGeneratedFileName(typelibCLSID, lcid, major, minor))
filePath = filePathPrefix + ".py"
filePathPyc = filePathPrefix + ".pyc"
#print "Trying py stat: ", filePath
fModTimeSet = 0
try:
pyModTime = os.stat(filePath)[8]
fModTimeSet = 1
except os.error as e:
# If .py file fails, try .pyc file
#print "Trying pyc stat", filePathPyc
try:
pyModTime = os.stat(filePathPyc)[8]
fModTimeSet = 1
except os.error as e:
pass
#print "Trying stat typelib", pyModTime
#print str(typLibPath)
typLibModTime = os.stat(typLibPath)[8]
if fModTimeSet and (typLibModTime > pyModTime):
bReloadNeeded = 1
module = None
except (ImportError, os.error):
module = None
if module is None:
# We need to build an item. If we are in a read-only cache, we
# can't/don't want to do this - so before giving up, check for
# a different minor version in our cache - according to COM, this is OK
if is_readonly:
key = str(typelibCLSID), lcid, major, minor
# If we have been asked before, get last result.
try:
return versionRedirectMap[key]
except KeyError:
pass
# Find other candidates.
items = []
for desc in GetGeneratedInfos():
if key[0]==desc[0] and key[1]==desc[1] and key[2]==desc[2]:
items.append(desc)
if items:
# Items are all identical, except for last tuple element
# We want the latest minor version we have - so just sort and grab last
items.sort()
new_minor = items[-1][3]
ret = GetModuleForTypelib(typelibCLSID, lcid, major, new_minor)
else:
ret = None
# remember and return
versionRedirectMap[key] = ret
return ret
#print "Rebuilding: ", major, minor
module = MakeModuleForTypelib(typelibCLSID, lcid, major, minor, progressInstance, bForDemand = bForDemand, bBuildHidden = bBuildHidden)
# If we replaced something, reload it
if bReloadNeeded:
module = reload(module)
AddModuleToCache(typelibCLSID, lcid, major, minor)
return module
def EnsureDispatch(prog_id, bForDemand = 1): # New fn, so we default the new demand feature to on!
"""Given a COM prog_id, return an object that is using makepy support, building if necessary"""
disp = win32com.client.Dispatch(prog_id)
if not disp.__dict__.get("CLSID"): # Eeek - no makepy support - try and build it.
try:
ti = disp._oleobj_.GetTypeInfo()
disp_clsid = ti.GetTypeAttr()[0]
tlb, index = ti.GetContainingTypeLib()
tla = tlb.GetLibAttr()
mod = EnsureModule(tla[0], tla[1], tla[3], tla[4], bForDemand=bForDemand)
GetModuleForCLSID(disp_clsid)
# Get the class from the module.
from . import CLSIDToClass
disp_class = CLSIDToClass.GetClass(str(disp_clsid))
disp = disp_class(disp._oleobj_)
except pythoncom.com_error:
raise TypeError("This COM object can not automate the makepy process - please run makepy manually for this object")
return disp
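# --- Hedged usage sketch, not part of the original module ---
# EnsureDispatch is the usual entry point for early-bound automation: it
# dispatches the object and builds/loads makepy support on demand. The ProgID
# "Excel.Application" below is only an illustrative assumption; any registered
# automation object with a type library would behave the same way.
#
#   from win32com.client import gencache
#   excel = gencache.EnsureDispatch("Excel.Application")
#   # excel is now an instance of a makepy-generated wrapper class, so
#   # attribute access and constants go through the generated module.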
def AddModuleToCache(typelibclsid, lcid, major, minor, verbose = 1, bFlushNow = not is_readonly):
"""Add a newly generated file to the cache dictionary.
"""
fname = GetGeneratedFileName(typelibclsid, lcid, major, minor)
mod = _GetModule(fname)
# if mod._in_gencache_ is already true, then we are reloading this
# module - this doesn't mean anything special though!
mod._in_gencache_ = 1
dict = mod.CLSIDToClassMap
info = str(typelibclsid), lcid, major, minor
for clsid, cls in dict.items():
clsidToTypelib[clsid] = info
dict = mod.CLSIDToPackageMap
for clsid, name in dict.items():
clsidToTypelib[clsid] = info
dict = mod.VTablesToClassMap
for clsid, cls in dict.items():
clsidToTypelib[clsid] = info
dict = mod.VTablesToPackageMap
for clsid, cls in dict.items():
clsidToTypelib[clsid] = info
# If this lib was previously redirected, drop it
if info in versionRedirectMap:
del versionRedirectMap[info]
if bFlushNow:
_SaveDicts()
def GetGeneratedInfos():
zip_pos = win32com.__gen_path__.find(".zip\\")
if zip_pos >= 0:
import zipfile
zip_file = win32com.__gen_path__[:zip_pos+4]
zip_path = win32com.__gen_path__[zip_pos+5:].replace("\\", "/")
zf = zipfile.ZipFile(zip_file)
infos = {}
for n in zf.namelist():
if not n.startswith(zip_path):
continue
base = n[len(zip_path)+1:].split("/")[0]
try:
iid, lcid, major, minor = base.split("x")
lcid = int(lcid)
major = int(major)
minor = int(minor)
iid = pywintypes.IID("{" + iid + "}")
except ValueError:
continue
except pywintypes.com_error:
# invalid IID
continue
infos[(iid, lcid, major, minor)] = 1
zf.close()
return list(infos.keys())
else:
# on the file system
files = glob.glob(win32com.__gen_path__+ "\\*")
ret = []
for file in files:
if not os.path.isdir(file) and not os.path.splitext(file)[1]==".py":
continue
name = os.path.splitext(os.path.split(file)[1])[0]
try:
iid, lcid, major, minor = name.split("x")
iid = pywintypes.IID("{" + iid + "}")
lcid = int(lcid)
major = int(major)
minor = int(minor)
except ValueError:
continue
except pywintypes.com_error:
# invalid IID
continue
ret.append((iid, lcid, major, minor))
return ret
def _GetModule(fname):
"""Given the name of a module in the gen_py directory, import and return it.
"""
mod_name = "win32com.gen_py.%s" % fname
mod = __import__(mod_name)
return sys.modules[mod_name]
def Rebuild(verbose = 1):
"""Rebuild the cache indexes from the file system.
"""
clsidToTypelib.clear()
infos = GetGeneratedInfos()
if verbose and len(infos): # Don't bother reporting this when directory is empty!
print("Rebuilding cache of generated files for COM support...")
for info in infos:
iid, lcid, major, minor = info
if verbose:
print("Checking", GetGeneratedFileName(*info))
try:
AddModuleToCache(iid, lcid, major, minor, verbose, 0)
except:
print("Could not add module %s - %s: %s" % (info, sys.exc_info()[0],sys.exc_info()[1]))
if verbose and len(infos): # Don't bother reporting this when directory is empty!
print("Done.")
_SaveDicts()
def _Dump():
print("Cache is in directory", win32com.__gen_path__)
# Build a unique dir
d = {}
for clsid, (typelibCLSID, lcid, major, minor) in clsidToTypelib.items():
d[typelibCLSID, lcid, major, minor] = None
for typelibCLSID, lcid, major, minor in d.keys():
mod = GetModuleForTypelib(typelibCLSID, lcid, major, minor)
print("%s - %s" % (mod.__doc__, typelibCLSID))
# Boot up
__init__()
def usage():
usageString = """\
Usage: gencache [-q] [-d] [-r]
-q - Quiet
-d - Dump the cache (typelibrary description and filename).
-r - Rebuild the cache dictionary from the existing .py files
"""
print(usageString)
sys.exit(1)
if __name__=='__main__':
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "qrd")
except getopt.error as message:
print(message)
usage()
# we only have options - complain about real args, or none at all!
if len(sys.argv)==1 or args:
print(usage())
verbose = 1
for opt, val in opts:
if opt=='-d': # Dump
_Dump()
if opt=='-r':
Rebuild(verbose)
if opt=='-q':
verbose = 0
| apache-2.0 | -7,204,068,276,579,343,000 | 33.218208 | 157 | 0.704633 | false |
jellis18/DNestPy | Level.py | 2 | 4211 | import numpy as np
class Level:
"""
Defines a Nested Sampling level
"""
def __init__(self, logX=0.0, logL=[-1E300, 0.0]):
"""
Construct a level. By default, logX = 0
and logL = [-1E300, 0.0]. i.e. the prior.
I use -1E300 for compatibility with the C++ version.
"""
self.logX, self.logL = logX, logL
self.accepts = 0
self.tries = 0
self.exceeds = 0
self.visits = 0
def renormaliseVisits(self, regularisation):
"""
Make level stats of order `regularisation`
"""
if self.tries >= regularisation:
self.accepts = int(float(self.accepts+1)/(self.tries+1)*regularisation)
self.tries = regularisation
if self.visits >= regularisation:
self.exceeds = int(float(self.exceeds+1)/(self.visits+1)*regularisation)
self.visits = regularisation
def __str__(self):
"""
Represent the level as a string
"""
s = str(self.logX) + " " + str(self.logL[0]) + " " \
+ str(self.logL[1]) + " "\
+ str(self.accepts) + " "\
+ str(self.tries) + " " + str(self.exceeds) + " "\
+ str(self.visits) + " "
return s
class LevelSet:
"""
Defines a set of levels. Implemented as a list
"""
def __init__(self, filename=None):
"""
Optional: load from file `filename`
"""
self.levels = []
self.logLKeep = [] # Accumulation, for making new levels
if filename == None:
# Start with one level, the prior
self.levels.append(Level())
else:
f = open('levels.txt', 'r')
lines = f.readlines()
for l in lines:
stuff = l.split()
level = Level(logX=float(stuff[0])\
,logL=[float(stuff[1]), float(stuff[2])])
level.accepts = int(stuff[3])
level.tries = int(stuff[4])
level.exceeds = int(stuff[5])
level.visits = int(stuff[6])
self.levels.append(level)
f.close()
def updateAccepts(self, index, accepted):
"""
Input: `index`: which level particle was in
`accepted`: whether it was accepted or not
"""
self.levels[index].accepts += int(accepted)
self.levels[index].tries += 1
def updateExceeds(self, index, logL):
"""
Input: `index`: which level particle is in
logL: its logLikelihood
"""
if index < (len(self.levels)-1):
self.levels[index].visits += 1
if logL >= self.levels[index+1].logL:
self.levels[index].exceeds += 1
def recalculateLogX(self, regularisation):
"""
Re-estimate the logX values for the levels
using the exceeds/visits information.
"""
self.levels[0].logX = 0.0
q = np.exp(-1.0)
for i in xrange(1, len(self.levels)):
self.levels[i].logX = self.levels[i-1].logX \
+ np.log(float(self.levels[i-1].exceeds + q*regularisation)/(self.levels[i-1].visits + regularisation))
def renormaliseVisits(self, regularisation):
"""
Reset all visits, exceeds etc to be of order
regularisation
"""
for level in self.levels:
level.renormaliseVisits(regularisation)
def updateLogLKeep(self, logL):
"""
If the logLikelihood is above the highest level,
store it.
Input: logLikelihood seen
"""
if logL > self.levels[-1].logL:
self.logLKeep.append(logL)
def maybeAddLevel(self, newLevelInterval):
added = False
if len(self.logLKeep) >= newLevelInterval:
self.logLKeep = sorted(self.logLKeep)
index = int(0.63212*len(self.logLKeep))
print("# Creating level " + str(len(self.levels))\
+ " with logL = "\
+ str(self.logLKeep[index][0]))
newLevel = Level(self.levels[-1].logX - 1.0,\
self.logLKeep[index])
self.levels.append(newLevel)
self.logLKeep = self.logLKeep[index+1:]
added = True
if len(self.logLKeep) == newLevelInterval:
self.renormaliseVisits(newLevelInterval)
return added
def save(self, filename='levels.txt'):
"""
Write out all of the levels to a text file.
Default filename='levels.txt'
"""
f = open(filename, 'w')
f.write(str(self))
f.close()
def __getitem__(self, i):
"""
This is like overloading operator [] (LevelSet, int)
"""
return self.levels[i]
def __str__(self):
"""
Put all levels in a single string, each level on a line
"""
return "".join([str(l) + '\n' for l in self.levels])
def __len__(self):
"""
Return number of levels
"""
return len(self.levels)
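# Hedged usage sketch (not part of the original file): the calls a diffusive
# nested sampling driver would typically make against a LevelSet after each
# particle move. The argument names and newLevelInterval value are assumptions.
def _example_levelset_step(levels, index, logL, accepted, newLevelInterval=10000):
    levels.updateAccepts(index, accepted)   # MCMC acceptance bookkeeping
    levels.updateExceeds(index, logL)       # exceeds/visits statistics
    levels.updateLogLKeep(logL)             # remember likelihoods above the top level
    if levels.maybeAddLevel(newLevelInterval):
        levels.recalculateLogX(newLevelInterval)
        levels.save()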
if __name__ == '__main__':
levels = LevelSet()
levels.save('test.txt')
| gpl-3.0 | -7,296,108,088,568,520,000 | 24.993827 | 107 | 0.64189 | false |
tedelhourani/ansible | lib/ansible/modules/messaging/rabbitmq_plugin.py | 85 | 4566 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_plugin
short_description: Manage RabbitMQ plugins
description:
- Manage RabbitMQ plugins.
version_added: "1.1"
author:
- Chris Hoffman (@chrishoffman)
options:
names:
description:
- Comma-separated list of plugin names.
required: true
aliases: [name]
new_only:
description:
- Only enable missing plugins.
- Does not disable plugins that are not in the names list.
type: bool
default: "no"
state:
description:
- Specify if plugins are to be enabled or disabled.
default: enabled
choices: [enabled, disabled]
prefix:
description:
- Specify a custom install prefix to a Rabbit.
version_added: "1.3"
'''
EXAMPLES = '''
- name: Enables the rabbitmq_management plugin
rabbitmq_plugin:
names: rabbitmq_management
state: enabled
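# Additional illustrative tasks (assumed plugin names, not from the original module):

- name: Enable the shovel plugins without disabling anything else
  rabbitmq_plugin:
    names: rabbitmq_shovel,rabbitmq_shovel_management
    new_only: yes
    state: enabled

- name: Disable the management plugin
  rabbitmq_plugin:
    names: rabbitmq_management
    state: disabled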
'''
RETURN = '''
enabled:
description: list of plugins enabled during task run
returned: always
type: list
sample: ["rabbitmq_management"]
disabled:
description: list of plugins disabled during task run
returned: always
type: list
sample: ["rabbitmq_management"]
'''
import os
from ansible.module_utils.basic import AnsibleModule
class RabbitMqPlugins(object):
def __init__(self, module):
self.module = module
if module.params['prefix']:
if os.path.isdir(os.path.join(module.params['prefix'], 'bin')):
bin_path = os.path.join(module.params['prefix'], 'bin')
elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')):
bin_path = os.path.join(module.params['prefix'], 'sbin')
else:
# No such path exists.
raise Exception("No binary folder in prefix %s" %
module.params['prefix'])
self._rabbitmq_plugins = bin_path + "/rabbitmq-plugins"
else:
self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
def _exec(self, args, run_in_check_mode=False):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = [self._rabbitmq_plugins]
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def get_all(self):
list_output = self._exec(['list', '-E', '-m'], True)
plugins = []
for plugin in list_output:
if not plugin:
break
plugins.append(plugin)
return plugins
def enable(self, name):
self._exec(['enable', name])
def disable(self, name):
self._exec(['disable', name])
def main():
arg_spec = dict(
names=dict(required=True, aliases=['name']),
new_only=dict(default='no', type='bool'),
state=dict(default='enabled', choices=['enabled', 'disabled']),
prefix=dict(required=False, default=None)
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
result = dict()
names = module.params['names'].split(',')
new_only = module.params['new_only']
state = module.params['state']
rabbitmq_plugins = RabbitMqPlugins(module)
enabled_plugins = rabbitmq_plugins.get_all()
enabled = []
disabled = []
if state == 'enabled':
if not new_only:
for plugin in enabled_plugins:
if plugin not in names:
rabbitmq_plugins.disable(plugin)
disabled.append(plugin)
for name in names:
if name not in enabled_plugins:
rabbitmq_plugins.enable(name)
enabled.append(name)
else:
for plugin in enabled_plugins:
if plugin in names:
rabbitmq_plugins.disable(plugin)
disabled.append(plugin)
result['changed'] = len(enabled) > 0 or len(disabled) > 0
result['enabled'] = enabled
result['disabled'] = disabled
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,831,803,007,854,848,000 | 26.672727 | 92 | 0.599869 | false |
lpirl/ansible | lib/ansible/module_utils/eos.py | 7 | 8752 | #
# (c) 2015 Peter Sprygada, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import re
from ansible.module_utils.basic import AnsibleModule, env_fallback, get_exception
from ansible.module_utils.shell import Shell, ShellError, Command, HAS_PARAMIKO
from ansible.module_utils.netcfg import parse
from ansible.module_utils.urls import fetch_url
NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
NET_COMMON_ARGS = dict(
host=dict(required=True),
port=dict(type='int'),
username=dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
password=dict(no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD'])),
ssh_keyfile=dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
authorize=dict(default=False, fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
auth_pass=dict(no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS'])),
transport=dict(default='cli', choices=['cli', 'eapi']),
use_ssl=dict(default=True, type='bool'),
provider=dict(type='dict')
)
CLI_PROMPTS_RE = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
CLI_ERRORS_RE = [
re.compile(r"% ?Error"),
re.compile(r"^% \w+", re.M),
re.compile(r"% ?Bad secret"),
re.compile(r"invalid input", re.I),
re.compile(r"(?:incomplete|ambiguous) command", re.I),
re.compile(r"connection timed out", re.I),
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+"),
re.compile(r"[^\r\n]\/bin\/(?:ba)?sh")
]
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class Eapi(object):
def __init__(self, module):
self.module = module
# sets the module_utils/urls.py req parameters
self.module.params['url_username'] = module.params['username']
self.module.params['url_password'] = module.params['password']
self.url = None
self.enable = None
def _get_body(self, commands, encoding, reqid=None):
"""Create a valid eAPI JSON-RPC request message
"""
params = dict(version=1, cmds=commands, format=encoding)
return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params)
def connect(self):
host = self.module.params['host']
port = self.module.params['port']
if self.module.params['use_ssl']:
proto = 'https'
if not port:
port = 443
else:
proto = 'http'
if not port:
port = 80
self.url = '%s://%s:%s/command-api' % (proto, host, port)
def authorize(self):
if self.module.params['auth_pass']:
passwd = self.module.params['auth_pass']
self.enable = dict(cmd='enable', input=passwd)
else:
self.enable = 'enable'
def send(self, commands, encoding='json'):
"""Send commands to the device.
"""
clist = to_list(commands)
if self.enable is not None:
clist.insert(0, self.enable)
data = self._get_body(clist, encoding)
data = self.module.jsonify(data)
headers = {'Content-Type': 'application/json-rpc'}
response, headers = fetch_url(self.module, self.url, data=data,
headers=headers, method='POST')
if headers['status'] != 200:
self.module.fail_json(**headers)
response = self.module.from_json(response.read())
if 'error' in response:
err = response['error']
self.module.fail_json(msg='json-rpc error', commands=commands, **err)
if self.enable:
response['result'].pop(0)
return response['result']
class Cli(object):
def __init__(self, module):
self.module = module
self.shell = None
def connect(self, **kwargs):
host = self.module.params['host']
port = self.module.params['port'] or 22
username = self.module.params['username']
password = self.module.params['password']
key_filename = self.module.params['ssh_keyfile']
try:
self.shell = Shell(prompts_re=CLI_PROMPTS_RE, errors_re=CLI_ERRORS_RE)
self.shell.open(host, port=port, username=username, password=password, key_filename=key_filename)
except ShellError:
e = get_exception()
msg = 'failed to connect to %s:%s - %s' % (host, port, str(e))
self.module.fail_json(msg=msg)
def authorize(self):
passwd = self.module.params['auth_pass']
self.send(Command('enable', prompt=NET_PASSWD_RE, response=passwd))
def send(self, commands):
try:
return self.shell.send(commands)
except ShellError:
e = get_exception()
self.module.fail_json(msg=e.message, commands=commands)
class NetworkModule(AnsibleModule):
def __init__(self, *args, **kwargs):
super(NetworkModule, self).__init__(*args, **kwargs)
self.connection = None
self._config = None
self._connected = False
@property
def connected(self):
return self._connected
@property
def config(self):
if not self._config:
self._config = self.get_config()
return self._config
def _load_params(self):
super(NetworkModule, self)._load_params()
provider = self.params.get('provider') or dict()
for key, value in provider.items():
if key in NET_COMMON_ARGS:
if self.params.get(key) is None and value is not None:
self.params[key] = value
def connect(self):
cls = globals().get(str(self.params['transport']).capitalize())
try:
self.connection = cls(self)
except TypeError:
e = get_exception()
self.fail_json(msg=e.message)
self.connection.connect()
self.connection.send('terminal length 0')
if self.params['authorize']:
self.connection.authorize()
self._connected = True
def configure(self, commands, replace=False):
if replace:
responses = self.config_replace(commands)
else:
responses = self.config_terminal(commands)
return responses
def config_terminal(self, commands):
commands = to_list(commands)
commands.insert(0, 'configure terminal')
responses = self.execute(commands)
responses.pop(0)
return responses
def config_replace(self, commands):
if self.params['transport'] == 'cli':
self.fail_json(msg='config replace only supported over eapi')
cmd = 'configure replace terminal:'
commands = '\n'.join(to_list(commands))
command = dict(cmd=cmd, input=commands)
return self.execute(command)
def execute(self, commands, **kwargs):
if not self.connected:
self.connect()
return self.connection.send(commands, **kwargs)
def disconnect(self):
self.connection.close()
self._connected = False
def parse_config(self, cfg):
return parse(cfg, indent=3)
def get_config(self):
cmd = 'show running-config'
if self.params.get('include_defaults'):
cmd += ' all'
if self.params['transport'] == 'cli':
return self.execute(cmd)[0]
else:
resp = self.execute(cmd, encoding='text')
return resp[0]['output']
def get_module(**kwargs):
"""Return instance of NetworkModule
"""
argument_spec = NET_COMMON_ARGS.copy()
if kwargs.get('argument_spec'):
argument_spec.update(kwargs['argument_spec'])
kwargs['argument_spec'] = argument_spec
module = NetworkModule(**kwargs)
if module.params['transport'] == 'cli' and not HAS_PARAMIKO:
module.fail_json(msg='paramiko is required but does not appear to be installed')
return module
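# --- Hedged usage sketch, not part of the original module ---
# A module built on these helpers usually adds its own options on top of
# NET_COMMON_ARGS and runs commands through the returned NetworkModule.
# The "commands" option below is an assumption for illustration only.
#
#   def main():
#       spec = dict(commands=dict(type='list', required=True))
#       module = get_module(argument_spec=spec, supports_check_mode=True)
#       responses = module.execute(module.params['commands'])
#       module.disconnect()
#       module.exit_json(changed=False, stdout=responses)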
| gpl-3.0 | -1,457,779,457,341,224,000 | 31.058608 | 109 | 0.601348 | false |
iakovos-panourgias/fluidity | python/fluidity_ufl/ufl_fortran.py | 5 | 7167 |
import parse_ufl
detwei=("detwei", "detwei", "real, dimension(ngi), intent(in) :: detwei")
class fortran_module():
def __init__(self, filename):
"""Given a ufl file produce a fortran module implementing that
ufl. The module will have the name basename_module where basename is
filename with any trailing .ufl stripped. The module will have a
single public subroutine named basename. Basename will take a
state_type as an argument."""
self.filename=filename
if filename[-4:]==".ufl":
self.basename=filename[:-4]
else:
self.basename=filename
self.parsed_ufl=parse_ufl.parse_file(filename)
class fortran_integral():
def __init__(self, integral, dim="dim"):
err=integral.invalid()
if err:
raise TypeError, err
self.integral=integral
self.test_dim="ele_loc("+self.integral.test.name+")"
try:
self.trial_dim="ele_loc("+self.integral.trial.name+")"
except AttributeError:
self.trial_dim=None
# Dimension of the problem space.
self.dim=dim
self.set_arguments()
self.map_sum_indices()
def fortran(self):
dim_indices=["dim"+`i`+"_i" for i in range(self.integral.rank)]
name, declaration, lhs=self.function_spec(dim_indices)
decs=self.argument_declarations+[declaration]
core_indices="integer :: gi, test_i"
if self.integral.trial:
core_indices=core_indices+", trial_i"
decs=decs+[core_indices]
if dim_indices:
decs=decs+["integer :: "+",".join(dim_indices)]
if self.sum_index_count:
sum_indices="integer :: "+\
", ".join(["i"+`i+1` for i in
range(self.sum_index_count)])
decs=decs+[sum_indices]
body_lhs=[lhs+" = "+lhs+"&"]
body = "+ "
# Copy of dim_indices, to be consumed (emptied) in the next loop.
ldim_indices=list(dim_indices)
for f in self.iterfunctions():
body=body+self.index_function(f, ldim_indices)+"*"
body=body+"detwei(gi)"
body=body_lhs+indent([body])
# Note that the innermost loops comes first.
for index in dim_indices:
body=do_loop(index,self.dim,body)
for index in range(self.sum_index_count):
body=do_loop("i"+`index+1`,self.dim,body)
body=do_loop("gi","ngi",body)
if self.integral.trial:
body=do_loop("trial_i",self.trial_dim,body)
body=do_loop("test_i",self.test_dim,body)
code=decs
code=code+[""]
code=code+["integral=0.0"]
code=code+[""]
code=code+body
code=code+[""]
code=indent(code)
code=["function "+name+"("+", ".join(self.dummy_arguments)+")"\
+" result (integral)"]+code
code=code+["end function "+name]
return code
def index_function(self,f, dim_indices):
""" Produce a string containing the name of f and the appropriate
indices"""
index_code=[]
for i in f.indices:
if (isinstance(i, slice)):
index=dim_indices[0]
dim_indices.remove(index)
index_code.append(index)
else:
index="i"+`self.sum_index_map[i.id]`
index_code.append(index)
# Now the special indices for basis functions.
if f.trial:
index_code.append("trial_i")
if f.test:
index_code.append("test_i")
# We always need to integrate over quadrature points.
index_code.append("gi")
code=f.name+"("+",".join(index_code)+")"
return code
def iterfunctions(self):
"""Generator enabling iteration of the functions of a
fortran_integral in the order they appear in the argument list."""
yield self.integral.test
for f in self.integral.functions:
# We've already done test and trial.
if f.test or f.trial:
continue
yield f
if self.integral.trial:
yield self.integral.trial
def function_spec(self, dim_indices):
name="integral"
declaration="real, dimension("
lhs_args=dim_indices+["test_i"]
for f in self.iterfunctions():
name=name+"_"+f.name
for i in f.indices:
# Slices are free indices.
if isinstance(i, slice):
declaration=declaration+str(self.dim)+", "
else:
name=name+"_i"+`self.sum_index_map[i.id]`
name=name+"_"+self.integral.measure.name
declaration=declaration+self.test_dim+", "
if self.integral.trial:
declaration=declaration+self.trial_dim+", "
lhs_args=lhs_args+["trial_i"]
declaration=declaration+"ngi) :: integral"
lhs="integral("+", ".join(lhs_args)+")"
return name, declaration, lhs
def map_sum_indices(self):
"""Form a list of local index names corresponding to the global
indices recorded in the integral"""
self.sum_index_map={}
self.sum_index_count=0
for ii in self.integral.sum_indices.iterkeys():
if (not self.sum_index_map.has_key(ii)):
self.sum_index_count=self.sum_index_count+1
self.sum_index_map[ii]=self.sum_index_count
def set_arguments(self):
self.argument_declarations=[]
self.variable_declarations=[]
self.dummy_arguments=[]
self.actual_arguments=[]
for f in self.iterfunctions():
args=function_to_arguments(f)
self.dummy_arguments.append(args[0])
self.actual_arguments.append(args[1])
self.argument_declarations.append(args[2])
# You always need detwei.
self.dummy_arguments.append(detwei[0])
self.actual_arguments.append(detwei[1])
self.argument_declarations.append(detwei[2])
def function_to_arguments(function):
'''Take a function object and return actual and dummy argument names as
well as a declaration of the dummy argument'''
actual=function.name
dummy=function.name
if isinstance(function, parse_ufl.BasisFunction):
declaration="element_type, intent(in) :: "+dummy
else:
if function.test or function.trial:
extra_args=2
else:
extra_args=1
declaration="real, dimension("+",".join([":" for i in
range(function.rank+extra_args)])\
+"), intent(in) :: "+dummy
return actual, dummy, declaration
def indent(code):
return [" "+line for line in code]
def do_loop(var, size, body):
code=["do "+var+" = 1, "+str(size)]\
+ indent(body)\
+["end do"]
return code
| lgpl-2.1 | -6,694,187,486,542,812,000 | 28.493827 | 83 | 0.550021 | false |
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder2.0-libraries-swak4Foam | Examples/FromPresentations/OFW8_landspeedersInCanyon/system/makeSlices.py | 4 | 2042 | nr=int(nrPlanes)
dx=(maxX-minX)/nr
planeTemplate="""
createsPlane%(nr)d
{
type createSampledSurface;
outputControl timeStep;
outputInterval 1;
surfaceName autoPlane%(nr)d;
surface {
type plane;
basePoint (%(x)f 0 0);
normalVector (1 0 0);
interpolate true;
}
writeSurfaceOnConstruction true;
autoWriteSurface true;
surfaceFormat vtk;
}
"""
exhaustAverageTemplate="""
exhaustAveragePlane%(nr)d
{
type swakExpression;
valueType surface;
surfaceName autoPlane%(nr)d;
verbose true;
expression "exhaust";
accumulations (
min
max
weightedAverage
);
}
exhaustAveragePlaneAverage%(nr)d
{
type swakExpression;
valueType surface;
surfaceName autoPlane%(nr)d;
verbose true;
expression "exhaust*area()/sum(area())";
accumulations (
sum
);
}
"""
addToVars="""
appendToVariable%(nr)d {
type calculateGlobalVariables;
outputControl timeStep;
outputInterval 1;
valueType surface;
surfaceName autoPlane%(nr)d;
toGlobalNamespace planeValues;
toGlobalVariables (
positions
exhaustAverage
exhaustHeight
exhaustHeight2
);
variables (
"positions=average(pos().x);"
"exhaustAverage=sum(exhaust*area())/sum(area());"
"exhaustHeight=max(exhaust>0.001 ? pos().z : -1);"
"exhaustTotal=exhaustAverage>0.001 ? sum(exhaust*area()) : sum(0.001*area());"
"exhaustHeight2=sum(exhaust*pos().z*area())/exhaustTotal;"
);
}
"""
print "functions {"
for i in range(nr):
x=dx*(i+0.5)+minX
data= { "nr": i, "x": x }
print planeTemplate % data
# print exhaustAverageTemplate % data
print addToVars % data
print "}"
| gpl-2.0 | -1,848,523,350,423,611,600 | 23.902439 | 91 | 0.542605 | false |
fspaolo/scikit-learn | examples/linear_model/plot_logistic.py | 8 | 1400 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
pl.figure(1, figsize=(4, 3))
pl.clf()
pl.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
pl.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
pl.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
pl.axhline(.5, color='.5')
pl.ylabel('y')
pl.xlabel('X')
pl.xticks(())
pl.yticks(())
pl.ylim(-.25, 1.25)
pl.xlim(-4, 10)
pl.show()
| bsd-3-clause | -8,901,926,055,947,480,000 | 20.538462 | 65 | 0.607857 | false |
dufferzafar/picard | picard/formats/vorbis.py | 3 | 11797 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import base64
import mutagen.flac
import mutagen.ogg
import mutagen.oggflac
import mutagen.oggspeex
import mutagen.oggtheora
import mutagen.oggvorbis
try:
from mutagen.oggopus import OggOpus
with_opus = True
except ImportError:
OggOpus = None
with_opus = False
from picard import config, log
from picard.coverart.image import TagCoverArtImage, CoverArtImageError
from picard.file import File
from picard.formats.id3 import types_from_id3, image_type_as_id3_num
from picard.metadata import Metadata
from picard.util import encode_filename, sanitize_date
class VCommentFile(File):
"""Generic VComment-based file."""
_File = None
__translate = {
"musicbrainz_trackid": "musicbrainz_recordingid",
"musicbrainz_releasetrackid": "musicbrainz_trackid",
}
__rtranslate = dict([(v, k) for k, v in __translate.iteritems()])
def _load(self, filename):
log.debug("Loading file %r", filename)
file = self._File(encode_filename(filename))
file.tags = file.tags or {}
metadata = Metadata()
for origname, values in file.tags.items():
for value in values:
name = origname
if name == "date" or name == "originaldate":
# YYYY-00-00 => YYYY
value = sanitize_date(value)
elif name == 'performer' or name == 'comment':
# transform "performer=Joe Barr (Piano)" to "performer:Piano=Joe Barr"
name += ':'
if value.endswith(')'):
start = len(value) - 2
count = 1
while count > 0 and start > 0:
if value[start] == ')':
count += 1
elif value[start] == '(':
count -= 1
start -= 1
if start > 0:
name += value[start + 2:-1]
value = value[:start]
elif name.startswith('rating'):
try:
name, email = name.split(':', 1)
except ValueError:
email = ''
if email != config.setting['rating_user_email']:
continue
name = '~rating'
value = unicode(int(round((float(value) * (config.setting['rating_steps'] - 1)))))
elif name == "fingerprint" and value.startswith("MusicMagic Fingerprint"):
name = "musicip_fingerprint"
value = value[22:]
elif name == "tracktotal":
if "totaltracks" in file.tags:
continue
name = "totaltracks"
elif name == "disctotal":
if "totaldiscs" in file.tags:
continue
name = "totaldiscs"
elif name == "metadata_block_picture":
image = mutagen.flac.Picture(base64.standard_b64decode(value))
try:
coverartimage = TagCoverArtImage(
file=filename,
tag=name,
types=types_from_id3(image.type),
comment=image.desc,
support_types=True,
data=image.data,
)
except CoverArtImageError as e:
log.error('Cannot load image from %r: %s' % (filename, e))
else:
metadata.append_image(coverartimage)
continue
elif name in self.__translate:
name = self.__translate[name]
metadata.add(name, value)
if self._File == mutagen.flac.FLAC:
for image in file.pictures:
try:
coverartimage = TagCoverArtImage(
file=filename,
tag='FLAC/PICTURE',
types=types_from_id3(image.type),
comment=image.desc,
support_types=True,
data=image.data,
)
except CoverArtImageError as e:
log.error('Cannot load image from %r: %s' % (filename, e))
else:
metadata.append_image(coverartimage)
# Read the unofficial COVERART tags, for backward compatibility only
if "metadata_block_picture" not in file.tags:
try:
for data in file["COVERART"]:
try:
coverartimage = TagCoverArtImage(
file=filename,
tag='COVERART',
data=base64.standard_b64decode(data)
)
except CoverArtImageError as e:
log.error('Cannot load image from %r: %s' % (filename, e))
else:
metadata.append_image(coverartimage)
except KeyError:
pass
self._info(metadata, file)
return metadata
def _save(self, filename, metadata):
"""Save metadata to the file."""
log.debug("Saving file %r", filename)
is_flac = self._File == mutagen.flac.FLAC
file = self._File(encode_filename(filename))
if file.tags is None:
file.add_tags()
if config.setting["clear_existing_tags"]:
file.tags.clear()
if (is_flac and (config.setting["clear_existing_tags"] or
metadata.images_to_be_saved_to_tags)):
file.clear_pictures()
tags = {}
for name, value in metadata.items():
if name == '~rating':
# Save rating according to http://code.google.com/p/quodlibet/wiki/Specs_VorbisComments
if config.setting['rating_user_email']:
name = 'rating:%s' % config.setting['rating_user_email']
else:
name = 'rating'
value = unicode(float(value) / (config.setting['rating_steps'] - 1))
# don't save private tags
elif name.startswith("~"):
continue
if name.startswith('lyrics:'):
name = 'lyrics'
elif name == "date" or name == "originaldate":
# YYYY-00-00 => YYYY
value = sanitize_date(value)
elif name.startswith('performer:') or name.startswith('comment:'):
# transform "performer:Piano=Joe Barr" to "performer=Joe Barr (Piano)"
name, desc = name.split(':', 1)
if desc:
value += ' (%s)' % desc
elif name == "musicip_fingerprint":
name = "fingerprint"
value = "MusicMagic Fingerprint%s" % value
elif name in self.__rtranslate:
name = self.__rtranslate[name]
tags.setdefault(name.upper().encode('utf-8'), []).append(value)
if "totaltracks" in metadata:
tags.setdefault(u"TRACKTOTAL", []).append(metadata["totaltracks"])
if "totaldiscs" in metadata:
tags.setdefault(u"DISCTOTAL", []).append(metadata["totaldiscs"])
for image in metadata.images_to_be_saved_to_tags:
picture = mutagen.flac.Picture()
picture.data = image.data
picture.mime = image.mimetype
picture.desc = image.comment
picture.type = image_type_as_id3_num(image.maintype)
if self._File == mutagen.flac.FLAC:
file.add_picture(picture)
else:
tags.setdefault(u"METADATA_BLOCK_PICTURE", []).append(
base64.standard_b64encode(picture.write()))
file.tags.update(tags)
kwargs = {}
if is_flac and config.setting["remove_id3_from_flac"]:
kwargs["deleteid3"] = True
try:
file.save(**kwargs)
except TypeError:
file.save()
class FLACFile(VCommentFile):
"""FLAC file."""
EXTENSIONS = [".flac"]
NAME = "FLAC"
_File = mutagen.flac.FLAC
def _info(self, metadata, file):
super(FLACFile, self)._info(metadata, file)
metadata['~format'] = self.NAME
class OggFLACFile(VCommentFile):
"""FLAC file."""
EXTENSIONS = [".oggflac"]
NAME = "Ogg FLAC"
_File = mutagen.oggflac.OggFLAC
def _info(self, metadata, file):
super(OggFLACFile, self)._info(metadata, file)
metadata['~format'] = self.NAME
class OggSpeexFile(VCommentFile):
"""Ogg Speex file."""
EXTENSIONS = [".spx"]
NAME = "Speex"
_File = mutagen.oggspeex.OggSpeex
def _info(self, metadata, file):
super(OggSpeexFile, self)._info(metadata, file)
metadata['~format'] = self.NAME
class OggTheoraFile(VCommentFile):
"""Ogg Theora file."""
EXTENSIONS = [".oggtheora"]
NAME = "Ogg Theora"
_File = mutagen.oggtheora.OggTheora
def _info(self, metadata, file):
super(OggTheoraFile, self)._info(metadata, file)
metadata['~format'] = self.NAME
class OggVorbisFile(VCommentFile):
"""Ogg Vorbis file."""
EXTENSIONS = [".ogg"]
NAME = "Ogg Vorbis"
_File = mutagen.oggvorbis.OggVorbis
def _info(self, metadata, file):
super(OggVorbisFile, self)._info(metadata, file)
metadata['~format'] = self.NAME
class OggOpusFile(VCommentFile):
"""Ogg Opus file."""
EXTENSIONS = [".opus"]
NAME = "Ogg Opus"
_File = OggOpus
def _info(self, metadata, file):
super(OggOpusFile, self)._info(metadata, file)
metadata['~format'] = self.NAME
def _select_ogg_type(filename, options):
"""Select the best matching Ogg file type."""
fileobj = file(filename, "rb")
results = []
try:
header = fileobj.read(128)
results = [
(option._File.score(filename, fileobj, header), option.__name__, option)
for option in options]
finally:
fileobj.close()
results.sort()
if not results or results[-1][0] <= 0:
raise mutagen.ogg.error("unknown Ogg audio format")
return results[-1][2](filename)
def OggAudioFile(filename):
"""Generic Ogg audio file."""
options = [OggFLACFile, OggSpeexFile, OggVorbisFile]
return _select_ogg_type(filename, options)
OggAudioFile.EXTENSIONS = [".oga"]
OggAudioFile.NAME = "Ogg Audio"
def OggVideoFile(filename):
"""Generic Ogg video file."""
options = [OggTheoraFile]
return _select_ogg_type(filename, options)
OggVideoFile.EXTENSIONS = [".ogv"]
OggVideoFile.NAME = "Ogg Video"
| gpl-2.0 | -363,768,341,288,845,440 | 35.177914 | 103 | 0.535696 | false |
NervanaSystems/coach | rl_coach/presets/CartPole_ACER.py | 1 | 2114 | from rl_coach.agents.acer_agent import ACERAgentParameters
from rl_coach.base_parameters import VisualizationParameters, PresetValidationParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.filters.filter import InputFilter
from rl_coach.filters.reward.reward_rescale_filter import RewardRescaleFilter
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.memories.memory import MemoryGranularity
####################
# Graph Scheduling #
####################
schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(10000000000)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(10)
schedule_params.evaluation_steps = EnvironmentEpisodes(1)
schedule_params.heatup_steps = EnvironmentSteps(0)
#########
# Agent #
#########
agent_params = ACERAgentParameters()
agent_params.algorithm.num_steps_between_gradient_updates = 5
agent_params.algorithm.ratio_of_replay = 4
agent_params.algorithm.num_transitions_to_start_replay = 1000
agent_params.memory.max_size = (MemoryGranularity.Transitions, 50000)
agent_params.input_filter = InputFilter()
agent_params.input_filter.add_reward_filter('rescale', RewardRescaleFilter(1/200.))
agent_params.algorithm.beta_entropy = 0.0
###############
# Environment #
###############
env_params = GymVectorEnvironment(level='CartPole-v0')
########
# Test #
########
preset_validation_params = PresetValidationParameters()
preset_validation_params.test = True
preset_validation_params.min_reward_threshold = 150
preset_validation_params.max_episodes_to_achieve_reward = 300
preset_validation_params.num_workers = 1
graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
schedule_params=schedule_params, vis_params=VisualizationParameters(),
preset_validation_params=preset_validation_params)
| apache-2.0 | 285,189,701,800,660,000 | 42.142857 | 106 | 0.759224 | false |
wuzhy/autotest | server/source_kernel.py | 3 | 1971 | # Copyright 2007 Google Inc. Released under the GPL v2
"""
This module defines the SourceKernel class
SourceKernel: a Linux kernel built from source
"""
from autotest_lib.server import kernel, autotest
class SourceKernel(kernel.Kernel):
"""
This class represents a linux kernel built from source.
It is used to obtain a built kernel or create one from source and
install it on a Host.
Implementation details:
This is a leaf class in an abstract class hierarchy, it must
implement the unimplemented methods in parent classes.
"""
def __init__(self, k):
super(kernel.Kernel, self).__init__()
self.__kernel = k
self.__patch_list = []
self.__config_file = None
self.__autotest = autotest.Autotest()
def configure(self, configFile):
self.__config_file = configFile
def patch(self, patchFile):
self.__patch_list.append(patchFile)
def build(self, host):
ctlfile = self.__control_file(self.__kernel, self.__patch_list,
self.__config_file)
self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
def install(self, host):
self.__autotest.install(host)
ctlfile = ("testkernel = job.kernel('%s')\n"
"testkernel.install()\n"
"testkernel.add_to_bootloader()\n" %(self.__kernel))
self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
def __control_file(self, kernel, patch_list, config):
ctl = ("testkernel = job.kernel('%s')\n" % kernel)
if len(patch_list):
patches = ', '.join(["'%s'" % x for x in patch_list])
ctl += "testkernel.patch(%s)\n" % patches
if config:
ctl += "testkernel.config('%s')\n" % config
else:
ctl += "testkernel.config('', None, True)\n"
ctl += "testkernel.build()\n"
# copy back to server
return ctl
| gpl-2.0 | -214,378,712,727,944,500 | 27.157143 | 71 | 0.583968 | false |
yetu/ansible-deployment | library/initd.py | 2 | 7967 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# WARNING This code is still under development
#
__author__ = 'Adham Helal'
#TODO: escape $GREP_FOR argument. For now, make sure to escape any '-' symbols in GREP_FOR.
DOCUMENTATION = '''
---
'''
initd_string = '''#!/bin/bash
# export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
NAME="$name"
NAME="${NAME}_service"
DESC="$desc"
RUN_AS_USER="$run_user"
RUN_AS_GROUP="$run_group"
BASE_DIR="$base_dir"
GREP_FOR="$grep_for"
EXEC_CMD="$exec_cmd"
EXEC_ARGS="$exec_args"
STOP_CMD="$stop_cmd"
TIMEOUT=$timeout
TIMEOUT_START=$timeout_start
LOG_FILE=$log_file
WAIT_TILL_TIMEOUT="$wait_till_timeout"
START_METHOD="$start_method"
## Play Specific stuff
PLAY_PID_FILE=$BASE_DIR/RUNNING_PID
## Flag Variables
START_CODE=0
STOP_CODE=1
# 0: process exists, 1: process does not exist
isRunning() {
pgrep -f "$GREP_FOR" &> /dev/null
echo $?
}
GetPid() {
pgrep -d " " -f "$GREP_FOR"
echo $!
}
ProcessOk() { echo "[ OK ]"; }
ProcessFailed() { echo "[ FAILED ]" && exit 1; }
GetCond(){
# $1 = is this the final check after the timeout? 0: no, 1: yes
# $2 = expected process status: 0: process should exist, 1: process should not exist
PID_STATUS=$(isRunning)
if [ $1 -eq 0 ]; then
if [ $WAIT_TILL_TIMEOUT -eq 1 ]; then
echo -n "."
else
[ $PID_STATUS -eq $2 ] && ProcessOk || echo -n "."
fi
else
[ $PID_STATUS -eq $2 ] && ProcessOk || ProcessFailed
fi
}
WaitN(){
# $1 = expected process status code (0: running, 1: stopped)
# $2 = timeout in seconds
count=1;
until [ $count -ge $2 ]
do
GetCond 0 "$1"
sleep 1
let count=$count+1;
done
GetCond 1 "$1"
}
DeletePlayPID() {
if [ -f $PLAY_PID_FILE ]; then
echo "Deleting Play PID file: $PLAY_PID_FILE";
rm -f $PLAY_PID_FILE;
fi;
}
StartDaemon() {
PID_STATUS=$(isRunning)
if [ $PID_STATUS -eq $START_CODE ]; then
PID="$(GetPid)"
echo "$NAME is already running PID: $PID"
else
DeletePlayPID
echo -n "Starting $NAME "
if [ $START_METHOD == "start-stop-daemon" ]; then
#Start quite background uid and gid
start-stop-daemon --start --background --name $NAME --chdir $BASE_DIR --chuid $RUN_AS_USER --group $RUN_AS_GROUP --startas $EXEC_CMD -- $EXEC_ARGS
else
#nohup
cd $BASE_DIR
nohup sudo -u $RUN_AS_USER $EXEC_CMD $EXEC_ARGS >> $LOG_FILE 2>&1 &
fi
RETURNCODE=$?
[ $RETURNCODE -ne 0 ] && echo "[ FAILED: $RETURNCODE ]" && exit 1
WaitN $START_CODE $TIMEOUT_START
fi
}
StopDaemon() {
PID_STATUS=$(isRunning)
if [ $PID_STATUS -eq $STOP_CODE ]; then
echo "$NAME not running."
else
PID="$(GetPid)"
echo -n "Stopping $NAME "
$STOP_CMD $PID
[ $? -ne 0 ] && echo "[ FAILED ]" && exit 1
WaitN $STOP_CODE $TIMEOUT
fi
}
statusDaemon() {
PID_STATUS=$(isRunning)
if [ $PID_STATUS -eq $START_CODE ]; then
PID="$(GetPid)"
echo "$NAME is running with PID:$PID"
else
echo "$NAME is not running"
fi
}
# TODO : graceful-stop
case "$1" in
start)
StartDaemon ;;
stop)
StopDaemon ;;
restart)
StopDaemon
StartDaemon
;;
status)
statusDaemon ;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1 ;;
esac
exit 0
'''
class DaemonScript(object):
def __init__(self, module):
self.module = module
self.file_sha256 = None
self.changed = False
self.msg=""
if self.module.params["desc"] is None:
self.module.params["desc"] = self.module.params["name"] + " Daemon"
if self.module.params["base_dir"] is None:
self.module.params["base_dir"] = os.path.dirname(self.module.params["path"])
if self.module.params["grep_for"] is None:
self.module.params["grep_for"] = self.module.params["exec_cmd"]
if self.module.params["run_user"] is None:
self.module.params["run_user"] = getpass.getuser()
if self.module.params["run_group"] is None:
self.module.params["run_group"] = getpass.getuser() # NEEDS fix will break
if self.module.params["log_file"] is not None and self.module.params["start_method"] == "start-stop-daemon":
self.module.fail_json(msg="start-stop-daemon does not support logging")
self.file_args = self.module.load_file_common_arguments(module.params)
def write_to_dest(self, filename, initd_content):
try:
f = open(filename, 'w')
f.write(initd_content)
f.close()
except Exception as E:
self.module.fail_json(msg="Write error dir name %s does not exist" %
os.path.dirname(self.module.params["path"]))
def check_dest(self):
if os.path.isfile(self.module.params["path"]) and not os.access(self.module.params["path"], os.W_OK + os.R_OK):
self.module.fail_json(msg="Path %s not readable/writable" % (self.module.params["path"]))
elif os.path.isdir(os.path.dirname(self.module.params["path"])) and \
not os.access(os.path.dirname(self.module.params["path"]), os.W_OK + os.R_OK):
self.module.fail_json(msg="Destination directory %s not readable/writable" %
(os.path.dirname(self.module.params["path"])))
elif not os.path.isdir(os.path.dirname(self.module.params["path"])):
self.module.fail_json(msg="Destination dir name %s does not exist" %
os.path.dirname(self.module.params["path"]))
if os.path.isfile(self.module.params["path"]):
self.file_sha256 = self.module.sha256(self.module.params["path"])
def main(self):
self.check_dest()
initd_template = Template(initd_string)
initd_script = initd_template.safe_substitute(**self.module.params)
hash_object = _sha256(initd_script)
initd_script_dig = hash_object.hexdigest()
if initd_script_dig == self.file_sha256:
self.msg = "initd nothing needed"
else:
if self.module.check_mode:
self.changed = True
else:
self.write_to_dest(self.module.params["path"], initd_script)
self.changed = True
self.msg = "initd update"
self.changed = self.module.set_fs_attributes_if_different(self.file_args, self.changed)
self.module.exit_json(changed=self.changed, msg=self.msg)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(default=None, required=True, alias_name="daemon_name"),
exec_cmd=dict(default=None, required=True),
path=dict(default=None, required=True),
desc=dict(default=None, required=False),
base_dir=dict(default=None, required=False),
exec_args=dict(default="", required=False),
state=dict(default="present", choices=["absent", "present"]),
grep_for=dict(default=None, required=False),
run_user=dict(default=None, required=False),
run_group=dict(default=None, required=False),
stop_cmd=dict(default="kill -9", required=False),
timeout=dict(default=5, required=False, type="int"),
timeout_start=dict(default=5, required=False, type="int"),
wait_till_timeout=dict(default=1, choices=[0, 1], required=False, type="int"),
log_file=dict(default=None, required=False),
start_method=dict(default="start-stop-daemon", choices=["start-stop-daemon", "nohup"]),
),
add_file_common_args=True,
supports_check_mode=True
)
DaemonScript(module).main()
from string import Template
import getpass
# import module snippets
from ansible.module_utils.basic import *
main()
| mit | 8,668,241,455,763,653,000 | 30.868 | 158 | 0.582026 | false |
Skytim/nccuTEG | pybossa/api/task_run.py | 4 | 2652 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
"""
PyBossa api module for exposing domain object TaskRun via an API.
This package adds GET, POST, PUT and DELETE methods for:
* task_runs
"""
from flask import request
from flask.ext.login import current_user
from pybossa.model.task_run import TaskRun
from werkzeug.exceptions import Forbidden, BadRequest
from api_base import APIBase
from pybossa.util import get_user_id_or_ip
from pybossa.core import task_repo, sentinel
class TaskRunAPI(APIBase):
"""Class API for domain object TaskRun."""
__class__ = TaskRun
reserved_keys = set(['id', 'created', 'finish_time'])
def _update_object(self, taskrun):
"""Update task_run object with user id or ip."""
# validate that the task and project for this taskrun are ok
task = task_repo.get_task(taskrun.task_id)
if task is None: # pragma: no cover
raise Forbidden('Invalid task_id')
if (task.project_id != taskrun.project_id):
raise Forbidden('Invalid project_id')
if _check_task_requested_by_user(taskrun, sentinel.master) is False:
raise Forbidden('You must request a task first!')
# Add the user info so it cannot post again the same taskrun
if current_user.is_anonymous():
taskrun.user_ip = request.remote_addr
else:
taskrun.user_id = current_user.id
def _forbidden_attributes(self, data):
for key in data.keys():
if key in self.reserved_keys:
raise BadRequest("Reserved keys in payload")
def _check_task_requested_by_user(taskrun, redis_conn):
user_id_ip = get_user_id_or_ip()
usr = user_id_ip['user_id'] or user_id_ip['user_ip']
key = 'pybossa:task_requested:user:%s:task:%s' % (usr, taskrun.task_id)
task_requested = bool(redis_conn.get(key))
if user_id_ip['user_id'] is not None:
redis_conn.delete(key)
return task_requested
| agpl-3.0 | -6,216,720,680,259,834,000 | 35.833333 | 77 | 0.682881 | false |