from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
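# Example (hypothetical file): for a 640x480 RGB JPEG stored as "pics/cat.jpg",
# description() returns "cat (JPEG 640x480 RGB)"; when PIL cannot determine the
# format, the format tag is simply omitted, e.g. "cat (640x480 L)".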
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show available drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
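# A typical invocation (assuming an lpr spooler and a hypothetical queue named
# "office"):
#   pilprint -c -P office photo1.jpg photo2.png
# which selects colour output (monochrome = 0) and sets
# printerArgs to ["lpr", "-Poffice"].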
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
    except Exception:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
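# Each input image becomes one PostScript page: the title is drawn in 18 pt
# Helvetica-Narrow-Bold just above the image area, the VERSION string in 8 pt
# below it, and the image itself is fitted into the `letter` box defined above
# (a 6.5x9 inch region with one-inch left and bottom margins).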
| {
"content_hash": "02320b6ede028b51cd0c9cc3ddf402ca",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 68,
"avg_line_length": 26.359550561797754,
"alnum_prop": 0.5541346973572038,
"repo_name": "drakeloud/louderdev",
"id": "9430fb43dab67a787dcaf757d060814180b6763e",
"size": "2629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "louderdev/bin/pilprint.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "156059"
},
{
"name": "HTML",
"bytes": "176563"
},
{
"name": "JavaScript",
"bytes": "120049"
},
{
"name": "Python",
"bytes": "168254"
},
{
"name": "Shell",
"bytes": "4206"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
from django.utils import timezone
import warnings
from collections import namedtuple
from django.test import TestCase
from ..factories import SpecFactory, JobFactory
from ..models import Job, Package, Spec
from ..tasks import query_pypi
def fake_distributions(*distributions):
Distribution = namedtuple('Distribution', ['name', 'version'])
result = []
for dist in distributions:
name, version = dist.split('==')
result.append(Distribution(name, version))
return result
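# For example, fake_distributions('Django==1.5.4', 'six==1.10.0') returns
# [Distribution(name='Django', version='1.5.4'),
#  Distribution(name='six', version='1.10.0')].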
def fake_requirement(name, specs):
Requirement = namedtuple('Requirement', ['name', 'specs', 'extras'])
return Requirement(name, specs, extras=[])
class JobTest(TestCase):
def setUp(self):
self.reqs_txt = """
-r some_missing_file
django>=1.4,<1.5
Django-Geoip==0.3
# tests below
coverage
coveralls>0.2
# TODO: VCS
"""
def test_can_be_created_from_requirements_txt(self):
with warnings.catch_warnings():
# We're ignoring -r not being parsed
# "Recursive requirements not supported. Skipping."
warnings.simplefilter("ignore", category=UserWarning)
job = Job.objects.create_from_requirements(self.reqs_txt)
assert job.requirements == self.reqs_txt
assert list(map(str, job.lines.all().order_by('pk'))) == [
'django>=1.4,<1.5',
'Django-Geoip==0.3',
'coverage',
'coveralls>0.2']
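    # job.requirements keeps the submitted text verbatim, while the parsed
    # lines are expected to drop comments, blank lines and the unsupported
    # "-r" include, preserving the original order of the four real requirements.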
class JobStatusTest(TestCase):
def test_completed_if_no_specs_no_lines(self):
job = JobFactory()
assert job.status == 'success', 'No specs, no lines'
def test_pending_if_unparsed_lines(self):
job = JobFactory(lines=['spanish=42,inquisition==7'])
assert job.status == 'pending', 'It has 2 unparsed lines'
def test_pending_if_pending_specs(self):
job = JobFactory(specs=['foo=1,bar==2'])
assert job.status == 'running', 'It has 2 unfinished specs, but lines are parsed'
def test_running_if_running_and_finished_specs(self):
job = JobFactory(specs=['foo=1,bar==2'])
spec = job.specs.first()
spec.status = 'running'
spec.save()
job = Job.objects.get(pk=job.pk)
assert job.status == 'running', 'Job has started, but has not finished yet'
def test_running_if_one_spec_pending(self):
job = JobFactory(specs=['foo=1,bar==2'])
job.specs.all().update(status='success')
job = Job.objects.get(pk=job.pk)
assert job.status == 'success', 'One spec pending'
def test_running_if_finished_and_pending_specs(self):
job = JobFactory(specs=['steve==1', 'jobs==2'])
spec = job.specs.first()
spec.status = 'finished'
spec.save()
assert job.status == 'running', 'One spec has finished, but 1 line is not parsed yet'
def test_completed_if_specs_completed(self):
job = JobFactory(specs=['foo=1,bar==2'])
job.specs.all().update(status='success')
job = Job.objects.get(pk=job.pk)
assert job.status == 'success', 'All specs have finished'
class JobSpecTest(TestCase):
def test_process_requirement(self):
job = JobFactory(lines=['Django==1.5.4'])
package, package_created, spec, spec_created = job.lines.all()[0].set_distribution(*fake_distributions('Django==1.5.4'))
assert list(map(str, Package.objects.all())) == ['Django']
assert list(map(str, job.specs.all())) == ['Django==1.5.4']
assert package_created
assert spec_created
def test_does_not_create_duplicate_specs(self):
spec = SpecFactory(version='0.2.19', package__name='lettuce')
job = JobFactory(lines=['lettuce==0.2.19'])
same_package, package_created, same_spec, spec_created = job.lines.all()[0].set_distribution(*fake_distributions('lettuce==0.2.19'))
assert not package_created
assert not spec_created
assert Spec.objects.count() == 1
assert Package.objects.count() == 1
assert job.specs.all().first().version == spec.version
assert job.specs.all().first().package.name == spec.package.name
assert spec.pk == same_spec.pk
assert same_package.pk == same_spec.package.pk
class PypiTaskTest(TestCase):
@patch('api.PyPI.get_info')
def test_updates_spec(self, get_info_mock):
last_release_date = timezone.now()
py3_versions = ['3', '3.2', '3.3']
get_info_mock.return_value = {
'last_release_date': last_release_date,
'py3_versions': py3_versions,
}
spec = SpecFactory(version='0.2.19', package__name='lettuce')
assert query_pypi(spec.pk) == get_info_mock.return_value
spec = Spec.objects.get(pk=spec.pk)
assert spec.release_date == last_release_date
assert spec.python_versions == py3_versions
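        # query_pypi(spec.pk) presumably delegates to api.PyPI.get_info() and
        # copies last_release_date/py3_versions onto the Spec row, which is why
        # the task's return value can be compared to the mocked dict directly.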
| {
"content_hash": "39a7318f339b2e91d7f15b1143a462ef",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 140,
"avg_line_length": 36.05072463768116,
"alnum_prop": 0.6182914572864322,
"repo_name": "futurecolors/gopython3",
"id": "02e68291594bb6c1efdd15444a5a57d4ed507f59",
"size": "4991",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "gopython3/core/tests/test_unit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8288"
},
{
"name": "JavaScript",
"bytes": "20005"
},
{
"name": "Python",
"bytes": "99644"
},
{
"name": "Shell",
"bytes": "1559"
}
],
"symlink_target": ""
} |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import time
import unittest
from datetime import datetime, timedelta
from dateutil.tz import tzutc, tzoffset
from azure import (
WindowsAzureError,
WindowsAzureBatchOperationError,
WindowsAzureMissingResourceError,
)
from azure.storage import (
AccessPolicy,
Entity,
EntityProperty,
SignedIdentifier,
SignedIdentifiers,
StorageServiceProperties,
TableService,
TableSharedAccessPermissions,
)
from azure.storage.sharedaccesssignature import SharedAccessPolicy
from util import (
AzureTestCase,
credentials,
getUniqueName,
set_service_options,
)
#------------------------------------------------------------------------------
MAX_RETRY = 60
#------------------------------------------------------------------------------
class TableServiceTest(AzureTestCase):
def setUp(self):
self.ts = TableService(credentials.getStorageServicesName(),
credentials.getStorageServicesKey())
set_service_options(self.ts)
self.table_name = getUniqueName('uttable')
self.additional_table_names = []
def tearDown(self):
self.cleanup()
return super(TableServiceTest, self).tearDown()
def cleanup(self):
try:
self.ts.delete_table(self.table_name)
        except Exception:
pass
for name in self.additional_table_names:
try:
self.ts.delete_table(name)
            except Exception:
pass
#--Helpers-----------------------------------------------------------------
def _create_table(self, table_name):
'''
Creates a table with the specified name.
'''
self.ts.create_table(table_name, True)
def _create_table_with_default_entities(self, table_name, entity_count):
'''
Creates a table with the specified name and adds entities with the
default set of values. PartitionKey is set to 'MyPartition' and RowKey
is set to a unique counter value starting at 1 (as a string).
'''
entities = []
self._create_table(table_name)
for i in range(1, entity_count + 1):
entities.append(self.ts.insert_entity(
table_name,
self._create_default_entity_dict('MyPartition', str(i))))
return entities
def _create_default_entity_class(self, partition, row):
'''
Creates a class-based entity with fixed values, using all
of the supported data types.
'''
entity = Entity()
entity.PartitionKey = partition
entity.RowKey = row
entity.age = 39
entity.sex = 'male'
entity.married = True
entity.deceased = False
entity.optional = None
entity.ratio = 3.1
entity.large = 9333111000
entity.Birthday = datetime(1973, 10, 4)
entity.birthday = datetime(1970, 10, 4)
entity.binary = None
entity.other = EntityProperty('Edm.Int64', 20)
entity.clsid = EntityProperty(
'Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833')
return entity
def _create_default_entity_dict(self, partition, row):
'''
Creates a dictionary-based entity with fixed values, using all
of the supported data types.
'''
return {'PartitionKey': partition,
'RowKey': row,
'age': 39,
'sex': 'male',
'married': True,
'deceased': False,
'optional': None,
'ratio': 3.1,
'large': 9333111000,
'Birthday': datetime(1973, 10, 4),
'birthday': datetime(1970, 10, 4),
'other': EntityProperty('Edm.Int64', 20),
'clsid': EntityProperty(
'Edm.Guid',
'c9da6455-213d-42c9-9a79-3e9149a57833')}
def _create_updated_entity_dict(self, partition, row):
'''
Creates a dictionary-based entity with fixed values, with a
different set of values than the default entity. It
adds fields, changes field values, changes field types,
and removes fields when compared to the default entity.
'''
return {'PartitionKey': partition,
'RowKey': row,
'age': 'abc',
'sex': 'female',
'sign': 'aquarius',
'birthday': datetime(1991, 10, 4)}
def _assert_default_entity(self, entity):
'''
Asserts that the entity passed in matches the default entity.
'''
self.assertEqual(entity.age, 39)
self.assertEqual(entity.sex, 'male')
self.assertEqual(entity.married, True)
self.assertEqual(entity.deceased, False)
self.assertFalse(hasattr(entity, "aquarius"))
self.assertEqual(entity.ratio, 3.1)
self.assertEqual(entity.large, 9333111000)
self.assertEqual(entity.Birthday, datetime(1973, 10, 4, tzinfo=tzutc()))
self.assertEqual(entity.birthday, datetime(1970, 10, 4, tzinfo=tzutc()))
self.assertEqual(entity.other, 20)
self.assertIsInstance(entity.clsid, EntityProperty)
self.assertEqual(entity.clsid.type, 'Edm.Guid')
self.assertEqual(entity.clsid.value,
'c9da6455-213d-42c9-9a79-3e9149a57833')
self.assertTrue(hasattr(entity, "Timestamp"))
def _assert_updated_entity(self, entity):
'''
Asserts that the entity passed in matches the updated entity.
'''
self.assertEqual(entity.age, 'abc')
self.assertEqual(entity.sex, 'female')
self.assertFalse(hasattr(entity, "married"))
self.assertFalse(hasattr(entity, "deceased"))
self.assertEqual(entity.sign, 'aquarius')
self.assertFalse(hasattr(entity, "optional"))
self.assertFalse(hasattr(entity, "ratio"))
self.assertFalse(hasattr(entity, "large"))
self.assertFalse(hasattr(entity, "Birthday"))
self.assertEqual(entity.birthday, datetime(1991, 10, 4, tzinfo=tzutc()))
self.assertFalse(hasattr(entity, "other"))
self.assertFalse(hasattr(entity, "clsid"))
self.assertTrue(hasattr(entity, "Timestamp"))
def _assert_merged_entity(self, entity):
'''
Asserts that the entity passed in matches the default entity
merged with the updated entity.
'''
self.assertEqual(entity.age, 'abc')
self.assertEqual(entity.sex, 'female')
self.assertEqual(entity.sign, 'aquarius')
self.assertEqual(entity.married, True)
self.assertEqual(entity.deceased, False)
self.assertEqual(entity.ratio, 3.1)
self.assertEqual(entity.large, 9333111000)
self.assertEqual(entity.Birthday, datetime(1973, 10, 4, tzinfo=tzutc()))
self.assertEqual(entity.birthday, datetime(1991, 10, 4, tzinfo=tzutc()))
self.assertEqual(entity.other, 20)
self.assertIsInstance(entity.clsid, EntityProperty)
self.assertEqual(entity.clsid.type, 'Edm.Guid')
self.assertEqual(entity.clsid.value,
'c9da6455-213d-42c9-9a79-3e9149a57833')
self.assertTrue(hasattr(entity, "Timestamp"))
def _get_shared_access_policy(self, permission):
date_format = "%Y-%m-%dT%H:%M:%SZ"
start = datetime.utcnow() - timedelta(minutes=1)
expiry = start + timedelta(hours=1)
return SharedAccessPolicy(
AccessPolicy(
start.strftime(date_format),
expiry.strftime(date_format),
permission
)
)
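        # For example, a policy generated at 2014-01-02 10:30:00 UTC covers
        # 2014-01-02T10:29:00Z through 2014-01-02T11:29:00Z, so the token is
        # already valid when issued and expires roughly an hour later.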
#--Test cases for table service -------------------------------------------
def test_get_set_table_service_properties(self):
table_properties = self.ts.get_table_service_properties()
self.ts.set_table_service_properties(table_properties)
tests = [('logging.delete', True),
('logging.delete', False),
('logging.read', True),
('logging.read', False),
('logging.write', True),
('logging.write', False),
]
for path, value in tests:
# print path
cur = table_properties
for component in path.split('.')[:-1]:
cur = getattr(cur, component)
last_attr = path.split('.')[-1]
setattr(cur, last_attr, value)
self.ts.set_table_service_properties(table_properties)
retry_count = 0
while retry_count < MAX_RETRY:
table_properties = self.ts.get_table_service_properties()
cur = table_properties
for component in path.split('.'):
cur = getattr(cur, component)
if value == cur:
break
time.sleep(1)
retry_count += 1
self.assertEqual(value, cur)
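            # The properties update is not always visible immediately, so the
            # loop above polls get_table_service_properties() up to MAX_RETRY
            # times (one second apart) before making the final assertion.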
def test_table_service_retention_single_set(self):
table_properties = self.ts.get_table_service_properties()
table_properties.logging.retention_policy.enabled = False
table_properties.logging.retention_policy.days = 5
# TODO: Better error, ValueError?
self.assertRaises(WindowsAzureError,
self.ts.set_table_service_properties,
table_properties)
table_properties = self.ts.get_table_service_properties()
table_properties.logging.retention_policy.days = None
table_properties.logging.retention_policy.enabled = True
# TODO: Better error, ValueError?
self.assertRaises(WindowsAzureError,
self.ts.set_table_service_properties,
table_properties)
def test_table_service_set_both(self):
table_properties = self.ts.get_table_service_properties()
table_properties.logging.retention_policy.enabled = True
table_properties.logging.retention_policy.days = 5
self.ts.set_table_service_properties(table_properties)
table_properties = self.ts.get_table_service_properties()
self.assertEqual(
True, table_properties.logging.retention_policy.enabled)
self.assertEqual(5, table_properties.logging.retention_policy.days)
#--Test cases for tables --------------------------------------------------
def test_create_table(self):
# Arrange
# Act
created = self.ts.create_table(self.table_name)
# Assert
self.assertTrue(created)
def test_create_table_fail_on_exist(self):
# Arrange
# Act
created = self.ts.create_table(self.table_name, True)
# Assert
self.assertTrue(created)
def test_create_table_with_already_existing_table(self):
# Arrange
# Act
created1 = self.ts.create_table(self.table_name)
created2 = self.ts.create_table(self.table_name)
# Assert
self.assertTrue(created1)
self.assertFalse(created2)
def test_create_table_with_already_existing_table_fail_on_exist(self):
# Arrange
# Act
created = self.ts.create_table(self.table_name)
with self.assertRaises(WindowsAzureError):
self.ts.create_table(self.table_name, True)
# Assert
self.assertTrue(created)
def test_query_tables(self):
# Arrange
self._create_table(self.table_name)
# Act
tables = self.ts.query_tables()
for table in tables:
pass
# Assert
tableNames = [x.name for x in tables]
self.assertGreaterEqual(len(tableNames), 1)
self.assertGreaterEqual(len(tables), 1)
self.assertIn(self.table_name, tableNames)
def test_query_tables_with_table_name(self):
# Arrange
self._create_table(self.table_name)
# Act
tables = self.ts.query_tables(self.table_name)
for table in tables:
pass
# Assert
self.assertEqual(len(tables), 1)
self.assertEqual(tables[0].name, self.table_name)
def test_query_tables_with_table_name_no_tables(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.ts.query_tables(self.table_name)
# Assert
def test_query_tables_with_top(self):
# Arrange
self.additional_table_names = [
self.table_name + suffix for suffix in 'abcd']
for name in self.additional_table_names:
self.ts.create_table(name)
# Act
tables = self.ts.query_tables(None, 3)
for table in tables:
pass
# Assert
self.assertEqual(len(tables), 3)
def test_query_tables_with_top_and_next_table_name(self):
# Arrange
self.additional_table_names = [
self.table_name + suffix for suffix in 'abcd']
for name in self.additional_table_names:
self.ts.create_table(name)
# Act
tables_set1 = self.ts.query_tables(None, 3)
tables_set2 = self.ts.query_tables(
None, 3, tables_set1.x_ms_continuation['NextTableName'])
# Assert
self.assertEqual(len(tables_set1), 3)
self.assertGreaterEqual(len(tables_set2), 1)
self.assertLessEqual(len(tables_set2), 3)
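        # Paged results expose an x_ms_continuation dict; feeding its
        # 'NextTableName' value back into query_tables() retrieves the next
        # page of results, which is what the second call above relies on.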
def test_delete_table_with_existing_table(self):
# Arrange
self._create_table(self.table_name)
# Act
deleted = self.ts.delete_table(self.table_name)
# Assert
self.assertTrue(deleted)
tables = self.ts.query_tables()
self.assertNamedItemNotInContainer(tables, self.table_name)
def test_delete_table_with_existing_table_fail_not_exist(self):
# Arrange
self._create_table(self.table_name)
# Act
deleted = self.ts.delete_table(self.table_name, True)
# Assert
self.assertTrue(deleted)
tables = self.ts.query_tables()
self.assertNamedItemNotInContainer(tables, self.table_name)
def test_delete_table_with_non_existing_table(self):
# Arrange
# Act
deleted = self.ts.delete_table(self.table_name)
# Assert
self.assertFalse(deleted)
def test_delete_table_with_non_existing_table_fail_not_exist(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.ts.delete_table(self.table_name, True)
# Assert
#--Test cases for entities ------------------------------------------
def test_insert_entity_dictionary(self):
# Arrange
self._create_table(self.table_name)
# Act
        entity_dict = self._create_default_entity_dict('MyPartition', '1')
        resp = self.ts.insert_entity(self.table_name, entity_dict)
# Assert
self.assertIsNotNone(resp)
def test_insert_entity_class_instance(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = self._create_default_entity_class('MyPartition', '1')
resp = self.ts.insert_entity(self.table_name, entity)
# Assert
self.assertIsNotNone(resp)
def test_insert_entity_conflict(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.insert_entity(
self.table_name,
self._create_default_entity_dict('MyPartition', '1'))
# Assert
def test_get_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
resp = self.ts.get_entity(self.table_name, 'MyPartition', '1')
# Assert
self.assertEqual(resp.PartitionKey, 'MyPartition')
self.assertEqual(resp.RowKey, '1')
self._assert_default_entity(resp)
def test_get_entity_not_existing(self):
# Arrange
self._create_table(self.table_name)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.get_entity(self.table_name, 'MyPartition', '1')
# Assert
def test_get_entity_with_select(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
resp = self.ts.get_entity(
self.table_name, 'MyPartition', '1', 'age,sex')
# Assert
self.assertEqual(resp.age, 39)
self.assertEqual(resp.sex, 'male')
self.assertFalse(hasattr(resp, "birthday"))
self.assertFalse(hasattr(resp, "married"))
self.assertFalse(hasattr(resp, "deceased"))
def test_query_entities(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 2)
# Act
resp = self.ts.query_entities(self.table_name)
# Assert
self.assertEqual(len(resp), 2)
for entity in resp:
self.assertEqual(entity.PartitionKey, 'MyPartition')
self._assert_default_entity(entity)
self.assertEqual(resp[0].RowKey, '1')
self.assertEqual(resp[1].RowKey, '2')
def test_query_entities_large(self):
# Arrange
self._create_table(self.table_name)
total_entities_count = 1000
entities_per_batch = 50
for j in range(total_entities_count // entities_per_batch):
self.ts.begin_batch()
for i in range(entities_per_batch):
entity = Entity()
entity.PartitionKey = 'large'
entity.RowKey = 'batch{0}-item{1}'.format(j, i)
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'hello world;' * 100
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
self.ts.commit_batch()
# Act
start_time = datetime.now()
resp = self.ts.query_entities(self.table_name)
elapsed_time = datetime.now() - start_time
# Assert
print('query_entities took {0} secs.'.format(elapsed_time.total_seconds()))
# azure allocates 5 seconds to execute a query
# if it runs slowly, it will return fewer results and make the test fail
self.assertEqual(len(resp), total_entities_count)
def test_query_entities_with_filter(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 2)
self.ts.insert_entity(
self.table_name,
self._create_default_entity_dict('MyOtherPartition', '3'))
# Act
resp = self.ts.query_entities(
self.table_name, "PartitionKey eq 'MyPartition'")
# Assert
self.assertEqual(len(resp), 2)
for entity in resp:
self.assertEqual(entity.PartitionKey, 'MyPartition')
self._assert_default_entity(entity)
def test_query_entities_with_select(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 2)
# Act
resp = self.ts.query_entities(self.table_name, None, 'age,sex')
# Assert
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].age, 39)
self.assertEqual(resp[0].sex, 'male')
self.assertFalse(hasattr(resp[0], "birthday"))
self.assertFalse(hasattr(resp[0], "married"))
self.assertFalse(hasattr(resp[0], "deceased"))
def test_query_entities_with_top(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 3)
# Act
resp = self.ts.query_entities(self.table_name, None, None, 2)
# Assert
self.assertEqual(len(resp), 2)
def test_query_entities_with_top_and_next(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 5)
# Act
resp1 = self.ts.query_entities(self.table_name, None, None, 2)
resp2 = self.ts.query_entities(
self.table_name, None, None, 2,
resp1.x_ms_continuation['NextPartitionKey'],
resp1.x_ms_continuation['NextRowKey'])
resp3 = self.ts.query_entities(
self.table_name, None, None, 2,
resp2.x_ms_continuation['NextPartitionKey'],
resp2.x_ms_continuation['NextRowKey'])
# Assert
self.assertEqual(len(resp1), 2)
self.assertEqual(len(resp2), 2)
self.assertEqual(len(resp3), 1)
self.assertEqual(resp1[0].RowKey, '1')
self.assertEqual(resp1[1].RowKey, '2')
self.assertEqual(resp2[0].RowKey, '3')
self.assertEqual(resp2[1].RowKey, '4')
self.assertEqual(resp3[0].RowKey, '5')
def test_update_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.update_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_update_entity_with_if_matches(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.update_entity(
self.table_name,
'MyPartition', '1', sent_entity, if_match=entities[0].etag)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_update_entity_with_if_doesnt_match(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
with self.assertRaises(WindowsAzureError):
self.ts.update_entity(
self.table_name, 'MyPartition', '1', sent_entity,
if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
# Assert
def test_insert_or_merge_entity_with_existing_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.insert_or_merge_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_merged_entity(received_entity)
def test_insert_or_merge_entity_with_non_existing_entity(self):
# Arrange
self._create_table(self.table_name)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.insert_or_merge_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_insert_or_replace_entity_with_existing_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.insert_or_replace_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_insert_or_replace_entity_with_non_existing_entity(self):
# Arrange
self._create_table(self.table_name)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.insert_or_replace_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_merge_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.merge_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_merged_entity(received_entity)
def test_merge_entity_not_existing(self):
# Arrange
self._create_table(self.table_name)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
with self.assertRaises(WindowsAzureError):
self.ts.merge_entity(
self.table_name, 'MyPartition', '1', sent_entity)
# Assert
def test_merge_entity_with_if_matches(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = self.ts.merge_entity(
self.table_name, 'MyPartition', '1',
sent_entity, if_match=entities[0].etag)
# Assert
self.assertIsNotNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_merged_entity(received_entity)
def test_merge_entity_with_if_doesnt_match(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
with self.assertRaises(WindowsAzureError):
self.ts.merge_entity(
self.table_name, 'MyPartition', '1', sent_entity,
if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
# Assert
def test_delete_entity(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
resp = self.ts.delete_entity(self.table_name, 'MyPartition', '1')
# Assert
self.assertIsNone(resp)
with self.assertRaises(WindowsAzureError):
self.ts.get_entity(self.table_name, 'MyPartition', '1')
def test_delete_entity_not_existing(self):
# Arrange
self._create_table(self.table_name)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.delete_entity(self.table_name, 'MyPartition', '1')
# Assert
def test_delete_entity_with_if_matches(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
resp = self.ts.delete_entity(
self.table_name, 'MyPartition', '1', if_match=entities[0].etag)
# Assert
self.assertIsNone(resp)
with self.assertRaises(WindowsAzureError):
self.ts.get_entity(self.table_name, 'MyPartition', '1')
def test_delete_entity_with_if_doesnt_match(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.delete_entity(
self.table_name, 'MyPartition', '1',
if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
# Assert
#--Test cases for batch ---------------------------------------------
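    # Operations issued between begin_batch() and commit_batch() are buffered
    # and submitted as a single entity-group transaction; as the negative tests
    # further down show, every operation in a batch must target the same table
    # and partition key, and may touch a given row at most once.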
def test_with_filter_single(self):
called = []
def my_filter(request, next):
called.append(True)
return next(request)
tc = self.ts.with_filter(my_filter)
tc.create_table(self.table_name)
self.assertTrue(called)
del called[:]
tc.delete_table(self.table_name)
self.assertTrue(called)
del called[:]
def test_with_filter_chained(self):
called = []
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
tc = self.ts.with_filter(filter_a).with_filter(filter_b)
tc.create_table(self.table_name)
self.assertEqual(called, ['b', 'a'])
tc.delete_table(self.table_name)
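        # with_filter() wraps the existing request channel, so the filter added
        # last is outermost and runs first; hence the recorded order ['b', 'a'].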
def test_batch_insert(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.begin_batch()
self.ts.insert_entity(self.table_name, entity)
self.ts.commit_batch()
# Assert
result = self.ts.get_entity(self.table_name, '001', 'batch_insert')
self.assertIsNotNone(result)
def test_batch_update(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_update'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity = self.ts.get_entity(self.table_name, '001', 'batch_update')
self.assertEqual(3, entity.test3)
entity.test2 = 'value1'
self.ts.begin_batch()
self.ts.update_entity(self.table_name, '001', 'batch_update', entity)
self.ts.commit_batch()
entity = self.ts.get_entity(self.table_name, '001', 'batch_update')
# Assert
self.assertEqual('value1', entity.test2)
def test_batch_merge(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity = self.ts.get_entity(self.table_name, '001', 'batch_merge')
self.assertEqual(3, entity.test3)
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
entity.test2 = 'value1'
self.ts.begin_batch()
self.ts.merge_entity(self.table_name, '001', 'batch_merge', entity)
self.ts.commit_batch()
entity = self.ts.get_entity(self.table_name, '001', 'batch_merge')
# Assert
self.assertEqual('value1', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_update_if_match(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 1)
# Act
sent_entity = self._create_updated_entity_dict('MyPartition', '1')
self.ts.begin_batch()
resp = self.ts.update_entity(
self.table_name,
'MyPartition', '1', sent_entity, if_match=entities[0].etag)
self.ts.commit_batch()
# Assert
self.assertIsNone(resp)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_batch_update_if_doesnt_match(self):
# Arrange
entities = self._create_table_with_default_entities(self.table_name, 2)
# Act
sent_entity1 = self._create_updated_entity_dict('MyPartition', '1')
sent_entity2 = self._create_updated_entity_dict('MyPartition', '2')
self.ts.begin_batch()
self.ts.update_entity(
self.table_name, 'MyPartition', '1', sent_entity1,
if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
self.ts.update_entity(
self.table_name, 'MyPartition', '2', sent_entity2)
try:
self.ts.commit_batch()
except WindowsAzureBatchOperationError as error:
self.assertEqual(error.code, 'UpdateConditionNotSatisfied')
self.assertTrue(str(error).startswith('0:The update condition specified in the request was not satisfied.'))
else:
self.fail('WindowsAzureBatchOperationError was expected')
# Assert
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '1')
self._assert_default_entity(received_entity)
received_entity = self.ts.get_entity(
self.table_name, 'MyPartition', '2')
self._assert_default_entity(received_entity)
def test_batch_insert_replace(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_replace'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.begin_batch()
self.ts.insert_or_replace_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
self.ts.commit_batch()
entity = self.ts.get_entity(
self.table_name, '001', 'batch_insert_replace')
# Assert
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_insert_merge(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_merge'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.begin_batch()
self.ts.insert_or_merge_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
self.ts.commit_batch()
entity = self.ts.get_entity(
self.table_name, '001', 'batch_insert_merge')
# Assert
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_delete(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_delete'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity = self.ts.get_entity(self.table_name, '001', 'batch_delete')
#self.assertEqual(3, entity.test3)
self.ts.begin_batch()
self.ts.delete_entity(self.table_name, '001', 'batch_delete')
self.ts.commit_batch()
def test_batch_inserts(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = 'batch_inserts'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
self.ts.begin_batch()
for i in range(100):
entity.RowKey = str(i)
self.ts.insert_entity(self.table_name, entity)
self.ts.commit_batch()
entities = self.ts.query_entities(
self.table_name, "PartitionKey eq 'batch_inserts'", '')
# Assert
self.assertIsNotNone(entities)
self.assertEqual(100, len(entities))
def test_batch_all_operations_together(self):
# Arrange
self._create_table(self.table_name)
# Act
entity = Entity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
entity.test = EntityProperty('Edm.Boolean', 'true')
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-2'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-3'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-4'
self.ts.insert_entity(self.table_name, entity)
self.ts.begin_batch()
entity.RowKey = 'batch_all_operations_together'
self.ts.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-1'
self.ts.delete_entity(
self.table_name, entity.PartitionKey, entity.RowKey)
entity.RowKey = 'batch_all_operations_together-2'
entity.test3 = 10
self.ts.update_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-3'
entity.test3 = 100
self.ts.merge_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-4'
entity.test3 = 10
self.ts.insert_or_replace_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-5'
self.ts.insert_or_merge_entity(
self.table_name, entity.PartitionKey, entity.RowKey, entity)
self.ts.commit_batch()
# Assert
entities = self.ts.query_entities(
self.table_name, "PartitionKey eq '003'", '')
self.assertEqual(5, len(entities))
def test_batch_same_row_operations_fail(self):
# Arrange
self._create_table(self.table_name)
entity = self._create_default_entity_dict('001', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.begin_batch()
entity = self._create_updated_entity_dict(
'001', 'batch_negative_1')
self.ts.update_entity(
self.table_name,
entity['PartitionKey'],
entity['RowKey'], entity)
entity = self._create_default_entity_dict(
'001', 'batch_negative_1')
self.ts.merge_entity(
self.table_name,
entity['PartitionKey'],
entity['RowKey'], entity)
self.ts.cancel_batch()
# Assert
def test_batch_different_partition_operations_fail(self):
# Arrange
self._create_table(self.table_name)
entity = self._create_default_entity_dict('001', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.begin_batch()
entity = self._create_updated_entity_dict(
'001', 'batch_negative_1')
self.ts.update_entity(
self.table_name, entity['PartitionKey'], entity['RowKey'],
entity)
entity = self._create_default_entity_dict(
'002', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
self.ts.cancel_batch()
# Assert
def test_batch_different_table_operations_fail(self):
# Arrange
other_table_name = self.table_name + 'other'
self.additional_table_names = [other_table_name]
self._create_table(self.table_name)
self._create_table(other_table_name)
# Act
with self.assertRaises(WindowsAzureError):
self.ts.begin_batch()
entity = self._create_default_entity_dict(
'001', 'batch_negative_1')
self.ts.insert_entity(self.table_name, entity)
entity = self._create_default_entity_dict(
'001', 'batch_negative_2')
self.ts.insert_entity(other_table_name, entity)
self.ts.cancel_batch()
def test_unicode_property_value(self):
''' regression test for github issue #57'''
# Act
self._create_table(self.table_name)
self.ts.insert_entity(
self.table_name,
{'PartitionKey': 'test', 'RowKey': 'test1', 'Description': u'ꀕ'})
self.ts.insert_entity(
self.table_name,
{'PartitionKey': 'test', 'RowKey': 'test2', 'Description': 'ꀕ'})
resp = self.ts.query_entities(
self.table_name, "PartitionKey eq 'test'")
# Assert
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].Description, u'ꀕ')
self.assertEqual(resp[1].Description, u'ꀕ')
def test_unicode_property_name(self):
# Act
self._create_table(self.table_name)
self.ts.insert_entity(
self.table_name,
{'PartitionKey': 'test', 'RowKey': 'test1', u'啊齄丂狛狜': u'ꀕ'})
self.ts.insert_entity(
self.table_name,
{'PartitionKey': 'test', 'RowKey': 'test2', u'啊齄丂狛狜': 'hello'})
resp = self.ts.query_entities(
self.table_name, "PartitionKey eq 'test'")
# Assert
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].__dict__[u'啊齄丂狛狜'], u'ꀕ')
self.assertEqual(resp[1].__dict__[u'啊齄丂狛狜'], u'hello')
def test_unicode_create_table_unicode_name(self):
# Arrange
self.table_name = self.table_name + u'啊齄丂狛狜'
# Act
with self.assertRaises(WindowsAzureError):
# not supported - table name must be alphanumeric, lowercase
self.ts.create_table(self.table_name)
# Assert
def test_empty_and_spaces_property_value(self):
# Act
self._create_table(self.table_name)
self.ts.insert_entity(
self.table_name,
{
'PartitionKey': 'test',
'RowKey': 'test1',
'EmptyByte': '',
'EmptyUnicode': u'',
'SpacesOnlyByte': ' ',
'SpacesOnlyUnicode': u' ',
'SpacesBeforeByte': ' Text',
'SpacesBeforeUnicode': u' Text',
'SpacesAfterByte': 'Text ',
'SpacesAfterUnicode': u'Text ',
'SpacesBeforeAndAfterByte': ' Text ',
'SpacesBeforeAndAfterUnicode': u' Text ',
})
resp = self.ts.get_entity(self.table_name, 'test', 'test1')
# Assert
self.assertIsNotNone(resp)
self.assertEqual(resp.EmptyByte, '')
self.assertEqual(resp.EmptyUnicode, u'')
self.assertEqual(resp.SpacesOnlyByte, ' ')
self.assertEqual(resp.SpacesOnlyUnicode, u' ')
self.assertEqual(resp.SpacesBeforeByte, ' Text')
self.assertEqual(resp.SpacesBeforeUnicode, u' Text')
self.assertEqual(resp.SpacesAfterByte, 'Text ')
self.assertEqual(resp.SpacesAfterUnicode, u'Text ')
self.assertEqual(resp.SpacesBeforeAndAfterByte, ' Text ')
self.assertEqual(resp.SpacesBeforeAndAfterUnicode, u' Text ')
def test_none_property_value(self):
# Act
self._create_table(self.table_name)
self.ts.insert_entity(
self.table_name,
{
'PartitionKey': 'test',
'RowKey': 'test1',
'NoneValue': None,
})
resp = self.ts.get_entity(self.table_name, 'test', 'test1')
# Assert
self.assertIsNotNone(resp)
self.assertFalse(hasattr(resp, 'NoneValue'))
def test_binary_property_value(self):
# Act
binary_data = b'\x01\x02\x03\x04\x05\x06\x07\x08\t\n'
self._create_table(self.table_name)
self.ts.insert_entity(
self.table_name,
{
'PartitionKey': 'test',
'RowKey': 'test1',
'binary': EntityProperty('Edm.Binary', binary_data)
})
resp = self.ts.get_entity(self.table_name, 'test', 'test1')
# Assert
self.assertIsNotNone(resp)
self.assertEqual(resp.binary.type, 'Edm.Binary')
self.assertEqual(resp.binary.value, binary_data)
def test_timezone(self):
# Act
local_tz = tzoffset('BRST', -10800)
local_date = datetime(2003, 9, 27, 9, 52, 43, tzinfo=local_tz)
self._create_table(self.table_name)
self.ts.insert_entity(
self.table_name,
{
'PartitionKey': 'test',
'RowKey': 'test1',
'date': local_date,
})
resp = self.ts.get_entity(self.table_name, 'test', 'test1')
# Assert
self.assertIsNotNone(resp)
self.assertEqual(resp.date, local_date.astimezone(tzutc()))
self.assertEqual(resp.date.astimezone(local_tz), local_date)
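        # The service stores datetimes in UTC: a value written with a -03:00
        # offset comes back with tzinfo=tzutc(), so it has to be converted back
        # to the local zone for a round-trip comparison.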
def test_sas_query(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 2)
token = self.ts.generate_shared_access_signature(
self.table_name,
self._get_shared_access_policy(TableSharedAccessPermissions.QUERY),
)
# Act
service = TableService(
credentials.getStorageServicesName(),
sas_token=token,
)
set_service_options(service)
resp = self.ts.query_entities(self.table_name, None, 'age,sex')
# Assert
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].age, 39)
self.assertEqual(resp[0].sex, 'male')
self.assertFalse(hasattr(resp[0], "birthday"))
self.assertFalse(hasattr(resp[0], "married"))
self.assertFalse(hasattr(resp[0], "deceased"))
def test_sas_add(self):
# Arrange
self._create_table(self.table_name)
policy = self._get_shared_access_policy(TableSharedAccessPermissions.ADD)
token = self.ts.generate_shared_access_signature(self.table_name, policy)
# Act
service = TableService(
credentials.getStorageServicesName(),
sas_token=token,
)
set_service_options(service)
service.insert_entity(
self.table_name,
{
'PartitionKey': 'test',
'RowKey': 'test1',
'text': 'hello',
})
# Assert
resp = self.ts.get_entity(self.table_name, 'test', 'test1')
self.assertIsNotNone(resp)
self.assertEqual(resp.text, 'hello')
def test_sas_add_inside_range(self):
# Arrange
self._create_table(self.table_name)
policy = self._get_shared_access_policy(TableSharedAccessPermissions.ADD)
policy.access_policy.start_pk = 'test'
policy.access_policy.end_pk = 'test'
policy.access_policy.start_rk = 'test1'
policy.access_policy.end_rk = 'test1'
token = self.ts.generate_shared_access_signature(self.table_name, policy)
# Act
service = TableService(
credentials.getStorageServicesName(),
sas_token=token,
)
set_service_options(service)
service.insert_entity(
self.table_name,
{
'PartitionKey': 'test',
'RowKey': 'test1',
'text': 'hello',
})
# Assert
resp = self.ts.get_entity(self.table_name, 'test', 'test1')
self.assertIsNotNone(resp)
self.assertEqual(resp.text, 'hello')
def test_sas_add_outside_range(self):
# Arrange
self._create_table(self.table_name)
policy = self._get_shared_access_policy(TableSharedAccessPermissions.ADD)
policy.access_policy.start_pk = 'test'
policy.access_policy.end_pk = 'test'
policy.access_policy.start_rk = 'test1'
policy.access_policy.end_rk = 'test1'
token = self.ts.generate_shared_access_signature(self.table_name, policy)
# Act
service = TableService(
credentials.getStorageServicesName(),
sas_token=token,
)
set_service_options(service)
with self.assertRaises(WindowsAzureMissingResourceError):
service.insert_entity(
self.table_name,
{
'PartitionKey': 'test',
'RowKey': 'test2',
'text': 'hello',
})
# Assert
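        # The start_pk/end_pk and start_rk/end_rk fields narrow the token to a
        # partition/row key range, which is why inserting RowKey 'test2'
        # (outside the granted range) is rejected above.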
def test_sas_update(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
policy = self._get_shared_access_policy(TableSharedAccessPermissions.UPDATE)
token = self.ts.generate_shared_access_signature(self.table_name, policy)
# Act
service = TableService(
credentials.getStorageServicesName(),
sas_token=token,
)
set_service_options(service)
updated_entity = self._create_updated_entity_dict('MyPartition', '1')
resp = service.update_entity(self.table_name, 'MyPartition', '1', updated_entity)
# Assert
received_entity = self.ts.get_entity(self.table_name, 'MyPartition', '1')
self._assert_updated_entity(received_entity)
def test_sas_delete(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
policy = self._get_shared_access_policy(TableSharedAccessPermissions.DELETE)
token = self.ts.generate_shared_access_signature(self.table_name, policy)
# Act
service = TableService(
credentials.getStorageServicesName(),
sas_token=token,
)
set_service_options(service)
service.delete_entity(self.table_name, 'MyPartition', '1')
# Assert
with self.assertRaises(WindowsAzureMissingResourceError):
self.ts.get_entity(self.table_name, 'MyPartition', '1')
def test_sas_signed_identifier(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 2)
si = SignedIdentifier()
si.id = 'testid'
si.access_policy.start = '2011-10-11'
si.access_policy.expiry = '2018-10-12'
si.access_policy.permission = TableSharedAccessPermissions.QUERY
identifiers = SignedIdentifiers()
identifiers.signed_identifiers.append(si)
resp = self.ts.set_table_acl(self.table_name, identifiers)
token = self.ts.generate_shared_access_signature(
self.table_name,
SharedAccessPolicy(signed_identifier=si.id),
)
# Act
service = TableService(
credentials.getStorageServicesName(),
sas_token=token,
)
set_service_options(service)
resp = self.ts.query_entities(self.table_name, None, 'age,sex')
# Assert
self.assertEqual(len(resp), 2)
self.assertEqual(resp[0].age, 39)
self.assertEqual(resp[0].sex, 'male')
self.assertFalse(hasattr(resp[0], "birthday"))
self.assertFalse(hasattr(resp[0], "married"))
self.assertFalse(hasattr(resp[0], "deceased"))
def test_get_table_acl(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
acl = self.ts.get_table_acl(self.table_name)
# Assert
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
def test_get_table_acl_iter(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
acl = self.ts.get_table_acl(self.table_name)
for signed_identifier in acl:
pass
# Assert
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
self.assertEqual(len(acl), 0)
def test_get_table_acl_with_non_existing_container(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.ts.get_table_acl(self.table_name)
# Assert
def test_set_table_acl(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
resp = self.ts.set_table_acl(self.table_name)
# Assert
self.assertIsNone(resp)
acl = self.ts.get_table_acl(self.table_name)
self.assertIsNotNone(acl)
def test_set_table_acl_with_empty_signed_identifiers(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
identifiers = SignedIdentifiers()
resp = self.ts.set_table_acl(self.table_name, identifiers)
# Assert
self.assertIsNone(resp)
acl = self.ts.get_table_acl(self.table_name)
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
def test_set_table_acl_with_signed_identifiers(self):
# Arrange
self._create_table_with_default_entities(self.table_name, 1)
# Act
si = SignedIdentifier()
si.id = 'testid'
si.access_policy.start = '2011-10-11'
si.access_policy.expiry = '2011-10-12'
si.access_policy.permission = TableSharedAccessPermissions.QUERY
identifiers = SignedIdentifiers()
identifiers.signed_identifiers.append(si)
resp = self.ts.set_table_acl(self.table_name, identifiers)
# Assert
self.assertIsNone(resp)
acl = self.ts.get_table_acl(self.table_name)
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 1)
self.assertEqual(len(acl), 1)
self.assertEqual(acl.signed_identifiers[0].id, 'testid')
self.assertEqual(acl[0].id, 'testid')
def test_set_table_acl_with_non_existing_table(self):
# Arrange
# Act
with self.assertRaises(WindowsAzureError):
self.ts.set_table_acl(self.table_name)
# Assert
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "ef48ca82e190032e22f98cbb00b044be",
"timestamp": "",
"source": "github",
"line_count": 1626,
"max_line_length": 120,
"avg_line_length": 34.621156211562116,
"alnum_prop": 0.5863857604718087,
"repo_name": "shwetams/python_azure_tables",
"id": "9c20d1cb668da1781f4fdaa6df973b63fa33dadb",
"size": "56358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tableservice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7954"
},
{
"name": "Python",
"bytes": "1230818"
},
{
"name": "Shell",
"bytes": "565"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import rabbithole
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Rabbit Hole'
copyright = u"2016, Javier Collado"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = rabbithole.__version__
# The full version, including alpha/beta/rc tags.
release = rabbithole.__version__
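# For example, if rabbithole/__init__.py defined __version__ = '0.1.0'
# (hypothetical value), both the |version| and |release| substitutions would
# render as "0.1.0".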
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rabbitholedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'rabbithole.tex',
u'Rabbit Hole Documentation',
u'Javier Collado', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rabbithole',
u'Rabbit Hole Documentation',
[u'Javier Collado'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'rabbithole',
u'Rabbit Hole Documentation',
u'Javier Collado',
'rabbithole',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "54d9bc776908505ac616990abf827702",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 76,
"avg_line_length": 30.74230769230769,
"alnum_prop": 0.7032403352933817,
"repo_name": "jcollado/rabbithole",
"id": "a85c1b8bea47a2c59759c1127dab88607790e16c",
"size": "8438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "141"
},
{
"name": "Makefile",
"bytes": "2307"
},
{
"name": "Python",
"bytes": "48739"
}
],
"symlink_target": ""
} |
import datetime
from django.utils.timezone import now
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table('issues_project', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
))
db.send_create_signal('issues', ['Project'])
# Adding model 'Issue'
db.create_table('issues_issue', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=1000)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('priority', self.gf('django.db.models.fields.IntegerField')(default=-1)),
('type', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
('assigned_to', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('closed_by_revision', self.gf('django.db.models.fields.CharField')(max_length=1000, null=True, blank=True)),
('close_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['issues.Project'], null=True)),
('days_estimate', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=65, decimal_places=5, blank=True)),
('milestone', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['issues.Milestone'], null=True, blank=True)),
))
db.send_create_signal('issues', ['Issue'])
# Adding model 'Milestone'
db.create_table('issues_milestone', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['issues.Project'])),
('deadline', self.gf('django.db.models.fields.DateTimeField')(default=now)),
))
db.send_create_signal('issues', ['Milestone'])
def backwards(self, orm):
# Deleting model 'Project'
db.delete_table('issues_project')
# Deleting model 'Issue'
db.delete_table('issues_issue')
# Deleting model 'Milestone'
db.delete_table('issues_milestone')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'issues.issue': {
'Meta': {'ordering': "['project', 'closed_by_revision', '-priority']", 'object_name': 'Issue'},
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'close_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by_revision': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'days_estimate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '65', 'decimal_places': '5', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'milestone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['issues.Milestone']", 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['issues.Project']", 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
},
'issues.milestone': {
'Meta': {'object_name': 'Milestone'},
'deadline': ('django.db.models.fields.DateTimeField', [], {'default': 'now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['issues.Project']"})
},
'issues.project': {
'Meta': {'object_name': 'Project'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
}
}
complete_apps = ['issues'] | {
"content_hash": "428147b5750c6ad7b1b789ebfad2a29d",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 182,
"avg_line_length": 66.62295081967213,
"alnum_prop": 0.5670521653543307,
"repo_name": "mostateresnet/django-ticket",
"id": "c6ca6fc0c4faec58bf496702fffb03dd6ca1eccb",
"size": "8152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "issues/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "37400"
},
{
"name": "Python",
"bytes": "324827"
}
],
"symlink_target": ""
} |
import json
import pymysql
import argparse
def InsertToMySQL(conn, record):
    """Insert a single Yelp user record into the user table."""
    cursor = conn.cursor()
    # Parameterized query: pymysql escapes the values, so the raw JSON
    # fields are never interpolated into the SQL string directly.
    cursor.execute("INSERT INTO user VALUES (%s, %s, %s, %s, %s)", [
        record['user_id'], record['name'], record['review_count'],
        record['average_stars'], record['yelping_since']])
def main(fileName):
    conn = pymysql.connect(host='localhost', user='root', passwd='',
                           charset='utf8', db='yelpdb')
    # The Yelp dump is newline-delimited JSON: one user object per line.
    with open(fileName, 'r') as f:
        for line in f:
            record = json.loads(line)
            InsertToMySQL(conn, record)
    # Commit once after all rows are inserted, then release the connection.
    conn.commit()
    conn.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("fileName", help="Json file path")
args = parser.parse_args()
main(args.fileName)
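# Illustrative usage sketch (not part of the original script): the table
# layout and file name below are assumptions chosen to match the insert
# order above, not values taken from the Yelp dataset documentation.
#
#     CREATE TABLE user (
#         user_id        VARCHAR(32) PRIMARY KEY,
#         name           VARCHAR(255),
#         review_count   INT,
#         average_stars  DECIMAL(3, 2),
#         yelping_since  DATE
#     );
#
#     $ python load_user.py yelp_academic_dataset_user.json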
| {
"content_hash": "42aa4955ea2771b0b01c266a9e14f428",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 96,
"avg_line_length": 25.733333333333334,
"alnum_prop": 0.6113989637305699,
"repo_name": "zhewang/restauranthunter",
"id": "d91de04c585e3a0377d82c7325b97e0a419b5421",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datascripts/load_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "130328"
},
{
"name": "HTML",
"bytes": "4345"
},
{
"name": "JavaScript",
"bytes": "244469"
},
{
"name": "Python",
"bytes": "5616"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'AnswerReference.migrated'
db.delete_column('website_answerreference', 'migrated')
# Adding field 'AnswerReference.migrated_answer_id'
db.add_column('website_answerreference', 'migrated_answer_id',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'AnswerReference.migrated'
db.add_column('website_answerreference', 'migrated',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'AnswerReference.migrated_answer_id'
db.delete_column('website_answerreference', 'migrated_answer_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'subscribed_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website'] | {
"content_hash": "9eaabaa229a8ac94c063ae7164408224",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 182,
"avg_line_length": 90.62256809338521,
"alnum_prop": 0.5471446972949764,
"repo_name": "solarpermit/solarpermit",
"id": "74de7c93794ab579e709849aea904e9a4cfdd237",
"size": "46604",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "website/migrations/0030_auto__del_field_answerreference_migrated__add_field_answerreference_mi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "126992"
},
{
"name": "JavaScript",
"bytes": "808802"
},
{
"name": "Python",
"bytes": "6625868"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.reductions.dcp2cone.atom_canonicalizers import log_canon
def log1p_canon(expr, args):
    # log1p(x) is log(1 + x), so the canonicalization simply reuses
    # log_canon with the argument shifted by one.
    return log_canon(expr, [args[0] + 1])
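# A minimal end-to-end sketch of this reduction at work (illustrative only;
# it assumes a standard cvxpy install and an exponential-cone capable
# solver such as ECOS):
#
#     import cvxpy as cp
#     x = cp.Variable(nonneg=True)
#     prob = cp.Problem(cp.Maximize(cp.log1p(x)), [x <= 1.0])
#     prob.solve(solver=cp.ECOS)
#     # optimal value is log(2), roughly 0.693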
| {
"content_hash": "cfbcbed5a1ccbf1353907d6592bf3739",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 72,
"avg_line_length": 33.57142857142857,
"alnum_prop": 0.774468085106383,
"repo_name": "merraksh/cvxpy",
"id": "cd017bc8aea92657bc22858cd0977fe940efa27f",
"size": "705",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cvxpy/reductions/dcp2cone/atom_canonicalizers/log1p_canon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "120010"
},
{
"name": "C++",
"bytes": "5687983"
},
{
"name": "CMake",
"bytes": "694"
},
{
"name": "Makefile",
"bytes": "6320"
},
{
"name": "Python",
"bytes": "2149670"
},
{
"name": "SWIG",
"bytes": "2403"
},
{
"name": "Shell",
"bytes": "3117"
}
],
"symlink_target": ""
} |
"""Tests for the profile page."""
__author__ = 'Sean Lip'
import datetime
from core.domain import exp_services
from core.domain import user_services
from core.tests import test_utils
import feconf
import utils
class SignupTest(test_utils.GenericTestBase):
def test_signup_page_does_not_have_top_right_menu(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['Logout', 'Sign in'])
self.logout()
def test_going_somewhere_else_while_signing_in_logs_user_out(self):
exp_services.load_demo('0')
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.get('/create/0')
self.assertEqual(response.status_int, 302)
self.assertIn('Logout', response.headers['location'])
self.assertIn('create', response.headers['location'])
self.logout()
def test_accepting_terms_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': False},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('you will need to accept', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': 'Hasta la vista!'},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('you will need to accept', response_dict['error'])
self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': True, 'username': 'myusername'},
csrf_token=csrf_token)
self.logout()
def test_username_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '', 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '!a!', 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': self.UNICODE_TEST_STRING, 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abcde', 'agreed_to_terms': True},
csrf_token=csrf_token)
self.logout()
class UsernameCheckHandlerTests(test_utils.GenericTestBase):
def test_username_check(self):
self.signup('[email protected]', username='abc')
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'abc'},
csrf_token=csrf_token)
self.assertEqual(response_dict, {
'username_is_taken': True
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'def'},
csrf_token=csrf_token)
self.assertEqual(response_dict, {
'username_is_taken': False
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': '!!!INVALID!!!'},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL,
{'username': self.UNICODE_TEST_STRING},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
self.logout()
class EmailPreferencesTests(test_utils.GenericTestBase):
def test_user_not_setting_email_prefs_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True},
csrf_token=csrf_token)
# The email update preference should be whatever the setting in feconf
# is.
self.EDITOR_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': True})
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': False})
def test_user_allowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': True},
csrf_token=csrf_token)
# The email update preference should be True in all cases.
self.EDITOR_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': True})
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': True})
def test_user_disallowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': False},
csrf_token=csrf_token)
# The email update preference should be False in all cases.
self.EDITOR_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': False})
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': False})
class ProfileLinkTests(test_utils.GenericTestBase):
USERNAME = 'abc123'
EMAIL = '[email protected]'
PROFILE_PIC_URL = '/preferenceshandler/profile_picture_by_username/'
def test_get_profile_picture_invalid_username(self):
response = self.testapp.get(
'%s%s' % (self.PROFILE_PIC_URL, self.USERNAME), expect_errors=True
)
self.assertEqual(response.status_int, 404)
def test_get_profile_picture_valid_username(self):
self.signup(self.EMAIL, self.USERNAME)
response_dict = self.get_json(
'%s%s' % (self.PROFILE_PIC_URL, self.USERNAME)
)
self.assertEqual(
response_dict['profile_picture_data_url_for_username'],
None)
class ProfileDataHandlerTests(test_utils.GenericTestBase):
def test_profile_data_is_independent_of_currently_logged_in_user(self):
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/preferences')
csrf_token = self.get_csrf_token_from_response(response)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'user_bio', 'data': 'My new editor bio'},
csrf_token=csrf_token)
self.logout()
self.signup(self.VIEWER_EMAIL, username=self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
response = self.testapp.get('/preferences')
csrf_token = self.get_csrf_token_from_response(response)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'user_bio', 'data': 'My new viewer bio'},
csrf_token=csrf_token)
self.logout()
# Viewer looks at editor's profile page.
self.login(self.VIEWER_EMAIL)
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.logout()
# Editor looks at their own profile page.
self.login(self.EDITOR_EMAIL)
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.logout()
        # Logged-out user looks at the editor's profile page.
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
class FirstContributionDateTests(test_utils.GenericTestBase):
USERNAME = 'abc123'
EMAIL = '[email protected]'
def test_contribution_datetime(self):
        # Test that the contribution date is correctly reported as nonexistent.
self.signup(self.EMAIL, self.USERNAME)
self.login(self.EMAIL)
self.user_id = self.get_user_id_from_email(self.EMAIL)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertEqual(response_dict['first_contribution_datetime'], None)
        # Update the first_contribution_datetime to the current datetime.
        current_datetime = datetime.datetime.utcnow()
        user_services.update_first_contribution_datetime(
            self.user_id, current_datetime)
        # Test that the contribution date correctly changes to the set datetime.
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertEqual(
response_dict['first_contribution_datetime'],
utils.get_time_in_millisecs(current_datetime))
| {
"content_hash": "08bde782c03b8e669e7c4595c29f8baa",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 79,
"avg_line_length": 40.16,
"alnum_prop": 0.6259130146082338,
"repo_name": "won0089/oppia",
"id": "bf3045ea9cb111eaec512879dd6f60c25eb055d8",
"size": "12653",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/controllers/profile_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "363"
},
{
"name": "CSS",
"bytes": "64557"
},
{
"name": "HTML",
"bytes": "369137"
},
{
"name": "JavaScript",
"bytes": "1635914"
},
{
"name": "Python",
"bytes": "2009545"
},
{
"name": "Shell",
"bytes": "32702"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.maintenance import MaintenanceManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-maintenance
# USAGE
python configuration_assignments_delete.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = MaintenanceManagementClient(
credential=DefaultAzureCredential(),
subscription_id="5b4b650e-28b9-4790-b3ab-ddbd88d727c4",
)
response = client.configuration_assignments.delete(
resource_group_name="examplerg",
provider_name="Microsoft.Compute",
resource_type="virtualMachineScaleSets",
resource_name="smdtest1",
configuration_assignment_name="workervmConfiguration",
)
print(response)
# x-ms-original-file: specification/maintenance/resource-manager/Microsoft.Maintenance/preview/2022-07-01-preview/examples/ConfigurationAssignments_Delete.json
if __name__ == "__main__":
main()
| {
"content_hash": "1038f454a584af26dc7336df3c5cf848",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 159,
"avg_line_length": 36.361111111111114,
"alnum_prop": 0.7433155080213903,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5dccc98c7db0d2b2f3206c431fc12b9ed3c9c6e9",
"size": "1777",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/maintenance/azure-mgmt-maintenance/generated_samples/configuration_assignments_delete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Cross-language tests for the JWT primitives."""
import datetime
import json
from absl.testing import absltest
from absl.testing import parameterized
import tink
from tink import jwt
from util import testing_servers
from util import utilities
SUPPORTED_LANGUAGES = testing_servers.SUPPORTED_LANGUAGES_BY_PRIMITIVE['jwt']
def setUpModule():
testing_servers.start('jwt')
def tearDownModule():
testing_servers.stop()
class JwtTest(parameterized.TestCase):
@parameterized.parameters(utilities.tinkey_template_names_for(jwt.JwtMac))
def test_compute_verify_jwt_mac(self, key_template_name):
supported_langs = utilities.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
key_template_name]
self.assertNotEmpty(supported_langs)
key_template = utilities.KEY_TEMPLATE[key_template_name]
# Take the first supported language to generate the keyset.
keyset = testing_servers.new_keyset(supported_langs[0], key_template)
supported_jwt_macs = []
for lang in supported_langs:
supported_jwt_macs.append(
testing_servers.remote_primitive(lang, keyset, jwt.JwtMac))
now = datetime.datetime.now(tz=datetime.timezone.utc)
raw_jwt = jwt.new_raw_jwt(
issuer='issuer',
expiration=now + datetime.timedelta(seconds=100))
for p in supported_jwt_macs:
compact = p.compute_mac_and_encode(raw_jwt)
validator = jwt.new_validator(expected_issuer='issuer', fixed_now=now)
for p2 in supported_jwt_macs:
verified_jwt = p2.verify_mac_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
@parameterized.parameters(
utilities.tinkey_template_names_for(jwt.JwtPublicKeySign))
def test_jwt_public_key_sign_verify(self, key_template_name):
supported_langs = utilities.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
key_template_name]
key_template = utilities.KEY_TEMPLATE[key_template_name]
self.assertNotEmpty(supported_langs)
# Take the first supported language to generate the private keyset.
private_keyset = testing_servers.new_keyset(supported_langs[0],
key_template)
supported_signers = {}
for lang in supported_langs:
supported_signers[lang] = testing_servers.remote_primitive(
lang, private_keyset, jwt.JwtPublicKeySign)
public_keyset = testing_servers.public_keyset('java', private_keyset)
supported_verifiers = {}
for lang in supported_langs:
supported_verifiers[lang] = testing_servers.remote_primitive(
lang, public_keyset, jwt.JwtPublicKeyVerify)
now = datetime.datetime.now(tz=datetime.timezone.utc)
raw_jwt = jwt.new_raw_jwt(
issuer='issuer', expiration=now + datetime.timedelta(seconds=100))
for signer in supported_signers.values():
compact = signer.sign_and_encode(raw_jwt)
validator = jwt.new_validator(expected_issuer='issuer', fixed_now=now)
for verifier in supported_verifiers.values():
verified_jwt = verifier.verify_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
@parameterized.parameters(
utilities.tinkey_template_names_for(jwt.JwtPublicKeySign))
def test_jwt_public_key_sign_export_import_verify(self, key_template_name):
supported_langs = utilities.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
key_template_name]
self.assertNotEmpty(supported_langs)
key_template = utilities.KEY_TEMPLATE[key_template_name]
# Take the first supported language to generate the private keyset.
private_keyset = testing_servers.new_keyset(supported_langs[0],
key_template)
now = datetime.datetime.now(tz=datetime.timezone.utc)
raw_jwt = jwt.new_raw_jwt(
issuer='issuer', expiration=now + datetime.timedelta(seconds=100))
validator = jwt.new_validator(expected_issuer='issuer', fixed_now=now)
for lang1 in supported_langs:
# in lang1: sign token and export public keyset to a JWK set
signer = testing_servers.remote_primitive(lang1, private_keyset,
jwt.JwtPublicKeySign)
compact = signer.sign_and_encode(raw_jwt)
public_keyset = testing_servers.public_keyset(lang1, private_keyset)
public_jwk_set = testing_servers.jwk_set_from_keyset(lang1, public_keyset)
for lang2 in supported_langs:
# in lang2: import the public JWK set and verify the token
public_keyset = testing_servers.jwk_set_to_keyset(lang2, public_jwk_set)
verifier = testing_servers.remote_primitive(lang2, public_keyset,
jwt.JwtPublicKeyVerify)
verified_jwt = verifier.verify_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
# Additional tests for the "kid" property of the JWK and the "kid"
# header of the token. Either of them may be missing, but they must not
# have different values.
jwks = json.loads(public_jwk_set)
has_kid = 'kid' in jwks['keys'][0]
if has_kid:
# Change the "kid" property of the JWK.
jwks['keys'][0]['kid'] = 'unknown kid'
public_keyset = testing_servers.jwk_set_to_keyset(
lang2, json.dumps(jwks))
verifier = testing_servers.remote_primitive(lang2, public_keyset,
jwt.JwtPublicKeyVerify)
with self.assertRaises(
tink.TinkError,
msg='%s accepts tokens with an incorrect kid unexpectedly' %
lang2):
verifier.verify_and_decode(compact, validator)
# Remove the "kid" property of the JWK.
del jwks['keys'][0]['kid']
public_keyset = testing_servers.jwk_set_to_keyset(
lang2, json.dumps(jwks))
verifier = testing_servers.remote_primitive(lang2, public_keyset,
jwt.JwtPublicKeyVerify)
verified_jwt = verifier.verify_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
else:
# Add a "kid" property of the JWK.
jwks['keys'][0]['kid'] = 'unknown kid'
public_keyset = testing_servers.jwk_set_to_keyset(
lang2, json.dumps(jwks))
verifier = testing_servers.remote_primitive(lang2, public_keyset,
jwt.JwtPublicKeyVerify)
verified_jwt = verifier.verify_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "56ab5b53a3d13ffcc4b825a91f6af176",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 80,
"avg_line_length": 45.41216216216216,
"alnum_prop": 0.6551108466002084,
"repo_name": "google/tink",
"id": "e078c50c7cd94196f79343b860bee4c2018a396e",
"size": "7296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/cross_language/jwt_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17279"
},
{
"name": "C++",
"bytes": "5210061"
},
{
"name": "CMake",
"bytes": "232537"
},
{
"name": "Go",
"bytes": "2748716"
},
{
"name": "Java",
"bytes": "5065479"
},
{
"name": "Objective-C",
"bytes": "109729"
},
{
"name": "Objective-C++",
"bytes": "171502"
},
{
"name": "Python",
"bytes": "1412727"
},
{
"name": "Ruby",
"bytes": "637"
},
{
"name": "Shell",
"bytes": "263940"
},
{
"name": "Starlark",
"bytes": "1263006"
},
{
"name": "TypeScript",
"bytes": "636683"
}
],
"symlink_target": ""
} |
import datetime
from sqlalchemy import CheckConstraint, Column, Integer, String, ForeignKey, DateTime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
class Project(Base):
__tablename__ = 'projects'
def __init__(self, name):
self.name = name
id = Column(Integer, primary_key=True)
name = Column(String)
class Config(Base):
__tablename__ = 'configs'
def __init__(self, name, project_id):
self.name = name
self.project_id = project_id
id = Column(Integer, primary_key=True)
name = Column(String)
project_id = Column(Integer, ForeignKey('projects.id'))
project = relationship("Project")
class Testsuite(Base):
__tablename__ = 'suites'
id = Column(Integer, primary_key=True)
name = Column(String)
project_id = Column(Integer, ForeignKey('projects.id'))
project = relationship("Project")
errors = Column(Integer, default=0)
date = Column(DateTime, default=datetime.datetime.utcnow)
failures = Column(Integer, default=0)
skips = Column(Integer, default=0)
tests = Column(Integer, default=0)
runtime = Column(Integer)
__table_args__ = (
CheckConstraint(errors >= 0, name='check_counts_positive'),
{})
'''
<testcase name="test_almost_equal[compatible_vector_array_pair0-compatible_vector_array_pair_without_reserve0]"
classname="src.pymortests.algorithms.basic"
time="0.006150484085083008"></testcase>
<testcase name="Timing"
classname="ProfilerTest"
time="0.451"
status="run" />'''
class Testcase(Base):
__tablename__ = 'cases'
id = Column(Integer, primary_key=True)
name = Column(String)
suite_id = Column(Integer, ForeignKey('suites.id'))
suite = relationship("Testsuite")
runtime = Column(Integer)
classname = Column(String)
def connect(db_path):
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///{}'.format(db_path))
Base.metadata.create_all(engine)
return sessionmaker(bind=engine)
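# Usage sketch (editor's illustration, not part of the original module; the
# database path below is a hypothetical example):
#     Session = connect('/tmp/results.sqlite')
#     session = Session()
#     session.add(Project('my-project'))
#     session.commit()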
| {
"content_hash": "53c2fc84a6f0c60a808b397d34084749",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 111,
"avg_line_length": 28.32,
"alnum_prop": 0.6793785310734464,
"repo_name": "renemilk/pyjuxa",
"id": "17b4de99bdde07aa6719742f58c6a8fba7ae1b46",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyjuxa/db.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9778"
},
{
"name": "Shell",
"bytes": "685"
}
],
"symlink_target": ""
} |
import socket
class NetworkPrinter(object):
"""
Object to send ZPL to zebra network printer using sockets
:param ip_address: printer network address as 'xxx.xxx.xxx.xxx'
:param port: port of printer as int (default 9100)
"""
def __init__(self, ip_address, port=9100):
self.ip = ip_address
self.port = port
def print_zpl(self, zpl_document, timeout=10):
"""
Send ZPL2 formatted text to a network label printer
:param zpl_document: Document object, fully build for label.
:param timeout: Socket timeout for printer connection, default 10.
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(timeout)
s.connect((self.ip, self.port))
s.send(zpl_document.zpl_bytes)
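# Usage sketch (editor's illustration; the IP address and zpl_document object are
# hypothetical -- zpl_document is expected to be a fully built Document from this package):
#     printer = NetworkPrinter('192.168.1.50')
#     printer.print_zpl(zpl_document)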
| {
"content_hash": "56fe09c5554ed756da26b7b793112efa",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 74,
"avg_line_length": 31.576923076923077,
"alnum_prop": 0.630937880633374,
"repo_name": "sacherjj/simple_zpl2",
"id": "066f48eb18ba3e8a6b9fc1bf0ad8ce22696407de",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_zpl2/printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2289"
},
{
"name": "Python",
"bytes": "119087"
}
],
"symlink_target": ""
} |
import numpy as np
import cPickle
import os
import sys
import time
from os.path import join as pjoin
import theano
import theano.tensor as T
from models.Gauss_VAE import Gaussian_VAE
from models.neural_net.activation_fns import Sigmoid, Identity, Softmax
from models.neural_net.loss_fns import *
from utils.load_data import load_mnist, load_mnist_w_rotations, load_svhn_pca
from opt_fns import get_adam_updates
### Train & Evaluate ###
def train_and_eval_gaussian_vae(
dataset,
hidden_layer_sizes,
hidden_layer_types,
latent_size,
activations,
prior_mu,
prior_sigma,
n_epochs,
batch_size,
lookahead,
adam_lr,
experiment_dir,
output_file_base_name,
random_seed):
rng = np.random.RandomState(random_seed)
# LOAD DATA
if "mnist_plus_rot" in dataset:
datasets = load_mnist_w_rotations(dataset, target_as_one_hot=True, flatten=False, split=(70000, 10000, 20000))
input_layer_size = 28*28
layer_sizes = [input_layer_size] + hidden_layer_sizes
out_activation = Sigmoid
neg_log_likelihood_fn = calc_binaryVal_negative_log_likelihood
print "Dataset: MNIST+rot"
elif "mnist" in dataset:
# We follow the approach used in [2] to split the MNIST dataset.
datasets = load_mnist(dataset, target_as_one_hot=True, flatten=True, split=(45000, 5000, 10000))
input_layer_size = 28*28
layer_sizes = [input_layer_size] + hidden_layer_sizes
out_activation = Sigmoid
neg_log_likelihood_fn = calc_binaryVal_negative_log_likelihood
print "Dataset: MNIST"
elif "svhn_pca" in dataset:
datasets = load_svhn_pca(dataset, target_as_one_hot=True, train_valid_split=(65000, 8257))
input_layer_size = 500
layer_sizes = [input_layer_size] + hidden_layer_sizes
out_activation = Identity
neg_log_likelihood_fn = calc_realVal_negative_log_likelihood
print "Dataset: SVHN (PCA reduced)"
else:
print "no data found..."
exit()
train_set_x, _ = datasets[0]
valid_set_x, _ = datasets[1]
test_set_x, _ = datasets[2]
train_set_size = int(train_set_x.shape[0].eval())
valid_set_size = int(valid_set_x.shape[0].eval())
test_set_size = int(test_set_x.shape[0].eval())
print 'Datasets loaded ({:,} train | {:,} valid | {:,} test)'.format(train_set_size, valid_set_size, test_set_size)
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_size / batch_size
n_test_batches = test_set_size / batch_size
n_valid_batches = valid_set_size / batch_size
# BUILD MODEL
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x')
# construct the Gaussian Variational Autoencoder
model = Gaussian_VAE(rng=rng, input=x, batch_size=batch_size, layer_sizes=layer_sizes, layer_types=hidden_layer_types,
activations=activations, latent_size=latent_size, out_activation=out_activation)
# Build the expresson for the cost function.
data_ll_term = neg_log_likelihood_fn(x, model.x_recon)
kl = model.calc_kl_divergence(prior_mu=prior_mu, prior_sigma=prior_sigma)
# Compose into final costs
cost = T.mean( data_ll_term + kl )
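    # (Editor's note) This objective is the negative ELBO: the reconstruction
    # negative log-likelihood plus KL(q(z|x) || p(z)) for the Gaussian prior
    # given by prior_mu and prior_sigma.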
updates = get_adam_updates(cost=cost, params=model.params, lr=adam_lr)
# Compile theano function for testing.
test_model = theano.function(
inputs = [index],
outputs = T.mean(neg_log_likelihood_fn(x, model.x_recon)),
givens = {x: test_set_x[index * batch_size:(index + 1) * batch_size]})
# Compile theano function for validation.
valid_model = theano.function(
inputs = [index],
outputs = T.mean(neg_log_likelihood_fn(x, model.x_recon)),
givens = {x: valid_set_x[index * batch_size:(index + 1) * batch_size]})
# Compile theano function for training.
train_model = theano.function(
inputs = [index],
outputs = [data_ll_term.mean(), kl.mean()],
updates = updates,
givens = {x: train_set_x[index * batch_size:(index + 1) * batch_size]})
# TRAIN MODEL #
print 'Training for {} epochs ...'.format(n_epochs)
best_params = None
best_valid_error = np.inf
best_iter = 0
start_time = time.clock()
# check if results file already exists, if so, append a number
results_file_name = pjoin(experiment_dir, "gauss_vae_results_"+output_file_base_name+".txt")
file_exists_counter = 0
while os.path.isfile(results_file_name):
file_exists_counter += 1
results_file_name = pjoin(experiment_dir, "gauss_vae_results_"+output_file_base_name+"_"+str(file_exists_counter)+".txt")
if file_exists_counter > 0:
output_file_base_name += "_"+str(file_exists_counter)
results_file = open(results_file_name, 'w')
stop_training = False
for epoch_counter in range(n_epochs):
if stop_training:
break
# Train this epoch
epoch_start_time = time.time()
avg_training_nll_tracker = 0.
avg_training_kl_tracker = 0.
for minibatch_index in xrange(n_train_batches):
avg_training_nll, avg_training_kl = train_model(minibatch_index)
# check for NaN, test model anyway even if one is detected
if (np.isnan(avg_training_nll) or np.isnan(avg_training_kl)):
print "found NaN...aborting training..."
results_file.write("found NaN...aborting training... \n\n")
if epoch_counter > 0:
for param, best_param in zip(model.params, best_params):
param.set_value(best_param)
                end_time = time.clock()  # set here: the normal end_time is only assigned after the training loop
                test_error = sum([test_model(i) for i in xrange(n_test_batches)]) / n_test_batches
results = "Ended due to NaN! best epoch {}, best valid error {:.4f}, test error {:.4f}, training time {:.2f}m"
results = results.format(best_iter, best_valid_error, test_error, (end_time-start_time)/60)
print results
results_file.write(results + "\n")
results_file.close()
exit()
avg_training_nll_tracker += avg_training_nll
avg_training_kl_tracker += avg_training_kl
epoch_end_time = time.time()
# Compute some infos about training.
avg_training_nll_tracker /= (minibatch_index+1)
avg_training_kl_tracker /= (minibatch_index+1)
# Compute validation error
valid_error = sum([valid_model(i) for i in xrange(n_valid_batches)])/n_valid_batches
results = "epoch {}, training loss (NLL) {:.4f}, training kl divergence {:.4f}, valid error {:.4f}, time {:.2f} "
if valid_error < best_valid_error:
best_iter = epoch_counter
best_valid_error = valid_error
results += " ***"
# Save progression
best_params = [param.get_value().copy() for param in model.params]
cPickle.dump(best_params, open(pjoin(experiment_dir, 'gauss_vae_params_'+output_file_base_name+'.pkl'), 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)
elif epoch_counter-best_iter > lookahead:
stop_training = True
# Report and save progress.
results = results.format(epoch_counter, avg_training_nll_tracker, avg_training_kl_tracker, valid_error, (epoch_end_time-epoch_start_time)/60)
print results
results_file.write(results + "\n")
results_file.flush()
end_time = time.clock()
# Reload best model.
for param, best_param in zip(model.params, best_params):
param.set_value(best_param)
# Compute test error on best epoch
test_error = sum([test_model(i) for i in xrange(n_test_batches)])/n_test_batches
results = "Done! best epoch {}, best valid error {:.4f}, test error {:.4f}, training time {:.2f}m"
results = results.format(best_iter, best_valid_error, test_error, (end_time-start_time)/60)
print results
results_file.write(results + "\n")
results_file.close()
print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time) / 60.))
| {
"content_hash": "13076d7931a12889ec855db720cf3546",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 157,
"avg_line_length": 40.888888888888886,
"alnum_prop": 0.6148393194706995,
"repo_name": "enalisnick/stick-breaking_dgms",
"id": "1993546e707dec7eb2106523309851e927301eab",
"size": "8464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train_scripts/train_gauss_VAE.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113122"
}
],
"symlink_target": ""
} |
from model.contact import Contact
def test_add_contact(app, db, json_contacts, check_ui):
contact = json_contacts
old_contacts = db.get_contact_list()
app.contact.create(contact)
new_contacts = db.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(),
key=Contact.id_or_max)
| {
"content_hash": "fb11dcd39ba0be64cefd8d9497370f54",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 108,
"avg_line_length": 47.92307692307692,
"alnum_prop": 0.5778491171749599,
"repo_name": "100loto/kruglov_group_16",
"id": "61f37b8c17e94ead51590f6af7a9cdccd5317c3d",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_add_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "820"
},
{
"name": "Python",
"bytes": "38358"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import re
import logging
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
log = logging.getLogger('urlrewrite')
class UrlRewrite(object):
"""
Generic configurable urlrewriter.
Example::
urlrewrite:
demonoid:
regexp: http://www\.demonoid\.com/files/details/
format: http://www.demonoid.com/files/download/HTTP/
"""
resolves = {}
schema = {
'type': 'object',
'additionalProperties': {
'type': 'object',
'properties': {
'regexp': {'type': 'string', 'format': 'regex'},
'format': {'type': 'string'}
},
'required': ['regexp', 'format'],
'additionalProperties': False
}
}
def on_task_start(self, task, config):
resolves = self.resolves[task.name] = {}
for name, rewrite_config in config.items():
match = re.compile(rewrite_config['regexp'])
format = rewrite_config['format']
resolves[name] = {'regexp_compiled': match, 'format': format, 'regexp': rewrite_config['regexp']}
log.debug('Added rewrite %s' % name)
def url_rewritable(self, task, entry):
log.trace('running url_rewritable')
log.trace(self.resolves)
for name, config in self.resolves.get(task.name, {}).items():
regexp = config['regexp_compiled']
log.trace('testing %s' % config['regexp'])
if regexp.search(entry['url']):
return True
return False
def url_rewrite(self, task, entry):
for name, config in self.resolves.get(task.name, {}).items():
regexp = config['regexp_compiled']
format = config['format']
if regexp.search(entry['url']):
log.debug('Regexp resolving %s with %s' % (entry['url'], name))
# run the regexp
entry['url'] = regexp.sub(format, entry['url'])
if regexp.match(entry['url']):
entry.fail('urlrewriting')
raise UrlRewritingError('Regexp %s result should NOT continue to match!' % name)
return
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewrite, 'urlrewrite', groups=['urlrewriter'], api_ver=2)
| {
"content_hash": "f993eb96fba90467d619ded0984b7130",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 109,
"avg_line_length": 32.896103896103895,
"alnum_prop": 0.5752072641136992,
"repo_name": "qvazzler/Flexget",
"id": "24713be9f684f618695bf182c9c909ac26bc2cb1",
"size": "2533",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flexget/plugins/urlrewrite/urlrewrite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5275"
},
{
"name": "HTML",
"bytes": "33930"
},
{
"name": "JavaScript",
"bytes": "58811"
},
{
"name": "Python",
"bytes": "2428468"
}
],
"symlink_target": ""
} |
import re
from thingsboard_gateway.connectors.connector import log
class Device:
def __init__(self, path, name, config, converter, converter_for_sub):
self.path = path
self.name = name
self.config = config
self.converter = converter
self.converter_for_sub = converter_for_sub
self.values = {
'timeseries': [],
'attributes': []
}
self.__load_values()
def __repr__(self):
return f'{self.path}'
def __load_values(self):
for section in ('attributes', 'timeseries'):
for node_config in self.config.get(section, []):
try:
if re.search(r"(ns=\d+;[isgb]=[^}]+)", node_config['path']):
child = re.search(r"(ns=\d+;[isgb]=[^}]+)", node_config['path'])
self.values[section].append({'path': child.groups()[0], 'key': node_config['key']})
elif re.search(r"\${([A-Za-z.:\\\d]+)}", node_config['path']):
child = re.search(r"\${([A-Za-z.:\\\d]+)", node_config['path'])
self.values[section].append(
{'path': self.path + child.groups()[0].split('\\.'), 'key': node_config['key']})
except KeyError as e:
log.error('Invalid config for %s (key %s not found)', node_config, e)
| {
"content_hash": "0c2e23d863c74c0e502b98d14ca42e39",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 108,
"avg_line_length": 40.142857142857146,
"alnum_prop": 0.4918149466192171,
"repo_name": "thingsboard/thingsboard-gateway",
"id": "0347c32d2f323b9f4fb6a15b9b9f045072bbf7d9",
"size": "2022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thingsboard_gateway/connectors/opcua_asyncio/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1659"
},
{
"name": "PLpgSQL",
"bytes": "5034"
},
{
"name": "Python",
"bytes": "1076772"
},
{
"name": "Shell",
"bytes": "10610"
}
],
"symlink_target": ""
} |
"""Tests for manipulating Nodes via the DB API"""
import datetime
import mock
import six
from ironic.common import exception
from ironic.common import utils as ironic_utils
from ironic.db import api as dbapi
from ironic.openstack.common import timeutils
from ironic.tests.db import base
from ironic.tests.db import utils
class DbNodeTestCase(base.DbTestCase):
def setUp(self):
super(DbNodeTestCase, self).setUp()
self.dbapi = dbapi.get_instance()
def _create_test_node(self, **kwargs):
n = utils.get_test_node(**kwargs)
self.dbapi.create_node(n)
return n
def _create_many_test_nodes(self):
uuids = []
for i in range(1, 6):
n = self._create_test_node(id=i, uuid=ironic_utils.generate_uuid())
uuids.append(n['uuid'])
uuids.sort()
return uuids
def _create_associated_nodes(self):
uuids = []
uuids_with_instance = []
for i in range(1, 5):
uuid = ironic_utils.generate_uuid()
uuids.append(six.text_type(uuid))
if i < 3:
instance_uuid = ironic_utils.generate_uuid()
uuids_with_instance.append(six.text_type(uuid))
else:
instance_uuid = None
n = utils.get_test_node(id=i,
uuid=uuid,
instance_uuid=instance_uuid)
self.dbapi.create_node(n)
uuids.sort()
uuids_with_instance.sort()
return (uuids, uuids_with_instance)
def test_create_node(self):
self._create_test_node()
def test_create_node_nullable_chassis_id(self):
n = utils.get_test_node()
del n['chassis_id']
self.dbapi.create_node(n)
def test_get_node_by_id(self):
n = self._create_test_node()
res = self.dbapi.get_node(n['id'])
self.assertEqual(n['uuid'], res.uuid)
def test_get_node_by_uuid(self):
n = self._create_test_node()
res = self.dbapi.get_node(n['uuid'])
self.assertEqual(n['id'], res.id)
def test_get_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node, 99)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node,
'12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.InvalidIdentity,
self.dbapi.get_node, 'not-a-uuid')
def test_get_nodeinfo_list_defaults(self):
for i in range(1, 6):
n = utils.get_test_node(id=i, uuid=ironic_utils.generate_uuid())
self.dbapi.create_node(n)
res = [i[0] for i in self.dbapi.get_nodeinfo_list()]
self.assertEqual(sorted(res), sorted(range(1, 6)))
def test_get_nodeinfo_list_with_cols(self):
uuids = {}
extras = {}
for i in range(1, 6):
uuid = ironic_utils.generate_uuid()
extra = {'foo': i}
uuids[i] = uuid
extras[i] = extra
n = utils.get_test_node(id=i, extra=extra, uuid=uuid)
self.dbapi.create_node(n)
res = self.dbapi.get_nodeinfo_list(columns=['id', 'extra', 'uuid'])
self.assertEqual(extras, dict((r[0], r[1]) for r in res))
self.assertEqual(uuids, dict((r[0], r[2]) for r in res))
def test_get_nodeinfo_list_with_filters(self):
n1 = utils.get_test_node(id=1, driver='driver-one',
instance_uuid=ironic_utils.generate_uuid(),
reservation='fake-host',
uuid=ironic_utils.generate_uuid())
n2 = utils.get_test_node(id=2, driver='driver-two',
uuid=ironic_utils.generate_uuid(),
maintenance=True)
self.dbapi.create_node(n1)
self.dbapi.create_node(n2)
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'driver-one'})
self.assertEqual([1], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': True})
self.assertEqual([1], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': False})
self.assertEqual([2], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'reserved': True})
self.assertEqual([1], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'reserved': False})
self.assertEqual([2], [r[0] for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': True})
self.assertEqual([2], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([1], [r.id for r in res])
def test_get_node_list(self):
uuids = []
for i in range(1, 6):
n = utils.get_test_node(id=i, uuid=ironic_utils.generate_uuid())
self.dbapi.create_node(n)
uuids.append(six.text_type(n['uuid']))
res = self.dbapi.get_node_list()
res_uuids = [r.uuid for r in res]
self.assertEqual(uuids.sort(), res_uuids.sort())
def test_get_node_list_with_filters(self):
ch1 = utils.get_test_chassis(id=1, uuid=ironic_utils.generate_uuid())
ch2 = utils.get_test_chassis(id=2, uuid=ironic_utils.generate_uuid())
self.dbapi.create_chassis(ch1)
self.dbapi.create_chassis(ch2)
n1 = utils.get_test_node(id=1, driver='driver-one',
instance_uuid=ironic_utils.generate_uuid(),
reservation='fake-host',
uuid=ironic_utils.generate_uuid(),
chassis_id=ch1['id'])
n2 = utils.get_test_node(id=2, driver='driver-two',
uuid=ironic_utils.generate_uuid(),
chassis_id=ch2['id'],
maintenance=True)
self.dbapi.create_node(n1)
self.dbapi.create_node(n2)
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch1['uuid']})
self.assertEqual([1], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch2['uuid']})
self.assertEqual([2], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'driver-one'})
self.assertEqual([1], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': True})
self.assertEqual([1], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': False})
self.assertEqual([2], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': True})
self.assertEqual([1], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': False})
self.assertEqual([2], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': True})
self.assertEqual([2], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([1], [r.id for r in res])
def test_get_node_list_chassis_not_found(self):
self.assertRaises(exception.ChassisNotFound,
self.dbapi.get_node_list,
{'chassis_uuid': ironic_utils.generate_uuid()})
def test_get_node_by_instance(self):
n = self._create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
res = self.dbapi.get_node_by_instance(n['instance_uuid'])
self.assertEqual(n['uuid'], res.uuid)
def test_get_node_by_instance_wrong_uuid(self):
self._create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.InstanceNotFound,
self.dbapi.get_node_by_instance,
'12345678-9999-0000-bbbb-123456789012')
def test_get_node_by_instance_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID,
self.dbapi.get_node_by_instance,
'fake_uuid')
def test_destroy_node(self):
n = self._create_test_node()
self.dbapi.destroy_node(n['id'])
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node, n['id'])
def test_destroy_node_by_uuid(self):
n = self._create_test_node()
self.dbapi.destroy_node(n['uuid'])
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node, n['uuid'])
def test_destroy_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.destroy_node,
'12345678-9999-0000-aaaa-123456789012')
def test_ports_get_destroyed_after_destroying_a_node(self):
n = self._create_test_node()
node_id = n['id']
p = utils.get_test_port(node_id=node_id)
p = self.dbapi.create_port(p)
self.dbapi.destroy_node(node_id)
self.assertRaises(exception.PortNotFound, self.dbapi.get_port, p.id)
def test_ports_get_destroyed_after_destroying_a_node_by_uuid(self):
n = self._create_test_node()
node_id = n['id']
p = utils.get_test_port(node_id=node_id)
p = self.dbapi.create_port(p)
self.dbapi.destroy_node(n['uuid'])
self.assertRaises(exception.PortNotFound, self.dbapi.get_port, p.id)
def test_update_node(self):
n = self._create_test_node()
old_extra = n['extra']
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(n['id'], {'extra': new_extra})
self.assertEqual(new_extra, res.extra)
def test_update_node_not_found(self):
node_uuid = ironic_utils.generate_uuid()
new_extra = {'foo': 'bar'}
self.assertRaises(exception.NodeNotFound, self.dbapi.update_node,
node_uuid, {'extra': new_extra})
def test_update_node_associate_and_disassociate(self):
n = self._create_test_node()
new_i_uuid = ironic_utils.generate_uuid()
res = self.dbapi.update_node(n['id'], {'instance_uuid': new_i_uuid})
self.assertEqual(new_i_uuid, res.instance_uuid)
res = self.dbapi.update_node(n['id'], {'instance_uuid': None})
self.assertIsNone(res.instance_uuid)
    def test_update_node_already_associated(self):
n = self._create_test_node()
new_i_uuid_one = ironic_utils.generate_uuid()
self.dbapi.update_node(n['id'], {'instance_uuid': new_i_uuid_one})
new_i_uuid_two = ironic_utils.generate_uuid()
self.assertRaises(exception.NodeAssociated,
self.dbapi.update_node,
n['id'],
{'instance_uuid': new_i_uuid_two})
@mock.patch.object(timeutils, 'utcnow')
def test_update_node_provision(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
n = self._create_test_node()
res = self.dbapi.update_node(n['id'], {'provision_state': 'fake'})
self.assertEqual(mocked_time,
timeutils.normalize_time(res['provision_updated_at']))
def test_update_node_no_provision(self):
n = self._create_test_node()
res = self.dbapi.update_node(n['id'], {'extra': {'foo': 'bar'}})
self.assertIsNone(res['provision_updated_at'])
def test_reserve_one_node(self):
n = self._create_test_node()
uuid = n['uuid']
r1 = 'fake-reservation'
# reserve the node
self.dbapi.reserve_nodes(r1, [uuid])
# check reservation
res = self.dbapi.get_node(uuid)
self.assertEqual(r1, res.reservation)
def test_release_reservation(self):
n = self._create_test_node()
uuid = n['uuid']
r1 = 'fake-reservation'
self.dbapi.reserve_nodes(r1, [uuid])
# release reservation
self.dbapi.release_nodes(r1, [uuid])
res = self.dbapi.get_node(uuid)
self.assertIsNone(res.reservation)
def test_reservation_of_reserved_node_fails(self):
n = self._create_test_node()
uuid = n['uuid']
r1 = 'fake-reservation'
r2 = 'another-reservation'
# reserve the node
self.dbapi.reserve_nodes(r1, [uuid])
# another host fails to reserve or release
self.assertRaises(exception.NodeLocked,
self.dbapi.reserve_nodes,
r2, [uuid])
self.assertRaises(exception.NodeLocked,
self.dbapi.release_nodes,
r2, [uuid])
def test_reservation_after_release(self):
n = self._create_test_node()
uuid = n['uuid']
r1 = 'fake-reservation'
r2 = 'another-reservation'
self.dbapi.reserve_nodes(r1, [uuid])
self.dbapi.release_nodes(r1, [uuid])
# another host succeeds
self.dbapi.reserve_nodes(r2, [uuid])
res = self.dbapi.get_node(uuid)
self.assertEqual(r2, res.reservation)
def test_reserve_many_nodes(self):
uuids = self._create_many_test_nodes()
r1 = 'first-reservation'
self.dbapi.reserve_nodes(r1, uuids)
for uuid in uuids:
res = self.dbapi.get_node(uuid)
self.assertEqual(r1, res.reservation)
    def test_reserve_overlapping_ranges_fails(self):
uuids = self._create_many_test_nodes()
r1 = 'first-reservation'
r2 = 'second-reservation'
self.dbapi.reserve_nodes(r1, uuids[:3])
self.assertRaises(exception.NodeLocked,
self.dbapi.reserve_nodes,
r2, uuids)
self.assertRaises(exception.NodeLocked,
self.dbapi.reserve_nodes,
r2, uuids[2:])
    def test_reserve_non_overlapping_ranges(self):
uuids = self._create_many_test_nodes()
r1 = 'first-reservation'
r2 = 'second-reservation'
self.dbapi.reserve_nodes(r1, uuids[:3])
self.dbapi.reserve_nodes(r2, uuids[3:])
for i in range(0, len(uuids)):
res = self.dbapi.get_node(uuids[i])
reservation = r1 if i < 3 else r2
self.assertEqual(reservation, res.reservation)
def test_reserve_empty(self):
self.assertRaises(exception.InvalidIdentity,
self.dbapi.reserve_nodes, 'reserv1', [])
def test_reservation_in_exception_message(self):
n = self._create_test_node()
uuid = n['uuid']
r = 'fake-reservation'
self.dbapi.reserve_nodes(r, [uuid])
try:
self.dbapi.reserve_nodes('another', [uuid])
except exception.NodeLocked as e:
self.assertIn(r, str(e))
    def test_release_overlapping_ranges_fails(self):
uuids = self._create_many_test_nodes()
r1 = 'first-reservation'
r2 = 'second-reservation'
self.dbapi.reserve_nodes(r1, uuids[:3])
self.dbapi.reserve_nodes(r2, uuids[3:])
self.assertRaises(exception.NodeLocked,
self.dbapi.release_nodes,
r1, uuids)
def test_release_non_ranges(self):
uuids = self._create_many_test_nodes()
r1 = 'first-reservation'
r2 = 'second-reservation'
self.dbapi.reserve_nodes(r1, uuids[:3])
self.dbapi.reserve_nodes(r2, uuids[3:])
self.dbapi.release_nodes(r1, uuids[:3])
self.dbapi.release_nodes(r2, uuids[3:])
for uuid in uuids:
res = self.dbapi.get_node(uuid)
self.assertIsNone(res.reservation)
| {
"content_hash": "798dfd742c4d93178104358515776f8e",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 79,
"avg_line_length": 35.966814159292035,
"alnum_prop": 0.5677554284308298,
"repo_name": "varunarya10/ironic",
"id": "4f631e9d749df87500512a0b57b10f66495d620d",
"size": "16914",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ironic/tests/db/test_nodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1640165"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1
async def sample_purge_artifacts():
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.PurgeArtifactsRequest(
parent="parent_value",
filter="filter_value",
)
# Make the request
operation = client.purge_artifacts(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_v1_generated_MetadataService_PurgeArtifacts_async]
| {
"content_hash": "e48fb970bc51027229a720c3a5c20c11",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 68,
"avg_line_length": 25.041666666666668,
"alnum_prop": 0.7071547420965059,
"repo_name": "googleapis/python-aiplatform",
"id": "50e7e64a414c66c0b9046dcc50d52426e33fdbde",
"size": "1997",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_artifacts_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
"""
Quota synteny alignment (QUOTA-ALIGN)
%prog [options] anchorsfile --qbed=qbedfile --sbed=sbedfile
This python program does the following:
1. merge 2D-overlapping blocks (now skipped, but existed in original version)
2. build constraints that represent 1D-overlap among blocks
3. feed the data into the linear programming solver
The algorithm is described in Tang et al. BMC Bioinformatics 2011.
"Screening synteny blocks in pairwise genome comparisons through integer
programming."
"""
import logging
import os.path as op
import sys
from jcvi.algorithms.lpsolve import MIPDataModel
from jcvi.compara.synteny import AnchorFile, _score, check_beds
from jcvi.formats.base import must_open
from jcvi.apps.base import OptionParser
def get_1D_overlap(eclusters, depth=1):
"""
Find blocks that are 1D overlapping,
returns cliques of block ids that are in conflict
"""
overlap_set = set()
active = set()
ends = []
for i, (chr, left, right) in enumerate(eclusters):
ends.append((chr, left, 0, i)) # 0/1 for left/right-ness
ends.append((chr, right, 1, i))
ends.sort()
chr_last = ""
for chr, _, left_right, i in ends:
if chr != chr_last:
active.clear()
if left_right == 0:
active.add(i)
else:
active.remove(i)
if len(active) > depth:
overlap_set.add(tuple(sorted(active)))
chr_last = chr
return overlap_set
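# Illustrative example (editor's sketch, not part of the original module): with the
# default depth=1, more than one interval active on the same chromosome forms a
# conflicting clique, e.g.
#     get_1D_overlap([("chr1", 1, 10), ("chr1", 5, 20), ("chr2", 1, 10)])
# returns {(0, 1)}, since only the first two intervals overlap on chr1.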
def make_range(clusters, extend=0):
"""
    Convert a list of anchor clusters to interval ends.
    `extend` modifies the xmax, ymax boundary of the box and can be positive or
    negative; this is useful for making the range as fuzzy as we specify.
"""
eclusters = []
for cluster in clusters:
xlist, ylist, _ = zip(*cluster)
score = _score(cluster)
xchr, xmin = min(xlist)
xchr, xmax = max(xlist)
ychr, ymin = min(ylist)
ychr, ymax = max(ylist)
# allow fuzziness to the boundary
xmax += extend
ymax += extend
# because extend can be negative values, we don't want it to be less than min
if xmax < xmin:
xmin, xmax = xmax, xmin
if ymax < ymin:
ymin, ymax = ymax, ymin
eclusters.append(((xchr, xmin, xmax), (ychr, ymin, ymax), score))
return eclusters
def get_constraints(clusters, quota=(1, 1), Nmax=0):
"""
    Check pairwise cluster comparisons; if two clusters overlap, mark the edge as a conflict.
"""
qa, qb = quota
eclusters = make_range(clusters, extend=-Nmax)
nodes = [c[-1] for c in eclusters]
eclusters_x, eclusters_y, _ = zip(*eclusters)
    # represents the constraints over the x-axis and y-axis
constraints_x = get_1D_overlap(eclusters_x, qa)
constraints_y = get_1D_overlap(eclusters_y, qb)
return nodes, constraints_x, constraints_y
def create_data_model(nodes, constraints_x, qa, constraints_y, qb):
"""
Maximize
4 x1 + 2 x2 + 3 x3 + x4
Subject To
x1 + x2 <= 1
End
"""
num_vars = len(nodes)
obj_coeffs = nodes[:]
constraint_coeffs = []
bounds = []
for c in constraints_x:
constraint_coeffs.append({x: 1 for x in c})
bounds.append(qa)
num_constraints = len(constraints_x)
# non-self
if not (constraints_x is constraints_y):
for c in constraints_y:
constraint_coeffs.append({x: 1 for x in c})
bounds.append(qb)
num_constraints += len(constraints_y)
return MIPDataModel(
constraint_coeffs, bounds, obj_coeffs, num_vars, num_constraints
)
def solve_lp(
clusters,
quota,
work_dir="work",
Nmax=0,
self_match=False,
verbose=False,
):
"""
Solve the formatted LP instance
"""
qb, qa = quota # flip it
nodes, constraints_x, constraints_y = get_constraints(clusters, (qa, qb), Nmax=Nmax)
if self_match:
constraints_x = constraints_y = constraints_x | constraints_y
data = create_data_model(nodes, constraints_x, qa, constraints_y, qb)
return data.solve(work_dir=work_dir, verbose=verbose)
def read_clusters(qa_file, qorder, sorder):
"""Read in the clusters from anchors file
Args:
qa_file (str): Path to input file
qorder (dict): Dictionary to find position of feature in query
sorder (dict): Dictionary to find position of feature in subject
Returns:
List: List of matches and scores
"""
af = AnchorFile(qa_file)
blocks = af.blocks
clusters = []
for block in blocks:
cluster = []
for a, b, score in block:
ia, oa = qorder[a]
ib, ob = sorder[b]
ca, cb = oa.seqid, ob.seqid
cluster.append(((ca, ia), (cb, ib), score))
clusters.append(cluster)
return clusters
def main(args):
p = OptionParser(__doc__)
p.set_beds()
p.add_option(
"--quota",
default="1:1",
help="`quota mapping` procedure -- screen blocks to constrain mapping"
" (useful for orthology), "
"put in the format like (#subgenomes expected for genome X):"
"(#subgenomes expected for genome Y)",
)
p.add_option(
"--Nm",
dest="Nmax",
type="int",
default=10,
help="distance cutoff to tolerate two blocks that are "
"slightly overlapping (cutoff for `quota mapping`) "
"[default: %default units (gene or bp dist)]",
)
p.add_option(
"--self",
dest="self_match",
action="store_true",
default=False,
help="you might turn this on when screening paralogous blocks, "
"esp. if you have reduced mirrored blocks into non-redundant set",
)
p.set_verbose(help="Show verbose solver output")
p.add_option(
"--screen",
default=False,
action="store_true",
help="generate new anchors file",
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(qa_file,) = args
_, _, qorder, sorder, _ = check_beds(qa_file, p, opts)
# sanity check for the quota
if opts.quota:
try:
qa, qb = opts.quota.split(":")
qa, qb = int(qa), int(qb)
except ValueError:
            logging.error("quota string should be of the form x:x (2:4, 1:3, etc.)")
sys.exit(1)
if opts.self_match and qa != qb:
raise Exception(
"when comparing genome to itself, "
"quota must be the same number "
"(like 1:1, 2:2) you have %s" % opts.quota
)
quota = (qa, qb)
self_match = opts.self_match
clusters = read_clusters(qa_file, qorder, sorder)
for cluster in clusters:
assert len(cluster) > 0
# below runs `quota mapping`
work_dir = op.join(op.dirname(op.abspath(qa_file)), "work")
selected_ids = solve_lp(
clusters,
quota,
work_dir=work_dir,
Nmax=opts.Nmax,
self_match=self_match,
verbose=opts.verbose,
)
logging.debug("Selected %d blocks", len(selected_ids))
prefix = qa_file.rsplit(".", 1)[0]
suffix = "{}x{}".format(qa, qb)
outfile = ".".join((prefix, suffix))
fw = must_open(outfile, "w")
print(",".join(str(x) for x in selected_ids), file=fw)
fw.close()
logging.debug("Screened blocks ids written to `%s`", outfile)
if opts.screen:
from jcvi.compara.synteny import screen
new_qa_file = ".".join((prefix, suffix, "anchors"))
largs = [qa_file, new_qa_file, "--ids", outfile]
if opts.qbed and opts.sbed:
largs += ["--qbed={0}".format(opts.qbed)]
largs += ["--sbed={0}".format(opts.sbed)]
screen(largs)
if __name__ == "__main__":
main(sys.argv[1:])
| {
"content_hash": "9c6d319dc2cc51b1fb8ebd25b1223085",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 88,
"avg_line_length": 27.760563380281692,
"alnum_prop": 0.5927194317605277,
"repo_name": "tanghaibao/jcvi",
"id": "de8b7acd3afe13178fbb51c2fff4bd28126dc1b6",
"size": "7931",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jcvi/compara/quota.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Cython",
"bytes": "10467"
},
{
"name": "Dockerfile",
"bytes": "1150"
},
{
"name": "Makefile",
"bytes": "445"
},
{
"name": "Python",
"bytes": "2635155"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, three components
np.random.seed(0)
# generate spherical data centered on (20, 0) and (40, 0)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 0])
shifted_gaussian2 = np.random.randn(n_samples, 2) + np.array([40, 0])
# generate zero centered Gaussian data (identity covariance)
C = np.array([[1., 0.], [0., 1.]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the three datasets into the final training set
X_train = np.vstack([shifted_gaussian, shifted_gaussian2, stretched_gaussian])
# fit a Gaussian Mixture Model with three components
clf = mixture.GMM(n_components=3, covariance_type='tied')
clf.fit(X_train)
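# Note: mixture.GMM is the pre-0.18 scikit-learn API; its score_samples() returns a
# (log_likelihood, responsibilities) tuple, which is why [0] is taken below. In
# scikit-learn >= 0.18 the equivalent class is mixture.GaussianMixture, whose
# score_samples() returns the log-likelihood array directly.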
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show() | {
"content_hash": "78ebe47a4964184b95e0954c34592ec0",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 32.170731707317074,
"alnum_prop": 0.6990144048521607,
"repo_name": "ronnyb29/kmeans-theory",
"id": "a8e28214a579182260bd0d1b0a4a91508a33575b",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/gmm_fit_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "338614"
},
{
"name": "HTML",
"bytes": "54329"
},
{
"name": "JavaScript",
"bytes": "259027"
},
{
"name": "Jupyter Notebook",
"bytes": "86564"
},
{
"name": "Python",
"bytes": "12596"
},
{
"name": "TeX",
"bytes": "10286"
}
],
"symlink_target": ""
} |
import sys
"""Sample that sets the replication behavior or recovery point objective (RPO) to default.
This sample is used on this page:
https://cloud.google.com/storage/docs/managing-turbo-replication
For more information, see README.md.
"""
# [START storage_set_rpo_default]
from google.cloud import storage
from google.cloud.storage.constants import RPO_DEFAULT
def set_rpo_default(bucket_name):
"""Sets the RPO to DEFAULT, disabling the turbo replication feature"""
# The ID of your GCS bucket
# bucket_name = "my-bucket"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
bucket.rpo = RPO_DEFAULT
bucket.patch()
print(f"RPO is set to DEFAULT for {bucket.name}.")
# [END storage_set_rpo_default]
if __name__ == "__main__":
set_rpo_default(bucket_name=sys.argv[1])
| {
"content_hash": "19a1fdf31bb0aa30e3448652fb3f86a5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 90,
"avg_line_length": 26.40625,
"alnum_prop": 0.7088757396449704,
"repo_name": "googleapis/python-storage",
"id": "883fee0c972c1e9b3c272e8ee3580c1e6334c226",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/storage_set_rpo_default.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1392987"
},
{
"name": "Shell",
"bytes": "32171"
}
],
"symlink_target": ""
} |
import argparse
import urllib.request
import re
import os
import csv
import io
import gnupg
import sys
import shutil
import time
USER_HOME = os.path.expanduser('~')
USER_M2_REPO = USER_HOME + '/.m2/repository/'
SONATYPE_RELEASE_URL = "https://oss.sonatype.org/content/repositories/releases"
SONATYPE_SNAPSHOT_URL = "https://oss.sonatype.org/content/repositories/snapshots"
SNAPSHOT = '-SNAPSHOT'
SOURCES_REGEX = re.compile('".*sources.jar"')
JAR_REGEX = re.compile('".*[0-9].jar"')
httplib2Available = True
try:
import httplib2
httplib2CacheDir = USER_HOME + '/.cache/httplib2'
if not os.path.exists(httplib2CacheDir):
os.makedirs(httplib2CacheDir)
h = httplib2.Http(httplib2CacheDir)
except ImportError:
httplib2Available = False
def downloadFile(url, dest):
if httplib2Available:
response, content = h.request(url)
with open(dest, 'wb') as f:
f.write(content)
else:
urllib.request.urlretrieve(url, dest)
def getDirectoryContent(url):
if httplib2Available:
response, content = h.request(url)
return content.decode('utf-8')
else:
path = urllib.request.urlopen(url)
return path.read().decode('utf-8')
class MavenArtifact:
def __init__(self, groupId, artifactId, version, pgpKeyFingerprint):
self.groupId = groupId
self.artifactId = artifactId
self.version = version
self.pgpKeyFingerprint = pgpKeyFingerprint
self.isSnapshot = version.endswith(SNAPSHOT)
directory = groupId.replace('.', '/')
self.mavenRepoUrl = '/' + directory + '/' + self.artifactId + '/' + self.version
if self.isSnapshot:
self.artifactDirectoryUrl = SONATYPE_SNAPSHOT_URL + self.mavenRepoUrl
self.artifactIdAndVersion = self.artifactId + '-' + self.version[:-len(SNAPSHOT)]
self.destFilenamePrefix = self.artifactIdAndVersion + SNAPSHOT
else:
self.artifactDirectoryUrl = SONATYPE_RELEASE_URL + self.mavenRepoUrl
self.artifactIdAndVersion = self.artifactId + '-' + self.version
self.destFilenamePrefix = self.artifactIdAndVersion
self.jarDestFilename = self.destFilenamePrefix + '.jar'
self.jarSigDestFilename = self.jarDestFilename + '.asc'
self.jarSourceDestFilename = self.destFilenamePrefix + '-sources.jar'
maybeSnapshot = ""
if self.isSnapshot:
maybeSnapshot = SNAPSHOT
self.localJarUrl = USER_M2_REPO + self.mavenRepoUrl + '/' + self.artifactIdAndVersion + maybeSnapshot + '.jar'
self.localSourceUrl = USER_M2_REPO + self.mavenRepoUrl + '/' + self.artifactIdAndVersion + maybeSnapshot + '-sources.jar'
self.localJarSigUrl = self.localJarUrl + '.asc'
self.localJarTimestamp = time.gmtime(0)
if os.path.isfile(self.localJarUrl):
self.localJarTimestamp = time.gmtime(os.path.getmtime(self.localJarUrl))
def __str__(self):
return self.groupId + ':' + self.artifactId + ':' + self.version
def installIn(self, project):
jarDest = project.libsDir + self.jarDestFilename
jarSigDest = project.libsDir + self.jarSigDestFilename
jarSourceDest = project.libsSourcesDir + self.jarSourceDestFilename
if os.path.exists(jarDest):
if self.isSnapshot:
print("Not fetching " + str(self) + " as SNAPSHOT artifact already exists")
return
elif self.verifySignature(jarSigDest, jarDest):
print("Not fetching " + str(self) + " as artifact already exists and signature is valid")
return
# Delete old artifacts
regex = re.compile(self.artifactId + '.*')
dirs = [project.libsDir, project.libsSourcesDir]
for d in dirs:
for f in os.listdir(d):
if regex.match(f):
fileToDelete = os.path.join(d, f)
print("Deleting old artifact " + fileToDelete)
os.remove(fileToDelete)
if not self.isSnapshot:
remoteJarUrl = self.artifactDirectoryUrl + '/' + self.artifactIdAndVersion + '.jar'
remoteSourceUrl = self.artifactDirectoryUrl + '/' + self.artifactIdAndVersion + '-sources.jar'
else:
print("Looking up remote artifact for " + str(self))
content = getDirectoryContent(self.artifactDirectoryUrl)
jars = JAR_REGEX.findall(content)
jars.sort()
newestJar = jars[-1]
remoteJarUrl = newestJar.replace('"', '')
components = remoteJarUrl.split('/')[-1].split('-')
timestamp = components[-2]
identifier = components[-1].split('.')[0]
remoteJarTimestamp = time.strptime(timestamp, "%Y%m%d.%H%M%S")
remoteSourceUrl = self.artifactDirectoryUrl + '/' + self.artifactIdAndVersion + '-' + timestamp + '-' + identifier + '-sources.jar'
remoteJarSigUrl = remoteJarUrl + '.asc'
# Place in project logic
        # If it's a snapshot and the localJarTimestamp is newer than the remote, use the local one.
# Or use the local one if it's not a snapshot but we have the artifact in the local maven cache
if ((self.isSnapshot and (self.localJarTimestamp > remoteJarTimestamp)) or (not self.isSnapshot and os.path.isfile(self.localJarUrl))):
print("Copying " + self.localJarUrl + " to " + project.libsDir)
shutil.copy(self.localJarUrl, jarDest)
if not self.isSnapshot:
print("Copying " + self.localJarSigUrl + " to " + project.libsDir)
shutil.copy(self.localJarSigUrl, jarSigDest)
print("Copying " + self.localSourceUrl + " to " + project.libsSourcesDir)
shutil.copy(self.localSourceUrl, jarSourceDest)
# Otherwise use the remote (snapshot) artifact
else:
print("Downloading " + self.jarDestFilename + " to " + project.libsDir)
downloadFile(remoteJarUrl, jarDest)
if not self.isSnapshot:
print("Downloading " + self.jarSigDestFilename + " to " + project.libsDir)
downloadFile(remoteJarSigUrl, jarSigDest)
print("Downloading " + self.jarSourceDestFilename + " to " + project.libsSourcesDir)
downloadFile(remoteSourceUrl, jarSourceDest)
if not self.isSnapshot:
if self.verifySignature(jarSigDest, jarDest):
print("Successfully verified signature for " + jarDest)
else:
raise Exception("Could not verify signature for " + jarDest)
# create the .properties file
f = open(jarDest + '.properties', 'w+')
f.write('src=../libs-sources/' + self.jarSourceDestFilename)
f.close()
def verifySignature(self, detachedSigFile, dataFile):
gpg = gnupg.GPG()
availableKeys = gpg.list_keys()
if not any(key['fingerprint'] == self.pgpKeyFingerprint for key in availableKeys):
longId = self.pgpKeyFingerprint[-16:]
import_result = gpg.recv_keys('pgp.mit.edu', '0x' + longId)
with io.open(detachedSigFile, 'rb') as f:
v = gpg.verify_file(f, dataFile)
return v.fingerprint == self.pgpKeyFingerprint
class Project:
def __init__(self, projectDir):
self.projectDir = projectDir
self.libsDir = self.projectDir + '/libs/'
self.libsSourcesDir = self.projectDir + '/libs-sources/'
if not os.path.exists(self.libsDir):
os.makedirs(self.libsDir)
if not os.path.exists(self.libsSourcesDir):
os.makedirs(self.libsSourcesDir)
def processArtifactsFile(artifactsFile, artifacts):
versionVariables = {}
csvLines = []
with open(artifactsFile) as f:
for line in f:
if '=' in line:
versionVariableLine = line.split('=', 1)
versionVariables[versionVariableLine[0]] = versionVariableLine[1].rstrip()
else:
csvLines.append(line)
reader = csv.reader(csvLines)
for row in reader:
groupId = row[0]
artifactId = row[1]
pgpKeyFingerprint = row[3]
if row[2][0] == '$':
version = versionVariables[row[2][1:]]
else:
version = row[2]
artifacts.append(MavenArtifact(groupId, artifactId, version, pgpKeyFingerprint))
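# Illustrative artifacts.csv contents (the coordinates, version and fingerprint below are
# made-up placeholders, not real artifacts):
#   LIB_VERSION=1.2.3-SNAPSHOT
#   com.example,example-core,$LIB_VERSION,0123456789ABCDEF0123456789ABCDEF01234567
#   com.example,example-extras,2.0.0,0123456789ABCDEF0123456789ABCDEF01234567
# Lines containing '=' define version variables; all other lines are
# groupId,artifactId,version,pgpKeyFingerprint rows.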
parser = argparse.ArgumentParser()
parser.add_argument("--project", "-p")
parser.add_argument("--file", "-f", nargs='*', help="Optional additional artifact files")
args = parser.parse_args()
args.project = os.path.abspath(args.project)
project = Project(args.project)
artifacts = []
projectArtifacts = args.project + "/artifacts.csv"
if os.path.isfile(projectArtifacts):
processArtifactsFile(projectArtifacts, artifacts)
for artifactFile in args.file or []:
    if os.path.isfile(artifactFile):
        processArtifactsFile(artifactFile, artifacts)
    else:
        print("Specified file does not exist: " + artifactFile)
for a in artifacts:
a.installIn(project)
| {
"content_hash": "71c0f79db9d52959092fa6926db7f13c",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 139,
"avg_line_length": 38.817351598173516,
"alnum_prop": 0.678508410775203,
"repo_name": "Flowdalic/MavenToAndroidAnt",
"id": "7a2ebec44914fe6c20f865ee2fe2e7b9df45fbc1",
"size": "8524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getMavenArtifactsNG.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15588"
}
],
"symlink_target": ""
} |
__author__ = 'rcj1492'
__created__ = '2017.03'
__license__ = 'MIT'
'''
PLEASE NOTE: mailgun requires domain verification to send messages
api uses [email protected] as sender address
make sure to add mx record under the mailgun subdomain
SETUP: https://documentation.mailgun.com/quickstart-sending.html#how-to-verify-your-domain
'''
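# For reference, domain verification comes down to DNS records on the sending
# (sub)domain -- roughly like the following (illustrative values only; the actual
# records are provided by the Mailgun control panel):
#   mg.example.com   TXT  "v=spf1 include:mailgun.org ~all"
#   mg.example.com   MX   10 mxa.mailgun.org.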
# TODO: incorporate rate limiting logic
class mailgunHandler(object):
''' handles responses from mailgun api and usage data '''
_class_fields = {
'schema': {
'rate_limits': [
{ 'requests': 100, 'period': 3600 },
{ 'requests': 10000, 'period': 30 * 24 * 3600 }
]
}
}
def __init__(self, usage_client=None):
'''
initialization method for mailgun client class
:param usage_client: callable that records usage data
'''
# construct class field model
from jsonmodel.validators import jsonModel
self.fields = jsonModel(self._class_fields)
# construct initial methods
self.rate_limits = self.fields.schema['rate_limits']
self.usage_client = usage_client
def handle(self, response):
# construct default response details
details = {
'method': response.request.method,
'code': response.status_code,
'url': response.url,
'error': '',
'json': None,
'headers': response.headers
}
# handle different codes
if details['code'] == 200:
details['json'] = response.json()
else:
details['error'] = response.content.decode()
# 200 Everything worked as expected
# 400 Bad Request - Often missing a required parameter
# 401 Unauthorized - No valid API key provided
# 402 Request Failed - Parameters were valid but request failed
# 404 Not Found - The requested item doesn’t exist
# 500, 502, 503, 504 Server Errors - something is wrong on Mailgun’s end
return details
# TODO: use webscraper, domain api and aws api to interact with registration
class mailgunRegister(object):
''' currently must be done manually '''
''' https://app.mailgun.com/app/account/security '''
''' https://documentation.mailgun.com/api-domains.html#domains '''
''' https://documentation.mailgun.com/quickstart-sending.html#how-to-verify-your-domain '''
def __init__(self):
pass
class mailgunClient(object):
''' a class of methods for managing email with mailgun api '''
# https://documentation.mailgun.com/api_reference.html
_class_fields = {
'schema': {
'api_endpoint': 'https://api.mailgun.net/v3',
'account_domain': 'collectiveacuity.com',
'api_key': 'key-e05af44440df8acc78ca21c26680fcc1',
'email_key': 'pubkey-ed63c920744999631abf67105ace5177',
'email_address': '[email protected]',
'recipient_list': [ '[email protected]' ],
'sender_email': '[email protected]',
'sender_name': 'Collective Acuity',
'email_subject': 'Test Mailgun API',
'content_text': 'Great to see it works!',
'content_html': '<p>Great to see it works!</p>',
'tracking_tags': [ 'newsletter' ],
'cc_list': [ '[email protected]' ],
'bcc_list': [ '[email protected]' ],
'delivery_time': 1490744726.6858199
}
}
def __init__(self, api_key, email_key, account_domain, usage_client=None, requests_handler=None):
'''
initialization method for mailgun client class
:param api_key: string with api key provided by mailgun
:param email_key: string with email validation key provide by mailgun
:param account_domain: string with domain from which to send email
:param usage_client: callable that records usage data
:param requests_handler: callable that handles requests errors
'''
title = '%s.__init__' % self.__class__.__name__
# construct class field model
from jsonmodel.validators import jsonModel
self.fields = jsonModel(self._class_fields)
# validate inputs
input_fields = {
'api_key': api_key,
'email_key': email_key,
'account_domain': account_domain
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct client properties
self.api_endpoint = self.fields.schema['api_endpoint']
self.account_domain = account_domain
self.api_key = api_key
self.email_key = email_key
# construct handlers
self.service_handler = mailgunHandler(usage_client)
self.requests_handler = requests_handler
def _get_request(self, url, params):
import requests
# construct request kwargs
request_kwargs = {
'url': url,
'auth': ('api', self.email_key ),
'params': params
}
# send request
try:
response = requests.get(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'GET'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
# handle response
response_details = self.service_handler.handle(response)
return response_details
def _post_request(self, url, data):
import requests
# construct request kwargs
request_kwargs = {
'url': url,
'auth': ('api', self.api_key),
'data': data
}
# send request
try:
response = requests.post(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'POST'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
# handle response
response_details = self.service_handler.handle(response)
return response_details
def send_email(self, recipient_list, sender_email, sender_name, email_subject, content_text='', content_html='', tracking_tags=None, cc_list=None, bcc_list=None, delivery_time=0.0):
title = '%s.send_email' % __class__.__name__
# validate inputs
input_fields = {
'recipient_list': recipient_list,
'sender_email': sender_email,
'sender_name': sender_name,
'email_subject': email_subject,
'content_text': content_text,
'content_html': content_html,
'tracking_tags': tracking_tags,
'cc_list': cc_list,
'bcc_list': bcc_list,
'delivery_time': delivery_time
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request_kwargs
request_kwargs = {
'url': '%s/%s/messages' % (self.api_endpoint, self.account_domain),
'data': {
'to': recipient_list,
'from': '%s <%s>' % (sender_name, sender_email),
'h:X-Mailgun-Native-Send': True,
# 'o:require-tls': True,
'subject': email_subject
}
}
# add content
if content_text:
request_kwargs['data']['text'] = content_text
elif content_html:
request_kwargs['data']['html'] = content_html
else:
raise IndexError('%s() requires either a content_text or content_html arg.' % title)
# add optional fields
if tracking_tags:
request_kwargs['data']['o:tag'] = tracking_tags
if cc_list:
request_kwargs['data']['cc'] = cc_list
if bcc_list:
request_kwargs['data']['bcc'] = bcc_list
if delivery_time:
from time import time
current_time = time()
if delivery_time - current_time > 3 * 24 * 60 * 60:
raise ValueError('%s(delivery_time=%s) may not be more than 3 days from now.' % (title, delivery_time))
elif delivery_time - current_time > 0:
from labpack.records.time import labDT
js_time = labDT.fromEpoch(delivery_time).rfc2822()
request_kwargs['data']['o:deliverytime'] = js_time
# send request
response_details = self._post_request(**request_kwargs)
return response_details
def validate_email(self, email_address):
'''
a method to validate an email address
:param email_address: string with email address to validate
:return: dictionary with validation fields in response_details['json']
'''
title = '%s.validate_email' % __class__.__name__
# validate inputs
object_title = '%s(email_address="")' % title
email_address = self.fields.validate(email_address, '.email_address', object_title)
# construct request_kwargs
request_kwargs = {
'url': '%s/address/validate' % self.api_endpoint,
'params': { 'address': email_address }
}
# send request
response_details = self._get_request(**request_kwargs)
return response_details
if __name__ == '__main__':
from labpack.records.settings import load_settings
mailgun_cred = load_settings('../../../cred/mailgun.yaml')
# construct client
from labpack.handlers.requests import handle_requests
mailgun_kwargs = {
'api_key': mailgun_cred['mailgun_api_key'],
'email_key': mailgun_cred['mailgun_email_key'],
'account_domain': mailgun_cred['mailgun_spf_route'],
'requests_handler': handle_requests
}
mailgun_client = mailgunClient(**mailgun_kwargs)
# test validation
email_address = '[email protected]'
response_details = mailgun_client.validate_email(email_address)
assert response_details['json']['is_valid']
# test send email
from time import time
send_kwargs = {
'recipient_list': [ email_address ],
'sender_email': '[email protected]',
'sender_name': 'Collective Acuity',
'email_subject': 'Test Mailgun API %s' % time(),
'content_text': 'Great to see it works!',
'delivery_time': time() + 5
}
response_details = mailgun_client.send_email(**send_kwargs)
assert response_details['code'] == 200
| {
"content_hash": "3c7e59f42bef984fd2785edf697cd24f",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 185,
"avg_line_length": 35.05882352941177,
"alnum_prop": 0.5636700812433769,
"repo_name": "collectiveacuity/labPack",
"id": "6d86721408c6241c3faee98d2aa56b0b36cc88a1",
"size": "11328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labpack/email/mailgun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "792"
},
{
"name": "Python",
"bytes": "935809"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
} |
import logging
import os
import re
import socket
import warnings
from conf_d import Configuration
from beaver.utils import eglob
class BeaverConfig():
def __init__(self, args, logger=None):
self._logger = logger or logging.getLogger(__name__)
self._logger.debug('Processing beaver portion of config file %s' % args.config)
self._section_defaults = {
'add_field': '',
'debug': '0',
'discover_interval': '15',
'encoding': 'utf_8',
# should be a python regex of files to remove
'exclude': '',
'format': '',
# throw out empty lines instead of shipping them
'ignore_empty': '0',
# allow ignoring copytruncate results
'ignore_truncate': '0',
# buffered tokenization
# we string-escape the delimiter later so that we can put escaped characters in our config file
'delimiter': '\n',
'size_limit': '',
# multiline events support. Default is disabled
'multiline_regex_after': '',
'multiline_regex_before': '',
'message_format': '',
'sincedb_write_interval': '15',
'stat_interval': '1',
'start_position': 'end',
'tags': '',
'tail_lines': '0',
'type': '',
}
self._main_defaults = {
'mqtt_clientid': 'mosquitto',
'mqtt_host': 'localhost',
'mqtt_port': '1883',
'mqtt_topic': '/logstash',
'mqtt_keepalive': '60',
'rabbitmq_host': os.environ.get('RABBITMQ_HOST', 'localhost'),
'rabbitmq_port': os.environ.get('RABBITMQ_PORT', '5672'),
'rabbitmq_vhost': os.environ.get('RABBITMQ_VHOST', '/'),
'rabbitmq_username': os.environ.get('RABBITMQ_USERNAME', 'guest'),
'rabbitmq_password': os.environ.get('RABBITMQ_PASSWORD', 'guest'),
'rabbitmq_queue': os.environ.get('RABBITMQ_QUEUE', 'logstash-queue'),
'rabbitmq_exchange_type': os.environ.get('RABBITMQ_EXCHANGE_TYPE', 'direct'),
'rabbitmq_exchange_durable': os.environ.get('RABBITMQ_EXCHANGE_DURABLE', '0'),
'rabbitmq_queue_durable': os.environ.get('RABBITMQ_QUEUE_DURABLE', '0'),
'rabbitmq_ha_queue': os.environ.get('RABBITMQ_HA_QUEUE', '0'),
'rabbitmq_key': os.environ.get('RABBITMQ_KEY', 'logstash-key'),
'rabbitmq_exchange': os.environ.get('RABBITMQ_EXCHANGE', 'logstash-exchange'),
'redis_url': os.environ.get('REDIS_URL', 'redis://localhost:6379/0'),
'redis_namespace': os.environ.get('REDIS_NAMESPACE', 'logstash:beaver'),
'redis_password': '',
'sqs_aws_access_key': '',
'sqs_aws_secret_key': '',
'sqs_aws_region': 'us-east-1',
'sqs_aws_queue': '',
'tcp_host': '127.0.0.1',
'tcp_port': '9999',
'tcp_ssl_enable': False,
'tcp_ssl_verify': False,
'tcp_ssl_cacert': None,
'tcp_ssl_cert': None,
'udp_host': os.environ.get('UDP_HOST', '127.0.0.1'),
'udp_port': os.environ.get('UDP_PORT', '9999'),
'zeromq_address': os.environ.get('ZEROMQ_ADDRESS', 'tcp://localhost:2120'),
'zeromq_pattern': 'push',
'zeromq_hwm': os.environ.get('ZEROMQ_HWM', ''),
# exponential backoff
'respawn_delay': '3',
'max_failure': '7',
# interprocess queue max size before puts block
'max_queue_size': '100',
# time in seconds before updating the file mapping
'update_file_mapping_time': '', # deprecated
'discover_interval': '15',
# time in seconds from last command sent before a queue kills itself
'queue_timeout': '60',
# kill and respawn worker process after given number of seconds
'refresh_worker_process': '',
# time in seconds to wait on queue.get() block before raising Queue.Empty exception
'wait_timeout': '5',
# path to sincedb sqlite db
'sincedb_path': '',
'logstash_version': '',
# ssh tunnel support
'ssh_key_file': '',
'ssh_tunnel': '',
'ssh_tunnel_port': '',
'ssh_remote_host': '',
'ssh_remote_port': '',
'ssh_options': '',
'subprocess_poll_sleep': '1',
# the following can be passed via argparse
'zeromq_bind': os.environ.get('BEAVER_MODE', 'bind' if os.environ.get('BIND', False) else 'connect'),
'files': os.environ.get('BEAVER_FILES', ''),
'format': os.environ.get('BEAVER_FORMAT', 'json'),
'fqdn': '0',
'hostname': '',
'output': '',
'path': os.environ.get('BEAVER_PATH', '/var/log'),
'transport': os.environ.get('BEAVER_TRANSPORT', 'stdout'), # this needs to be passed to the import class somehow
# Path to individual file configs. These override any sections in the main beaver.ini file
'confd_path': '/etc/beaver/conf.d',
# the following are parsed before the config file is parsed
# but may be useful at runtime
'config': '/dev/null',
'debug': '0',
'daemonize': '0',
'pid': '',
}
self._configfile = args.config
self._globbed = []
self._parse(args)
for key in self._beaver_config:
self._logger.debug('[CONFIG] "{0}" => "{1}"'.format(key, self._beaver_config.get(key)))
self._update_files()
self._check_for_deprecated_usage()
def beaver_config(self):
return self._beaver_config
def get(self, key, default=None):
return self._beaver_config.get(key, default)
def set(self, key, value):
self._beaver_config[key] = value
def get_field(self, field, filename):
return self._files.get(os.path.realpath(filename), self._section_defaults)[field]
def addglob(self, globname, globbed):
if globname not in self._globbed:
self._logger.debug('Adding glob {0}'.format(globname))
config = self._file_config[globname]
self._file_config[globname] = config
for key in config:
self._logger.debug('Config: "{0}" => "{1}"'.format(key, config[key]))
else:
config = self._file_config.get(globname)
for filename in globbed:
self._files[filename] = config
self._globbed.append(globname)
def getfilepaths(self):
return self._files.keys()
def getglobs(self):
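        # Returns a mapping of each configured glob pattern to its 'exclude'
        # pattern (possibly empty), e.g. {'/var/log/*.log': ''} -- the list
        # comprehension below interleaves names and excludes, and zip() pairs
        # them back up.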
globs = []
[globs.extend([name, self._file_config[name].get('exclude')]) for name in self._file_config]
return dict(zip(globs[0::2], globs[1::2]))
def use_ssh_tunnel(self):
required = [
'ssh_key_file',
'ssh_tunnel',
'ssh_tunnel_port',
'ssh_remote_host',
'ssh_remote_port',
]
has = len(filter(lambda x: self.get(x) is not None, required))
if has > 0 and has != len(required):
self._logger.warning('Missing {0} of {1} required config variables for ssh'.format(len(required) - has, len(required)))
return has == len(required)
def _check_for_deprecated_usage(self):
env_vars = [
'RABBITMQ_HOST',
'RABBITMQ_PORT',
'RABBITMQ_VHOST',
'RABBITMQ_USERNAME',
'RABBITMQ_PASSWORD',
'RABBITMQ_QUEUE',
'RABBITMQ_EXCHANGE_TYPE',
'RABBITMQ_EXCHANGE_DURABLE',
'RABBITMQ_KEY',
'RABBITMQ_EXCHANGE',
'REDIS_URL',
'REDIS_NAMESPACE',
'UDP_HOST',
'UDP_PORT',
'ZEROMQ_ADDRESS',
'BEAVER_FILES',
'BEAVER_FORMAT',
'BEAVER_MODE',
'BEAVER_PATH',
'BEAVER_TRANSPORT',
]
deprecated_env_var_usage = []
for e in env_vars:
v = os.environ.get(e, None)
if v is not None:
deprecated_env_var_usage.append(e)
if len(deprecated_env_var_usage) > 0:
warnings.simplefilter('default')
warnings.warn('ENV Variable support will be removed by version 20. Stop using: {0}'.format(', '.join(deprecated_env_var_usage)), DeprecationWarning)
update_file_mapping_time = self.get('update_file_mapping_time')
if update_file_mapping_time:
self.set('discover_interval', update_file_mapping_time)
warnings.simplefilter('default')
warnings.warn('"update_file_mapping_time" has been supersceded by "discover_interval". Stop using: "update_file_mapping_time', DeprecationWarning)
def _parse(self, args):
def _main_parser(config):
transpose = ['config', 'confd_path', 'debug', 'daemonize', 'files', 'format', 'fqdn', 'hostname', 'path', 'pid', 'transport']
            namespace_dict = vars(args)
            for key in transpose:
                if key not in namespace_dict or namespace_dict[key] is None or namespace_dict[key] == '':
                    continue
                config[key] = namespace_dict[key]
if args.mode:
config['zeromq_bind'] = args.mode
# HACK: Python 2.6 ConfigParser does not properly
# handle non-string values
for key in config:
if config[key] == '':
config[key] = None
require_bool = ['debug', 'daemonize', 'fqdn', 'rabbitmq_exchange_durable', 'rabbitmq_queue_durable', 'rabbitmq_ha_queue']
for key in require_bool:
config[key] = bool(int(config[key]))
require_int = [
'max_failure',
'max_queue_size',
'queue_timeout',
'rabbitmq_port',
'respawn_delay',
'subprocess_poll_sleep',
'refresh_worker_process',
'tcp_port',
'udp_port',
'wait_timeout',
'zeromq_hwm',
'logstash_version',
]
for key in require_int:
if config[key] is not None:
config[key] = int(config[key])
require_float = [
'update_file_mapping_time',
'discover_interval',
]
for key in require_float:
if config[key] is not None:
config[key] = float(config[key])
if config.get('format') == 'null':
config['format'] = 'raw'
if config['files'] is not None and type(config['files']) == str:
config['files'] = config['files'].split(',')
if config['path'] is not None:
config['path'] = os.path.realpath(config['path'])
if not os.path.isdir(config['path']):
raise LookupError('{0} does not exist'.format(config['path']))
if config.get('hostname') is None:
if config.get('fqdn') is True:
config['hostname'] = socket.getfqdn()
else:
config['hostname'] = socket.gethostname()
if config.get('sincedb_path'):
config['sincedb_path'] = os.path.realpath(config.get('sincedb_path'))
if config['zeromq_address'] and type(config['zeromq_address']) == str:
config['zeromq_address'] = [x.strip() for x in config.get('zeromq_address').split(',')]
            if config.get('ssh_options') is not None:
                csv = config.get('ssh_options')
                config['ssh_options'] = []
                if type(csv) == str:
                    for opt in csv.split(','):
                        config['ssh_options'].append('-o %s' % opt.strip())
            else:
                config['ssh_options'] = []
config['globs'] = {}
return config
def _section_parser(config, raise_exceptions=True):
'''Parse a given INI-style config file using ConfigParser module.
Stanza's names match file names, and properties are defaulted as in
http://logstash.net/docs/1.1.1/inputs/file
Config file example:
[/var/log/syslog]
type: syslog
tags: sys,main
[/var/log/auth]
type: syslog
;tags: auth,main
'''
fields = config.get('add_field', '')
if type(fields) != dict:
try:
if type(fields) == str:
fields = filter(None, fields.split(','))
if len(fields) == 0:
config['fields'] = {}
elif (len(fields) % 2) == 1:
if raise_exceptions:
raise Exception('Wrong number of values for add_field')
else:
fieldkeys = fields[0::2]
fieldvalues = [[x] for x in fields[1::2]]
config['fields'] = dict(zip(fieldkeys, fieldvalues))
except TypeError:
config['fields'] = {}
if 'add_field' in config:
del config['add_field']
try:
tags = config.get('tags', '')
if type(tags) == str:
tags = filter(None, tags.split(','))
if len(tags) == 0:
tags = []
config['tags'] = tags
except TypeError:
config['tags'] = []
if config.get('format') == 'null':
config['format'] = 'raw'
file_type = config.get('type', None)
if not file_type:
config['type'] = 'file'
require_bool = ['debug', 'ignore_empty', 'ignore_truncate']
for k in require_bool:
config[k] = bool(int(config[k]))
config['delimiter'] = config['delimiter'].decode('string-escape')
if config['multiline_regex_after']:
config['multiline_regex_after'] = re.compile(config['multiline_regex_after'])
if config['multiline_regex_before']:
config['multiline_regex_before'] = re.compile(config['multiline_regex_before'])
require_int = ['sincedb_write_interval', 'stat_interval', 'tail_lines']
for k in require_int:
config[k] = int(config[k])
return config
conf = Configuration(
name='beaver',
path=self._configfile,
main_defaults=self._main_defaults,
section_defaults=self._section_defaults,
main_parser=_main_parser,
section_parser=_section_parser,
path_from_main='confd_path'
)
config = conf.raw()
self._beaver_config = config['beaver']
self._file_config = config['sections']
self._main_parser = _main_parser(self._main_defaults)
self._section_defaults = _section_parser(self._section_defaults, raise_exceptions=False)
self._files = {}
for section in config['sections']:
globs = eglob(section, config['sections'][section].get('exclude', ''))
if not globs:
self._logger.debug('Skipping glob due to no files found: %s' % section)
continue
for globbed_file in globs:
self._files[os.path.realpath(globbed_file)] = config['sections'][section]
def _update_files(self):
globs = self.get('files', default=[])
files = self.get('files', default=[])
if globs:
globs = dict(zip(globs, [None]*len(globs)))
else:
globs = {}
try:
files.extend(self.getfilepaths())
globs.update(self.getglobs())
except AttributeError:
files = self.getfilepaths()
globs = self.getglobs()
self.set('globs', globs)
self.set('files', files)
for f in files:
if f not in self._file_config:
self._file_config[f] = self._section_defaults
| {
"content_hash": "05a7db7a3194402cf3569aca3adf58fa",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 160,
"avg_line_length": 36.94183445190156,
"alnum_prop": 0.5121419487676376,
"repo_name": "moniker-dns/debian-beaver",
"id": "0af7ebab9419d04861f56d45fdef6c10f6a980d3",
"size": "16537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beaver/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "D",
"bytes": "4149"
},
{
"name": "Python",
"bytes": "121852"
},
{
"name": "Shell",
"bytes": "929"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0003_auto_20170204_1135'),
]
operations = [
migrations.AlterField(
model_name='crpdate',
name='job',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='crpdate', to='company.Job'),
),
]
| {
"content_hash": "9128869984a0cdea8ea500adea038204",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 126,
"avg_line_length": 25.526315789473685,
"alnum_prop": 0.6371134020618556,
"repo_name": "aakashrana1995/svnit-tnp",
"id": "fda6bf22ed6b3e17a092777f9ae257d7b60413e4",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tnp/company/migrations/0004_auto_20170204_1238.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45609"
},
{
"name": "HTML",
"bytes": "111453"
},
{
"name": "JavaScript",
"bytes": "68394"
},
{
"name": "Python",
"bytes": "112993"
}
],
"symlink_target": ""
} |
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = Client(account_sid, auth_token)
activities = client.taskrouter.workspaces(workspace_sid).activities.list()
for activity in activities:
print(activity.friendly_name)
| {
"content_hash": "09ded0a727c5eac18b20c52ec32a97e0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 34.166666666666664,
"alnum_prop": 0.802439024390244,
"repo_name": "teoreteetik/api-snippets",
"id": "6967e238f0b29cd3ab7ddaab23a6bd42fabc4576",
"size": "483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/taskrouter/activities/list/get/example-1/example-1.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
import os
import pytest
from pex import interpreter
from pex.testing import IS_PYPY, ensure_python_interpreter
try:
from mock import patch
except ImportError:
from unittest.mock import patch
class TestPythonInterpreter(object):
@pytest.mark.skipif('sys.version_info >= (3,0)')
def test_all_does_not_raise_with_empty_path_envvar(self):
""" additionally, tests that the module does not raise at import """
with patch.dict(os.environ, clear=True):
reload(interpreter)
interpreter.PythonInterpreter.all()
@pytest.mark.skipif(IS_PYPY)
def test_interpreter_versioning(self):
test_version_tuple = (2, 7, 10)
test_version = '.'.join(str(x) for x in test_version_tuple)
test_interpreter = ensure_python_interpreter(test_version)
py_interpreter = interpreter.PythonInterpreter.from_binary(test_interpreter)
assert py_interpreter.identity.version == test_version_tuple
| {
"content_hash": "d03ce45a938ed47929dda246a81eeb89",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 31.620689655172413,
"alnum_prop": 0.7382769901853872,
"repo_name": "kwlzn/pex",
"id": "cccf4a8dfd04b9f5c02ba1f828ff5079b8ec6106",
"size": "1049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_interpreter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "439852"
},
{
"name": "Shell",
"bytes": "311"
}
],
"symlink_target": ""
} |
from django.http import HttpResponseBadRequest, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from google.appengine.ext import ndb
import datetime
from entities import *
from util import *
from constants import *
@csrf_exempt
def event_refresher(request):
'''
    refreshes the events every minute and updates event status by state (expired/live)
:param request:
:return:
'''
TAG = 'EVENT_REFRESHER: '
resolve_expired_events()
resolve_live_events()
return HttpResponse(create_response(OK, []))
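# Illustrative cron.yaml entry that could drive this handler on App Engine
# (the URL path is an assumption, not taken from this project):
#   cron:
#   - description: refresh event statuses
#     url: /cron/event_refresher
#     schedule: every 1 minutes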
def resolve_expired_events():
TAG = 'UPDATE_EVENT_STATUS: '
now_time = datetime.datetime.now()
filtered_events = event.query(event.status == LIVE_EVENT,event.expire_date < now_time)
filtered_cnt = filtered_events.count()
filtered_events_result = filtered_events.fetch()
for iter_event in filtered_events_result:
iter_event.status = EXPIRED_EVENT
iter_event.put()
logging.debug('%s%s expired events has been resolved',TAG,filtered_cnt)
def resolve_live_events():
TAG = 'UPDATE_EVENT_STATUS: '
now_time = datetime.datetime.now()
filtered_events = event.query(ndb.AND(event.status == OPEN_EVENT,
event.start_date < now_time )) # now game is on
filtered_cnt = filtered_events.count()
filtered_events_result = filtered_events.fetch()
for iter_event in filtered_events_result:
if iter_event.expire_date > now_time:
iter_event.status = LIVE_EVENT
iter_event.put()
logging.debug('%s%s live events has been resolved',TAG,filtered_cnt)
@csrf_exempt
def resolve_kick_of_events(request):
'''
notify the user one hour before the kick off
:return:
'''
TAG = 'UPDATE_EVENT_STATUS: '
now_time = datetime.datetime.now()
filtered_events = event.query(ndb.AND(event.status == OPEN_EVENT,
event.start_date < now_time + datetime.timedelta(hours=1))) # less than hour to start
filtered_cnt = filtered_events.count()
filtered_events_result = filtered_events.fetch()
for iter_event in filtered_events_result:
for event_member_key in iter_event.members:
event_member = ndb.Key('account',int(event_member_key.id())).get()
try:
if iter_event.members_count < int(iter_event.min_attend): # notify about closed event
notify_str = "Your event didn't reach minimum required attendance!"
else:
notify_str = "Less than an hour til we kick off!"
send_notifcation_to_user(event_member.notifications_token,
notify_str,
"",
iter_event.custom_to_dict())
except Exception as e:
logging.debug("trying to send notification to user {0} failed\nExcetpions:\n{1}".format(int(event_member_key.id()),e))
continue
logging.debug('%s%s kick off events has been resolved',TAG,filtered_cnt)
return HttpResponse(create_response(OK, []))
@csrf_exempt
def update_events_by_scheme(request):
TAG = 'UPDATE_ALL_BY_SCHEME'
all_events = event.query().fetch()
for iter_event in all_events:
iter_event.put()
return HttpResponse(create_response(OK, []))
@csrf_exempt
def update_users_by_scheme(request):
TAG = 'UPDATE_ALL_BY_SCHEME'
all_users = account.query().fetch()
for iter_user in all_users:
iter_user.put()
return HttpResponse(create_response(OK, [])) | {
"content_hash": "8d470848b87da017ac8459880666cf1b",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 124,
"avg_line_length": 33.708333333333336,
"alnum_prop": 0.703955500618047,
"repo_name": "wattad169/sportbuddy_server",
"id": "469aac00aa9c373b7e8969fa90a9e615573cddd4",
"size": "3236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/cron_service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "45763"
},
{
"name": "HTML",
"bytes": "69972"
},
{
"name": "JavaScript",
"bytes": "117960"
},
{
"name": "Python",
"bytes": "4509675"
}
],
"symlink_target": ""
} |
import re
class Want(object):
"""Defines a request, possibly with a specific version"""
def __init__(self,
requirement):
self.requirement = requirement
@property
def tool(self):
return re.findall(r".*?(?=[0-9])", self.requirement + '0')[0]
@property
def version(self):
result = re.findall(r"(?=[0-9]).*", self.requirement)
return result[0] if result else ''
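# Example usage (a sketch; the requirement strings are made up):
#   Want('maya2016').tool     -> 'maya'
#   Want('maya2016').version  -> '2016'
#   Want('gcc').version       -> ''   (no digits, so the version is empty)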
| {
"content_hash": "35323ec5b3e624211857536266d63d93",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 24.22222222222222,
"alnum_prop": 0.5688073394495413,
"repo_name": "koborit/SpaceSwitcherSample",
"id": "569ee11b8367fe883f6f0cbfa68ccb7065ea12b9",
"size": "436",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Ecosystem/ecosystem/want.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "887"
},
{
"name": "Python",
"bytes": "20750"
},
{
"name": "Shell",
"bytes": "837"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import os
import sys
import argparse
from .. import prov_definition
from .. import prov_deployment
from .. import prov_execution
from .. import utils
from ..persistence import persistence
from .command import Command
LAST_TRIAL = '.last_trial'
def non_negative(string):
value = int(string)
if value < 0:
raise argparse.ArgumentTypeError(
"%s is not a non-negative integer value" % string)
return value
class Run(Command):
def add_arguments(self):
add_arg = self.add_argument
add_cmd = self.add_argument_cmd
add_arg('-v', '--verbose', action='store_true',
help='increase output verbosity')
add_arg('-b', '--bypass-modules', action='store_true',
help='bypass module dependencies analysis, assuming that no '
'module changes occurred since last execution')
add_arg('-c', '--depth-context', choices=['non-user', 'all'],
default='non-user',
help='functions subject to depth computation when capturing '
'activations (defaults to non-user)')
add_arg('-d', '--depth', type=non_negative, default=1,
help='depth for capturing function activations (defaults to '
'1)')
add_arg('-e', '--execution-provenance', default="Profiler",
choices=['Profiler', 'InspectProfiler', 'Tracer'],
help='execution provenance provider. (defaults to Profiler)')
add_arg('--disasm', action='store_true',
help='show script disassembly')
add_arg('--meta', action='store_true',
                help='execute noWorkflow meta profiler')
add_arg('--name', type=str,
help="set branch name used for tracking history")
add_arg('--dir', type=str,
help='set project path. The noworkflow database folder will '
'be created in this path. Default to script directory')
add_cmd('--create_last', action='store_true')
add_cmd('script', nargs=argparse.REMAINDER,
help='Python script to be executed')
def execute(self, args):
if args.meta:
utils.meta_profiler.active = True
utils.meta_profiler.data['cmd'] = ' '.join(sys.argv)
utils.verbose = args.verbose
utils.print_msg('removing noWorkflow boilerplate')
args_script = args.script
args.script = os.path.realpath(args.script[0])
if not os.path.exists(args.script): # TODO: check this using argparse
utils.print_msg('the script does not exist', True)
sys.exit(1)
script_dir = args.dir or os.path.dirname(args.script)
# Replace now's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(args.script)
# Clear argv
sys.argv = args_script
# Clear up the __main__ namespace
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({'__name__' : '__main__',
'__file__' : args.script,
'__builtins__': __builtins__,
})
with open(args.script, 'rb') as f:
metascript = {
'trial_id': None,
'code': f.read(),
'path': args.script,
'compiled': None,
'definition': None,
'name': args.name or os.path.basename(sys.argv[0])
}
try:
self.run(script_dir, args, metascript, __main__.__dict__)
finally:
if args.create_last:
tmp = os.path.join(os.path.dirname(args.script), LAST_TRIAL)
with open(tmp, 'w') as f:
f.write(str(metascript['trial_id']))
def run(self, script_dir, args, metascript, ns):
utils.print_msg('setting up local provenance store')
persistence.connect(script_dir)
utils.print_msg('collecting definition provenance')
prov_definition.collect_provenance(args, metascript)
utils.print_msg('collecting deployment provenance')
prov_deployment.collect_provenance(args, metascript)
        utils.print_msg('collecting execution provenance')
prov_execution.collect_provenance(args, metascript, ns)
utils.meta_profiler.save()
return prov_execution.provider
| {
"content_hash": "6f554a44be7cfda477f00a27a9c7aeed",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 78,
"avg_line_length": 37.04032258064516,
"alnum_prop": 0.5649902024820379,
"repo_name": "paopao74cn/noworkflow",
"id": "3338861c2d3bfc7f40df8c125313cdbb9d5fd64f",
"size": "4812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capture/noworkflow/now/cmd/cmd_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37541"
},
{
"name": "HTML",
"bytes": "18438"
},
{
"name": "Java",
"bytes": "20193"
},
{
"name": "JavaScript",
"bytes": "52235"
},
{
"name": "Prolog",
"bytes": "6207"
},
{
"name": "Python",
"bytes": "266220"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import os
import pytest
from cryptography.exceptions import AlreadyFinalized, InvalidSignature
from cryptography.hazmat.primitives import hashes, interfaces
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.utils import bit_length
from .fixtures_dsa import (
DSA_KEY_1024, DSA_KEY_2048, DSA_KEY_3072
)
from ...utils import (
der_encode_dsa_signature, load_fips_dsa_key_pair_vectors,
load_fips_dsa_sig_vectors, load_vectors_from_file,
)
@pytest.mark.dsa
class TestDSA(object):
def test_generate_dsa_parameters(self, backend):
parameters = dsa.generate_parameters(1024, backend)
assert isinstance(parameters, interfaces.DSAParameters)
def test_generate_invalid_dsa_parameters(self, backend):
with pytest.raises(ValueError):
dsa.generate_parameters(1, backend)
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join(
"asymmetric", "DSA", "FIPS_186-3", "KeyPair.rsp"),
load_fips_dsa_key_pair_vectors
)
)
def test_generate_dsa_keys(self, vector, backend):
parameters = dsa.DSAParameterNumbers(
p=vector['p'],
q=vector['q'],
g=vector['g']
).parameters(backend)
skey = parameters.generate_private_key()
if isinstance(skey, interfaces.DSAPrivateKeyWithNumbers):
numbers = skey.private_numbers()
skey_parameters = numbers.public_numbers.parameter_numbers
pkey = skey.public_key()
parameters = pkey.parameters()
parameter_numbers = parameters.parameter_numbers()
assert parameter_numbers.p == skey_parameters.p
assert parameter_numbers.q == skey_parameters.q
assert parameter_numbers.g == skey_parameters.g
assert skey_parameters.p == vector['p']
assert skey_parameters.q == vector['q']
assert skey_parameters.g == vector['g']
assert skey.key_size == bit_length(vector['p'])
assert pkey.key_size == skey.key_size
public_numbers = pkey.public_numbers()
assert numbers.public_numbers.y == public_numbers.y
assert numbers.public_numbers.y == pow(
skey_parameters.g, numbers.x, skey_parameters.p
)
def test_generate_dsa_private_key_and_parameters(self, backend):
skey = dsa.generate_private_key(1024, backend)
assert skey
if isinstance(skey, interfaces.DSAPrivateKeyWithNumbers):
numbers = skey.private_numbers()
skey_parameters = numbers.public_numbers.parameter_numbers
assert numbers.public_numbers.y == pow(
skey_parameters.g, numbers.x, skey_parameters.p
)
def test_invalid_parameters_values(self, backend):
# Test a p < 1024 bits in length
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=2 ** 1000,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
).parameters(backend)
# Test a p < 2048 bits in length
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=2 ** 2000,
q=DSA_KEY_2048.public_numbers.parameter_numbers.q,
g=DSA_KEY_2048.public_numbers.parameter_numbers.g,
).parameters(backend)
# Test a p < 3072 bits in length
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=2 ** 3000,
q=DSA_KEY_3072.public_numbers.parameter_numbers.q,
g=DSA_KEY_3072.public_numbers.parameter_numbers.g,
).parameters(backend)
# Test a p > 3072 bits in length
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=2 ** 3100,
q=DSA_KEY_3072.public_numbers.parameter_numbers.q,
g=DSA_KEY_3072.public_numbers.parameter_numbers.g,
).parameters(backend)
# Test a q < 160 bits in length
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=2 ** 150,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
).parameters(backend)
# Test a q < 256 bits in length
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=DSA_KEY_2048.public_numbers.parameter_numbers.p,
q=2 ** 250,
g=DSA_KEY_2048.public_numbers.parameter_numbers.g
).parameters(backend)
# Test a q > 256 bits in length
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=DSA_KEY_3072.public_numbers.parameter_numbers.p,
q=2 ** 260,
g=DSA_KEY_3072.public_numbers.parameter_numbers.g,
).parameters(backend)
# Test a g < 1
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=0
).parameters(backend)
# Test a g = 1
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=1
).parameters(backend)
# Test a g > p
with pytest.raises(ValueError):
dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=2 ** 1200
).parameters(backend)
def test_invalid_dsa_private_key_arguments(self, backend):
# Test a p < 1024 bits in length
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=2 ** 1000,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_1024.public_numbers.y
),
x=DSA_KEY_1024.x
).private_key(backend)
# Test a p < 2048 bits in length
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=2 ** 2000,
q=DSA_KEY_2048.public_numbers.parameter_numbers.q,
g=DSA_KEY_2048.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_2048.public_numbers.y
),
x=DSA_KEY_2048.x,
).private_key(backend)
# Test a p < 3072 bits in length
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=2 ** 3000,
q=DSA_KEY_3072.public_numbers.parameter_numbers.q,
g=DSA_KEY_3072.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_3072.public_numbers.y
),
x=DSA_KEY_3072.x,
).private_key(backend)
# Test a p > 3072 bits in length
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=2 ** 3100,
q=DSA_KEY_3072.public_numbers.parameter_numbers.q,
g=DSA_KEY_3072.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_3072.public_numbers.y
),
x=DSA_KEY_3072.x,
).private_key(backend)
# Test a q < 160 bits in length
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=2 ** 150,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_1024.public_numbers.y
),
x=DSA_KEY_1024.x,
).private_key(backend)
# Test a q < 256 bits in length
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_2048.public_numbers.parameter_numbers.p,
q=2 ** 250,
g=DSA_KEY_2048.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_2048.public_numbers.y
),
x=DSA_KEY_2048.x,
).private_key(backend)
# Test a q > 256 bits in length
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_3072.public_numbers.parameter_numbers.p,
q=2 ** 260,
g=DSA_KEY_3072.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_3072.public_numbers.y
),
x=DSA_KEY_3072.x,
).private_key(backend)
# Test a g < 1
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=0,
),
y=DSA_KEY_1024.public_numbers.y
),
x=DSA_KEY_1024.x,
).private_key(backend)
# Test a g = 1
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=1,
),
y=DSA_KEY_1024.public_numbers.y
),
x=DSA_KEY_1024.x,
).private_key(backend)
# Test a g > p
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=2 ** 1200,
),
y=DSA_KEY_1024.public_numbers.y
),
x=DSA_KEY_1024.x,
).private_key(backend)
# Test x = 0
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_1024.public_numbers.y
),
x=0,
).private_key(backend)
# Test x < 0
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_1024.public_numbers.y
),
x=-2,
).private_key(backend)
# Test x = q
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_1024.public_numbers.y
),
x=2 ** 159,
).private_key(backend)
# Test x > q
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_1024.public_numbers.y
),
x=2 ** 200,
).private_key(backend)
# Test y != (g ** x) % p
with pytest.raises(ValueError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=2 ** 100
),
x=DSA_KEY_1024.x,
).private_key(backend)
# Test a non-integer y value
with pytest.raises(TypeError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=None
),
x=DSA_KEY_1024.x,
).private_key(backend)
# Test a non-integer x value
with pytest.raises(TypeError):
dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_1024.public_numbers.y
),
x=None,
).private_key(backend)
def test_invalid_dsa_public_key_arguments(self, backend):
# Test a p < 1024 bits in length
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=2 ** 1000,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_1024.public_numbers.y
).public_key(backend)
# Test a p < 2048 bits in length
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=2 ** 2000,
q=DSA_KEY_2048.public_numbers.parameter_numbers.q,
g=DSA_KEY_2048.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_2048.public_numbers.y
).public_key(backend)
# Test a p < 3072 bits in length
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=2 ** 3000,
q=DSA_KEY_3072.public_numbers.parameter_numbers.q,
g=DSA_KEY_3072.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_3072.public_numbers.y
).public_key(backend)
# Test a p > 3072 bits in length
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=2 ** 3100,
q=DSA_KEY_3072.public_numbers.parameter_numbers.q,
g=DSA_KEY_3072.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_3072.public_numbers.y
).public_key(backend)
# Test a q < 160 bits in length
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=2 ** 150,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_1024.public_numbers.y
).public_key(backend)
# Test a q < 256 bits in length
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_2048.public_numbers.parameter_numbers.p,
q=2 ** 250,
g=DSA_KEY_2048.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_2048.public_numbers.y
).public_key(backend)
# Test a q > 256 bits in length
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_3072.public_numbers.parameter_numbers.p,
q=2 ** 260,
g=DSA_KEY_3072.public_numbers.parameter_numbers.g,
),
y=DSA_KEY_3072.public_numbers.y
).public_key(backend)
# Test a g < 1
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=0,
),
y=DSA_KEY_1024.public_numbers.y
).public_key(backend)
# Test a g = 1
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=1,
),
y=DSA_KEY_1024.public_numbers.y
).public_key(backend)
# Test a g > p
with pytest.raises(ValueError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=2 ** 1200,
),
y=DSA_KEY_1024.public_numbers.y
).public_key(backend)
# Test a non-integer y value
with pytest.raises(TypeError):
dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=DSA_KEY_1024.public_numbers.parameter_numbers.p,
q=DSA_KEY_1024.public_numbers.parameter_numbers.q,
g=DSA_KEY_1024.public_numbers.parameter_numbers.g,
),
y=None
).public_key(backend)
@pytest.mark.dsa
class TestDSAVerification(object):
_algorithms_dict = {
'SHA1': hashes.SHA1,
'SHA224': hashes.SHA224,
'SHA256': hashes.SHA256,
'SHA384': hashes.SHA384,
'SHA512': hashes.SHA512
}
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join(
"asymmetric", "DSA", "FIPS_186-3", "SigVer.rsp"),
load_fips_dsa_sig_vectors
)
)
def test_dsa_verification(self, vector, backend):
digest_algorithm = vector['digest_algorithm'].replace("-", "")
algorithm = self._algorithms_dict[digest_algorithm]
if (
not backend.dsa_parameters_supported(
vector['p'], vector['q'], vector['g']
) or not backend.dsa_hash_supported(algorithm)
):
pytest.skip(
"{0} does not support the provided parameters".format(backend)
)
public_key = dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
vector['p'], vector['q'], vector['g']
),
y=vector['y']
).public_key(backend)
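        # the FIPS vectors carry raw (r, s) integers; encode them as a DER sequence for the verifier API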
sig = der_encode_dsa_signature(vector['r'], vector['s'])
verifier = public_key.verifier(sig, algorithm())
verifier.update(vector['msg'])
if vector['result'] == "F":
with pytest.raises(InvalidSignature):
verifier.verify()
else:
verifier.verify()
def test_dsa_verify_invalid_asn1(self, backend):
public_key = DSA_KEY_1024.public_numbers.public_key(backend)
verifier = public_key.verifier(b'fakesig', hashes.SHA1())
verifier.update(b'fakesig')
with pytest.raises(InvalidSignature):
verifier.verify()
def test_use_after_finalize(self, backend):
public_key = DSA_KEY_1024.public_numbers.public_key(backend)
verifier = public_key.verifier(b'fakesig', hashes.SHA1())
verifier.update(b'irrelevant')
with pytest.raises(InvalidSignature):
verifier.verify()
with pytest.raises(AlreadyFinalized):
verifier.verify()
with pytest.raises(AlreadyFinalized):
verifier.update(b"more data")
@pytest.mark.dsa
class TestDSASignature(object):
_algorithms_dict = {
'SHA1': hashes.SHA1,
'SHA224': hashes.SHA224,
'SHA256': hashes.SHA256,
'SHA384': hashes.SHA384,
'SHA512': hashes.SHA512}
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join(
"asymmetric", "DSA", "FIPS_186-3", "SigGen.txt"),
load_fips_dsa_sig_vectors
)
)
def test_dsa_signing(self, vector, backend):
digest_algorithm = vector['digest_algorithm'].replace("-", "")
algorithm = self._algorithms_dict[digest_algorithm]
if (
not backend.dsa_parameters_supported(
vector['p'], vector['q'], vector['g']
) or not backend.dsa_hash_supported(algorithm)
):
pytest.skip(
"{0} does not support the provided parameters".format(backend)
)
private_key = dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
vector['p'], vector['q'], vector['g']
),
y=vector['y']
),
x=vector['x']
).private_key(backend)
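        # sign the vector's message, then confirm the signature verifies with the matching public key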
signer = private_key.signer(algorithm())
signer.update(vector['msg'])
signature = signer.finalize()
assert signature
public_key = private_key.public_key()
verifier = public_key.verifier(signature, algorithm())
verifier.update(vector['msg'])
verifier.verify()
def test_use_after_finalize(self, backend):
private_key = DSA_KEY_1024.private_key(backend)
signer = private_key.signer(hashes.SHA1())
signer.update(b"data")
signer.finalize()
with pytest.raises(AlreadyFinalized):
signer.finalize()
with pytest.raises(AlreadyFinalized):
signer.update(b"more data")
class TestDSANumbers(object):
def test_dsa_parameter_numbers(self):
parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3)
assert parameter_numbers.p == 1
assert parameter_numbers.q == 2
assert parameter_numbers.g == 3
def test_dsa_parameter_numbers_invalid_types(self):
with pytest.raises(TypeError):
dsa.DSAParameterNumbers(p=None, q=2, g=3)
with pytest.raises(TypeError):
dsa.DSAParameterNumbers(p=1, q=None, g=3)
with pytest.raises(TypeError):
dsa.DSAParameterNumbers(p=1, q=2, g=None)
def test_dsa_public_numbers(self):
parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3)
public_numbers = dsa.DSAPublicNumbers(
y=4,
parameter_numbers=parameter_numbers
)
assert public_numbers.y == 4
assert public_numbers.parameter_numbers == parameter_numbers
def test_dsa_public_numbers_invalid_types(self):
with pytest.raises(TypeError):
dsa.DSAPublicNumbers(y=4, parameter_numbers=None)
with pytest.raises(TypeError):
parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3)
dsa.DSAPublicNumbers(y=None, parameter_numbers=parameter_numbers)
def test_dsa_private_numbers(self):
parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3)
public_numbers = dsa.DSAPublicNumbers(
y=4,
parameter_numbers=parameter_numbers
)
private_numbers = dsa.DSAPrivateNumbers(
x=5,
public_numbers=public_numbers
)
assert private_numbers.x == 5
assert private_numbers.public_numbers == public_numbers
def test_dsa_private_numbers_invalid_types(self):
parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3)
public_numbers = dsa.DSAPublicNumbers(
y=4,
parameter_numbers=parameter_numbers
)
with pytest.raises(TypeError):
dsa.DSAPrivateNumbers(x=4, public_numbers=None)
with pytest.raises(TypeError):
dsa.DSAPrivateNumbers(x=None, public_numbers=public_numbers)
| {
"content_hash": "0c1eba19c033473e0b332bd83660396b",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 78,
"avg_line_length": 39.17024320457797,
"alnum_prop": 0.5392987582176771,
"repo_name": "dstufft/cryptography",
"id": "14b24d6920f8e7c000f2d212618c70d8a04d7378",
"size": "27927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/hazmat/primitives/test_dsa.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1108"
},
{
"name": "C++",
"bytes": "686"
},
{
"name": "Go",
"bytes": "4062"
},
{
"name": "Python",
"bytes": "941021"
},
{
"name": "Shell",
"bytes": "8542"
}
],
"symlink_target": ""
} |
"""
Django settings for jetere project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6qz0hx=7$06q2hiicm8kojcxoo!56o)7wnke*tlevdm=#v&j=&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'reports.apps.ReportsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jetere.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jetere.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Israel'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.expanduser('~/.jetere/root/static')
| {
"content_hash": "8b2d73490037191c8d6c1515477cb404",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 91,
"avg_line_length": 26.51219512195122,
"alnum_prop": 0.6915056731064091,
"repo_name": "idanmo/jetere",
"id": "12301a5cedec59fb1b40c4882a4ad70095011fa1",
"size": "3261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jetere/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "955"
},
{
"name": "HTML",
"bytes": "10380"
},
{
"name": "Python",
"bytes": "25366"
},
{
"name": "Shell",
"bytes": "96"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from builtins import object
from lib.common import helpers
class Stager(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'AppleScript',
'Author': ['@harmj0y'],
'Description': ('Generates AppleScript to execute the Empire stage0 launcher.'),
'Comments': [
''
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate stager for.',
'Required' : True,
'Value' : ''
},
'Language' : {
'Description' : 'Language of the stager to generate.',
'Required' : True,
'Value' : 'python'
},
'OutFile' : {
'Description' : 'File to output AppleScript to, otherwise displayed on the screen.',
'Required' : False,
'Value' : ''
},
'SafeChecks' : {
'Description' : 'Switch. Checks for LittleSnitch or a SandBox, exit the staging process if true. Defaults to True.',
'Required' : True,
'Value' : 'True'
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
language = self.options['Language']['Value']
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
safeChecks = self.options['SafeChecks']['Value']
# generate the launcher code
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language=language, encode=True, userAgent=userAgent, safeChecks=safeChecks)
if launcher == "":
print(helpers.color("[!] Error in launcher command generation."))
return ""
else:
launcher = launcher.replace('"', '\\"')
applescript = "do shell script \"%s\"" % (launcher)
return applescript
| {
"content_hash": "48dcddfe900d24f2f229cfafdc9ed8c5",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 148,
"avg_line_length": 34.83132530120482,
"alnum_prop": 0.5029401591144933,
"repo_name": "byt3bl33d3r/Empire",
"id": "1817cdab4822231a062069e3eb83575f1ece8b12",
"size": "2891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/stagers/osx/applescript.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "16998705"
},
{
"name": "Python",
"bytes": "2789955"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gitmill.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "c6000bdf5628ec6efd68ffd1a39222fd",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 71,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.7105263157894737,
"repo_name": "gitmill/gitmill",
"id": "a873f8449378f68a5aae483e5bc90c9251cb3445",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27514"
},
{
"name": "CoffeeScript",
"bytes": "3686"
},
{
"name": "JavaScript",
"bytes": "2405072"
},
{
"name": "Python",
"bytes": "27896"
},
{
"name": "Shell",
"bytes": "2510"
}
],
"symlink_target": ""
} |
import unittest
from flexfit.base import BaseParameter
from flexfit.lineshapes import lorentzian
from flexfit.model import model
from flexfit.algorithms import levenberg_marquardt
import numpy as np
import copy
class TestAlgorithm(unittest.TestCase):
def test_levenberg_marquardt(self):
'''
can I set up a model?
'''
parameter_names = ['position','amplitude','width']
parameter_values = [256, 100, 10]
parameter_state = ['fix','fit','fit']
a = BaseParameter()
x = np.arange(512.0)
a.set_parameters(parameter_names,parameter_values, parameter_state)
l1 = lorentzian()
parameter_names = ['position','amplitude','width']
parameter_values = [280, 100, 10]
parameter_state = ['fix','fit','fit']
b = BaseParameter() ## we should subclass this so that we have curve specific parameter sets.
b.set_parameters(parameter_names,parameter_values, parameter_state)
l2 = lorentzian()
big_one = model()
big_one.set_axis(x)
big_one.add_curve(l1,a)
big_one.add_curve(l2,b)
big_one.initialise_model()# gets the first model and the jacobian of the fitting parameters
fitter = levenberg_marquardt()
fitter.set_model(big_one)
data_one = copy.deepcopy(big_one)
data_one.update_model([120,20,90,5])# perturb the data somewhat
fitter.set_data(data_one.get_full_function())## need to add some data here!
import time
tic = time.time()
fitter.fit() # this should do the job
toc = time.time()
print str((toc-tic)*1e3)+'ms'
print fitter.get_full_output() # looks like it worked.
### CHECK THIS IS ACTUALLY FITTING!! TEST HERE! ###
### MAYBE WE CAN CATCH THE TIME AS A LOGGABLE? ###
### WE NEED TO FIND SOMEWAY OF REALISTICALLY OUTPUTTING A RESULT, maybe with a pretty print report? ###
### It would certainly be nice to report each curve individually too.
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "b67f84e7c964651532647e2ef61a4112",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 111,
"avg_line_length": 42.42857142857143,
"alnum_prop": 0.6286676286676287,
"repo_name": "parsonsaaron/FlexFit",
"id": "0b9d1d6b8d1d98dc89ec3888d1bc3c825c0c30a7",
"size": "2082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_algorithm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13493"
}
],
"symlink_target": ""
} |
import sys
from mox3 import mox
from neutronclient.common import exceptions
from neutronclient.neutron.v2_0 import quota as test_quota
from neutronclient.tests.unit import test_cli20
class CLITestV20Quota(test_cli20.CLITestV20Base):
def test_show_quota(self):
resource = 'quota'
cmd = test_quota.ShowQuota(
test_cli20.MyApp(sys.stdout), None)
args = ['--tenant-id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args)
def test_update_quota(self):
resource = 'quota'
cmd = test_quota.UpdateQuota(
test_cli20.MyApp(sys.stdout), None)
args = ['--tenant-id', self.test_id, '--network', 'test']
self.assertRaises(
exceptions.NeutronClientException, self._test_update_resource,
resource, cmd, self.test_id, args=args,
extrafields={'network': 'new'})
def test_delete_quota_get_parser(self):
cmd = test_cli20.MyApp(sys.stdout)
test_quota.DeleteQuota(cmd, None).get_parser(cmd)
def test_show_quota_positional(self):
resource = 'quota'
cmd = test_quota.ShowQuota(
test_cli20.MyApp(sys.stdout), None)
args = [self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args)
def test_update_quota_positional(self):
resource = 'quota'
cmd = test_quota.UpdateQuota(
test_cli20.MyApp(sys.stdout), None)
args = [self.test_id, '--network', 'test']
self.assertRaises(
exceptions.NeutronClientException, self._test_update_resource,
resource, cmd, self.test_id, args=args,
extrafields={'network': 'new'})
def test_show_quota_default(self):
resource = 'quota'
cmd = test_quota.ShowQuotaDefault(
test_cli20.MyApp(sys.stdout), None)
args = ['--tenant-id', self.test_id]
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
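        # the stubbed client returns a canned default-quota response instead of calling a real Neutron endpoint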
cmd.get_client().MultipleTimes().AndReturn(self.client)
expected_res = {'quota': {'port': 50, 'network': 10, 'subnet': 10}}
resstr = self.client.serialize(expected_res)
path = getattr(self.client, "quota_default_path")
return_tup = (test_cli20.MyResp(200), resstr)
self.client.httpclient.request(
test_cli20.end_url(path % self.test_id), 'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(return_tup)
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("test_" + resource)
parsed_args = cmd_parser.parse_args(args)
cmd.run(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertIn('network', _str)
self.assertIn('subnet', _str)
self.assertIn('port', _str)
self.assertNotIn('subnetpool', _str)
def test_update_quota_noargs(self):
resource = 'quota'
cmd = test_quota.UpdateQuota(test_cli20.MyApp(sys.stdout), None)
args = [self.test_id]
self.assertRaises(exceptions.CommandError, self._test_update_resource,
resource, cmd, self.test_id, args=args,
extrafields=None)
| {
"content_hash": "b67d11179a9744cb646ef525ade33296",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 39.104651162790695,
"alnum_prop": 0.6095747844186739,
"repo_name": "Juniper/python-neutronclient",
"id": "78f4daefe86b822b9787ea86dec20c02c2941e88",
"size": "4016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutronclient/tests/unit/test_quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1293485"
},
{
"name": "Shell",
"bytes": "9630"
}
],
"symlink_target": ""
} |
def extractHaikushilanWordpressCom(item):
'''
Parser for 'haikushilan.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('saye', 'SAYE 「Run Freely」', 'translated'),
('idiot', 'Idiot', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
	return False
| {
"content_hash": "306ab689a30d170ce35cf1367da8602b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 104,
"avg_line_length": 31.59090909090909,
"alnum_prop": 0.581294964028777,
"repo_name": "fake-name/ReadableWebProxy",
"id": "aa3b6c569dd94e6141cea0db6816d0c1eb2171ff",
"size": "699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractHaikushilanWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import numpy as np
import datetime as dtm
from dateutil import rrule
import pandas as pd
import csv
import matplotlib.pylab as plt
import sys, os
#lets first create the csv file
#
#change this to actual csv file name
pingfile="weeklylogs.csv"
#parameters @plotinterval = 10 minutes
plotinterval = 10
#csv file columns
col_seq=0
col_pingtime=1
col_domain=2
col_state=3
#
########## FUNCTION TO SYNTHESIZE MISSING DATA POINTS ##########
#
def synth_data(synthdf, interval):
    #create a temporary dataframe to hold the synthesized data
tmpdf = pd.DataFrame(columns=['seqnum', 'pingdatetime', 'domain', 'statenow'])
    #first check that we have a non-empty dataframe
if not synthdf.empty:
#pick the originating TS data point
synthdf.sort_values(by='pingdatetime')
        #check if first timestamp starts at 00:00:00; if not add a dummy record
startseqnum = synthdf.index[0]
startpingdt = synthdf.iloc[0]['pingdatetime']
startdomain = synthdf.iloc[0]['domain']
startstate = synthdf.iloc[0]['statenow']
#loop through each TS data point to synthetically add new TS points
#to fill the gap between two consecutive data points
for i, row in synthdf.iterrows():
            #initialize the synthesized data point at the origin
nextdatapoint = 0
pingdt_plus_interval = startpingdt
            #stepwise loop to add synthesized points from relative origin to the next TS data point
while row['pingdatetime'] > pingdt_plus_interval + dtm.timedelta(minutes = interval) :
nextdatapoint += 1
pingdt_plus_interval = startpingdt + dtm.timedelta(minutes = nextdatapoint*interval)
tmpdf.loc[len(tmpdf.index)] = [startseqnum,pingdt_plus_interval,startdomain,startstate]
startseqnum = i
startpingdt = row['pingdatetime']
startstate = row['statenow']
        #after stepping through all the TS data points, check whether a non-empty dataframe was created
if not tmpdf.empty:
tmpdf = pd.concat([tmpdf,synthdf])
tmpdf = tmpdf.set_index('seqnum')
    #whether empty or not, return a dataframe with synthesized TS data
tmpdf.dropna(thresh=2)
return tmpdf
#
########## PLOT HISTOGRAM TO FIGURE ##########
#
def plot_hist_to_fig(histdf, dname):
#get date range of the plot to use in suptitile
begdt = histdf['pingdatetime'].min().date()
findt = histdf['pingdatetime'].max().date()
#create a new x-axis index using dataframe index; starting from 1 instead of 0
histdf['pingdate'] = histdf['pingdatetime'].apply(lambda x: x.date())
downdf = pd.DataFrame(columns=['xlabel','pingdate', 'downcount'])
datelist = list(histdf.pingdate.unique())
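    #for each day, compute the percentage of ping samples that reported the server down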
for uniquedate in datelist:
xlabel = str('{:02d}'.format(uniquedate.month))+'-'+str('{:02d}'.format(uniquedate.day))
downcount = len(histdf[(histdf.statenow == '0') & (histdf.pingdate == uniquedate)])
totalcount = len(histdf[(histdf.pingdate == uniquedate)])
downdf.loc[len(downdf.index)] = [xlabel, uniquedate,100*downcount//totalcount]
downdf = downdf.as_matrix()
#x-axis values are in the newly generated xvalues column
xl = np.array(downdf[:,0])
x = np.array(downdf[:,1])
    #y-axis values (1 or 0) are in the dataframe statenow column
y = np.array(downdf[:,2])
histfig, ax = plt.subplots()
ax.bar(x,y,color='red',width=0.5, align="center")
#to give enough spacing for the suptitle; otherwise overlaps with title
histfig.subplots_adjust(top=0.87)
# plt.figure(figsize=(8,6), dpi=150)
#beautify the plot and name the labels, titles
ax.set_title('Percentage of time Server Failed each Day', fontsize=14, fontweight='bold', color='gray')
histfig.suptitle(dname+'\n'+str(begdt)+' --- '+str(findt), fontsize=10, color='blue')
ax.set_xlabel('Month-Day', fontsize=12, color='gray')
    ax.set_ylabel('Failure Rate (%)', fontsize=12, color='gray')
plt.yticks(fontsize=10, color='gray', rotation='horizontal')
plt.xticks(x, xl, fontsize=10, color='gray', rotation='vertical')
ax.grid(True)
return histfig
#
########## PLOT DOWN TIMES FREQUENCY TO FIGURE ##########
#
def plot_freq_to_fig(plotdf, dname):
#get date range of the plot to use in suptitile
begdt = plotdf['pingdatetime'].min().date()
findt = plotdf['pingdatetime'].max().date()
failrate = 100-(sum(100*plotdf['statenow'].astype(int))/len(plotdf))
failrate = failrate.astype(float)
#create a new x-axis index using dataframe index; starting from 1 instead of 0
plotdf['xvalues'] = range(1,len(plotdf)+1)
plotdf = plotdf.as_matrix()
#x-axis values are in the newly generated xvalues column
x = np.array(plotdf[:,3].astype(int))
    #y-axis values (1 or 0) are in the dataframe statenow column
y = np.array(plotdf[:,2].astype(int))
    #set up to capture the plot into a figure
plotfig = plt.figure(num=None, figsize=(8, 6), dpi=150, facecolor='y', edgecolor='k')
ax = plotfig.add_subplot(311)
ax.fill_between(x, 0, y, color='green')
ax.plot(x,y,color='green',lw=2)
#to give enough spacing for the suptitle; otherwise overlaps with title
plotfig.subplots_adjust(top=0.87)
#beautify the plot and name the labels, titles
ax.set_title('Frequency of Server Access Failure ('+str(failrate)+'%)', fontsize=14, fontweight='bold', color='gray')
plotfig.suptitle(dname+'\n'+str(begdt)+' --- '+str(findt), fontsize=10, color='blue')
    ax.set_xlabel('Attempted Machine Access Times', fontsize=12, color='gray')
ax.set_ylabel('Machine State', fontsize=12, color='gray')
plt.yticks(y, ['UP','DOWN'], fontsize=10, color='gray', rotation='vertical')
plt.xticks(fontsize=10, color='gray', rotation='horizontal')
plt.ylim(0,1.1)
plt.xlim(0,x.max()+10)
ax.grid(True)
return plotfig
#
############# MAIN ################################
#
print("Complile data from file the log files")
#os.system('./analytics.sh')
print("Reading data from file "+pingfile)
with open(pingfile, 'rb') as f:
data = [i.split(",") for i in f.read().split()]
df = pd.DataFrame(data, columns=['seqnum', 'pingdatetime', 'domain', 'statenow'])
for index, row in df.iterrows():
row[col_pingtime] = dtm.datetime.strptime(row[col_pingtime], '%Y-%m-%d:%H:%M:%S')
#to avoid duplicate data and to reflect ping time to be on the minute
row[col_pingtime] = row[col_pingtime].replace(second = 0)
    #format pingdatetime as proper datetime, set it as the index and then order them
df['pingdatetime'] = pd.to_datetime(df['pingdatetime'])
df.sort_values(by='pingdatetime')
df = df.set_index('seqnum')
#begin processing for each unique domain
print(str(len(df.index))+" data rows added to the dataframe, ready for processing ...")
print ('-----------------------------------------------------')
for thedomain in df.domain.unique():
    #insert synthesized data points
dompingdf = df[df['domain']==thedomain]
print("Begin data synthesis for "+thedomain+" with data rows = "+str(len(dompingdf.index)))
amenddf = synth_data(dompingdf,plotinterval)
if not amenddf.empty:
        #output the synthesized dataframe to the output file
        print(str(len(amenddf.index))+" synthesized data rows added to "+thedomain)
amenddf['pingdatetime'] = pd.to_datetime(amenddf.pingdatetime)
amenddf = amenddf.sort(['pingdatetime'])
amenddf.index = range(0,len(amenddf))
print('writing data to file: ./data/syndata_'+thedomain+'.csv')
amenddf.to_csv('./data/syndata_'+thedomain+'.csv')
#plot timeseries with function (need to add if conditions to check if function returns valid fig)
fig = plot_freq_to_fig(amenddf, thedomain)
fig.savefig('./plots/freqplot_'+thedomain+'.png', bbox_inches='tight')
print ('frequency plot created in file: ./plots/freqplot_'+thedomain+'.png')
fig = plot_hist_to_fig(amenddf, thedomain)
fig.savefig('./plots/histplot_'+thedomain+'.png', bbox_inches='tight')
print ('histogram plot created in file: ./plots/histplot_'+thedomain+'.png')
print ('process complete for '+thedomain)
print ('-----------------------------------------------------')
else:
print ("Warning: no syntheseized data was added to: "+thedomain)
print ('-----------------------------------------------------')
print ('End processing data for visualization !!! ')
| {
"content_hash": "00dc5c445adaca8ea2c79dcae287ca33",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 121,
"avg_line_length": 49.53142857142857,
"alnum_prop": 0.639132441162898,
"repo_name": "waidyanatha/pingsam",
"id": "44e045deef397f388e079c0452f9b7433bab4e91",
"size": "8668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8668"
},
{
"name": "Shell",
"bytes": "11516"
}
],
"symlink_target": ""
} |
"""
Description:
Provides a pyGtk vtkRenderWindowInteractor widget. This embeds a
vtkRenderWindow inside a GTK widget and uses the
vtkGenericRenderWindowInteractor for the event handling. This is
similar to GtkVTKRenderWindowInteractor.py.
The extensions here allow the use of gtkglext rather than gtkgl and
pygtk-2 rather than pygtk-0. It requires pygtk-2.0.0 or later.
There is a working example at the bottom.
Credits:
John Hunter <[email protected]> developed and tested
this code based on VTK's GtkVTKRenderWindow.py and extended it to
work with pygtk-2.0.0.
License:
VTK license.
"""
import sys
import pygtk
pygtk.require('2.0')
import gtk
from gtk import gdk
import gtk.gtkgl
import vtk
class GtkGLExtVTKRenderWindowInteractor(gtk.gtkgl.DrawingArea):
""" Embeds a vtkRenderWindow into a pyGTK widget and uses
vtkGenericRenderWindowInteractor for the event handling. This
class embeds the RenderWindow correctly. A __getattr__ hook is
provided that makes the class behave like a
vtkGenericRenderWindowInteractor."""
def __init__(self, *args):
gtk.gtkgl.DrawingArea.__init__(self)
self.set_double_buffered(gtk.FALSE)
self._RenderWindow = vtk.vtkRenderWindow()
# private attributes
self.__Created = 0
self._ActiveButton = 0
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow(self._RenderWindow)
self._Iren.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
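        # route VTK timer requests through GTK timeouts (see CreateTimer/DestroyTimer below)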
self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
self.ConnectSignals()
# need this to be able to handle key_press events.
self.set_flags(gtk.CAN_FOCUS)
def set_size_request(self, w, h):
gtk.gtkgl.DrawingArea.set_size_request(self, w, h)
self._RenderWindow.SetSize(w, h)
self._Iren.SetSize(w, h)
self._Iren.ConfigureEvent()
def ConnectSignals(self):
self.connect("realize", self.OnRealize)
self.connect("expose_event", self.OnExpose)
self.connect("configure_event", self.OnConfigure)
self.connect("button_press_event", self.OnButtonDown)
self.connect("button_release_event", self.OnButtonUp)
self.connect("motion_notify_event", self.OnMouseMove)
self.connect("enter_notify_event", self.OnEnter)
self.connect("leave_notify_event", self.OnLeave)
self.connect("key_press_event", self.OnKeyPress)
self.connect("delete_event", self.OnDestroy)
self.add_events(gdk.EXPOSURE_MASK| gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.KEY_PRESS_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK |
gdk.ENTER_NOTIFY_MASK | gdk.LEAVE_NOTIFY_MASK)
def __getattr__(self, attr):
"""Makes the object behave like a
vtkGenericRenderWindowInteractor"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(self.__class__.__name__ +
" has no attribute named " + attr)
def CreateTimer(self, obj, event):
gtk.timeout_add(10, self._Iren.TimerEvent)
def DestroyTimer(self, obj, event):
"""The timer is a one shot timer so will expire automatically."""
return 1
def GetRenderWindow(self):
return self._RenderWindow
def Render(self):
if self.__Created:
self._RenderWindow.Render()
def OnRealize(self, *args):
if self.__Created == 0:
# you can't get the xid without the window being realized.
self.realize()
if sys.platform=='win32':
win_id = str(self.widget.window.handle)
else:
win_id = str(self.widget.window.xid)
self._RenderWindow.SetWindowInfo(win_id)
#self._Iren.Initialize()
self.__Created = 1
return gtk.TRUE
def OnConfigure(self, widget, event):
self.widget=widget
self._Iren.SetSize(event.width, event.height)
self._Iren.ConfigureEvent()
self.Render()
return gtk.TRUE
def OnExpose(self, *args):
self.Render()
return gtk.TRUE
def OnDestroy(self, event=None):
self.hide()
del self._RenderWindow
self.destroy()
return gtk.TRUE
def _GetCtrlShift(self, event):
ctrl, shift = 0, 0
if ((event.state & gdk.CONTROL_MASK) == gdk.CONTROL_MASK):
ctrl = 1
if ((event.state & gdk.SHIFT_MASK) == gdk.SHIFT_MASK):
shift = 1
return ctrl, shift
def OnButtonDown(self, wid, event):
"""Mouse button pressed."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
button = event.button
if button == 3:
self._Iren.RightButtonPressEvent()
return gtk.TRUE
elif button == 1:
self._Iren.LeftButtonPressEvent()
return gtk.TRUE
elif button == 2:
self._Iren.MiddleButtonPressEvent()
return gtk.TRUE
else:
return gtk.FALSE
def OnButtonUp(self, wid, event):
"""Mouse button released."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
button = event.button
if button == 3:
self._Iren.RightButtonReleaseEvent()
return gtk.TRUE
elif button == 1:
self._Iren.LeftButtonReleaseEvent()
return gtk.TRUE
elif button == 2:
self._Iren.MiddleButtonReleaseEvent()
return gtk.TRUE
return gtk.FALSE
def OnMouseMove(self, wid, event):
"""Mouse has moved."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
self._Iren.MouseMoveEvent()
return gtk.TRUE
def OnEnter(self, wid, event):
"""Entering the vtkRenderWindow."""
self.grab_focus()
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
self._Iren.EnterEvent()
return gtk.TRUE
def OnLeave(self, wid, event):
"""Leaving the vtkRenderWindow."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
self._Iren.LeaveEvent()
return gtk.TRUE
def OnKeyPress(self, wid, event):
"""Key pressed."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
keycode, keysym = event.keyval, event.string
key = chr(0)
if keycode < 256:
key = chr(keycode)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
key, 0, keysym)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
return gtk.TRUE
def OnKeyRelease(self, wid, event):
"Key released."
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
keycode, keysym = event.keyval, event.string
key = chr(0)
if keycode < 256:
key = chr(keycode)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
key, 0, keysym)
self._Iren.KeyReleaseEvent()
return gtk.TRUE
def Initialize(self):
if self.__Created:
self._Iren.Initialize()
def SetPicker(self, picker):
self._Iren.SetPicker(picker)
def GetPicker(self, picker):
return self._Iren.GetPicker()
def main():
# The main window
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_title("A GtkVTKRenderWindow Demo!")
window.connect("destroy", gtk.mainquit)
window.connect("delete_event", gtk.mainquit)
window.set_border_width(10)
# A VBox into which widgets are packed.
vbox = gtk.VBox(spacing=3)
window.add(vbox)
vbox.show()
# The GtkVTKRenderWindow
gvtk = GtkGLExtVTKRenderWindowInteractor()
#gvtk.SetDesiredUpdateRate(1000)
gvtk.set_size_request(400, 400)
vbox.pack_start(gvtk)
gvtk.show()
gvtk.Initialize()
gvtk.Start()
# prevents 'q' from exiting the app.
gvtk.AddObserver("ExitEvent", lambda o,e,x=None: x)
# The VTK stuff.
cone = vtk.vtkConeSource()
cone.SetResolution(80)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
#coneActor = vtk.vtkLODActor()
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetColor(0.5, 0.5, 1.0)
ren = vtk.vtkRenderer()
gvtk.GetRenderWindow().AddRenderer(ren)
ren.AddActor(coneActor)
# A simple quit button
quit = gtk.Button("Quit!")
quit.connect("clicked", gtk.mainquit)
vbox.pack_start(quit)
quit.show()
# show the main window and start event processing.
window.show()
gtk.mainloop()
if __name__ == "__main__":
main()
| {
"content_hash": "db56ec63b31853520aa68ff643c69d86",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 74,
"avg_line_length": 33.43092105263158,
"alnum_prop": 0.5763062087966152,
"repo_name": "hlzz/dotfiles",
"id": "d4a6bd2983e2bbe48a9480cf89322acc98428955",
"size": "10163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/Wrapping/Python/vtk/gtk/GtkGLExtVTKRenderWindowInteractor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
from builtins import range
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsContainer
from apache_beam.metrics.metricbase import MetricName
class TestMetricKey(unittest.TestCase):
def test_equality_for_key_with_labels(self):
test_labels = {'label1', 'value1'}
test_object = MetricKey(
'step', MetricName('namespace', 'name'), labels=test_labels)
same_labels = MetricKey(
'step', MetricName('namespace', 'name'), labels={'label1', 'value1'})
same_label_reference = MetricKey(
'step', MetricName('namespace', 'name'), labels=test_labels)
self.assertEqual(test_object, same_labels)
self.assertEqual(test_object, same_label_reference)
self.assertEqual(hash(test_object), hash(same_labels))
self.assertEqual(hash(test_object), hash(same_label_reference))
def test_inequality_for_key_with_labels(self):
test_labels = {'label1', 'value1'}
test_object = MetricKey(
'step', MetricName('namespace', 'name'), labels=test_labels)
no_labels = MetricKey('step', MetricName('namespace', 'name'))
diff_label_key = MetricKey(
'step', MetricName('namespace', 'name'), labels={'l1_diff', 'value1'})
diff_label_value = MetricKey(
'step', MetricName('namespace', 'name'), labels={'label1', 'v1_diff'})
self.assertNotEqual(test_object, no_labels)
self.assertNotEqual(test_object, diff_label_key)
self.assertNotEqual(test_object, diff_label_value)
self.assertNotEqual(hash(test_object), hash(no_labels))
self.assertNotEqual(hash(test_object), hash(diff_label_key))
self.assertNotEqual(hash(test_object), hash(diff_label_value))
def test_equality_for_key_with_no_labels(self):
test_object = MetricKey('step', MetricName('namespace', 'name'))
same = MetricKey('step', MetricName('namespace', 'name'))
self.assertEqual(test_object, same)
self.assertEqual(hash(test_object), hash(same))
diff_step = MetricKey('step_diff', MetricName('namespace', 'name'))
diff_namespace = MetricKey('step', MetricName('namespace_diff', 'name'))
diff_name = MetricKey('step', MetricName('namespace', 'name_diff'))
self.assertNotEqual(test_object, diff_step)
self.assertNotEqual(test_object, diff_namespace)
self.assertNotEqual(test_object, diff_name)
self.assertNotEqual(hash(test_object), hash(diff_step))
self.assertNotEqual(hash(test_object), hash(diff_namespace))
self.assertNotEqual(hash(test_object), hash(diff_name))
class TestMetricsContainer(unittest.TestCase):
def test_add_to_counter(self):
mc = MetricsContainer('astep')
counter = mc.get_counter(MetricName('namespace', 'name'))
counter.inc()
counter = mc.get_counter(MetricName('namespace', 'name'))
self.assertEqual(counter.value, 1)
def test_get_cumulative_or_updates(self):
mc = MetricsContainer('astep')
all_values = []
for i in range(1, 11):
counter = mc.get_counter(MetricName('namespace', 'name{}'.format(i)))
distribution = mc.get_distribution(
MetricName('namespace', 'name{}'.format(i)))
gauge = mc.get_gauge(MetricName('namespace', 'name{}'.format(i)))
counter.inc(i)
distribution.update(i)
gauge.set(i)
all_values.append(i)
# Retrieve ALL updates.
cumulative = mc.get_cumulative()
self.assertEqual(len(cumulative.counters), 10)
self.assertEqual(len(cumulative.distributions), 10)
self.assertEqual(len(cumulative.gauges), 10)
self.assertEqual(set(all_values),
set([v for _, v in cumulative.counters.items()]))
self.assertEqual(set(all_values),
set([v.value for _, v in cumulative.gauges.items()]))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "6e95821edf5d5d0c414ace7122c013a5",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 78,
"avg_line_length": 40.797872340425535,
"alnum_prop": 0.6826597131681877,
"repo_name": "RyanSkraba/beam",
"id": "fc363a4ec1d87df31eb31a6e78b5ee7ef526a833",
"size": "4620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/metrics/execution_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1597"
},
{
"name": "CSS",
"bytes": "40963"
},
{
"name": "Dockerfile",
"bytes": "16638"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2683402"
},
{
"name": "Groovy",
"bytes": "517560"
},
{
"name": "HTML",
"bytes": "183330"
},
{
"name": "Java",
"bytes": "28609011"
},
{
"name": "JavaScript",
"bytes": "16595"
},
{
"name": "Jupyter Notebook",
"bytes": "56365"
},
{
"name": "Python",
"bytes": "6191025"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "235061"
},
{
"name": "TSQL",
"bytes": "841"
}
],
"symlink_target": ""
} |
"""
encapsulation.py
Created by Thomas Mangin on 2014-06-21.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
from struct import pack
from struct import unpack
from exabgp.bgp.message.open.asn import ASN
from exabgp.bgp.message.update.attribute.community.extended import ExtendedCommunity
# ================================================================== TrafficRate
# RFC 5575
@ExtendedCommunity.register
class TrafficRate (ExtendedCommunity):
COMMUNITY_TYPE = 0x80
COMMUNITY_SUBTYPE = 0x06
__slots__ = ['asn','rate']
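	# value encoding (RFC 5575 traffic-rate): 2-byte type/subtype, 2-byte ASN, 4-byte IEEE float rate in bytes per second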
def __init__ (self, asn, rate, community=None):
self.asn = asn
self.rate = rate
ExtendedCommunity.__init__(
self,
community if community is not None else pack(
"!2sHf",
self._packedTypeSubtype(),
asn,rate
)
)
def __repr__ (self):
return "rate-limit %d" % self.rate
@staticmethod
def unpack (data):
asn,rate = unpack('!Hf',data[2:8])
return TrafficRate(ASN(asn),rate,data[:8])
# ================================================================ TrafficAction
# RFC 5575
@ExtendedCommunity.register
class TrafficAction (ExtendedCommunity):
COMMUNITY_TYPE = 0x80
COMMUNITY_SUBTYPE = 0x07
_sample = {
False: 0x0,
True: 0x2,
}
_terminal = {
False: 0x0,
True: 0x1,
}
__slots__ = ['sample','terminal']
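	# the last byte carries two flag bits: 0x2 = sample traffic, 0x1 = terminal action (RFC 5575 traffic-action)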
def __init__ (self, sample, terminal, community=None):
self.sample = sample
self.terminal = terminal
bitmask = self._sample[sample] | self._terminal[terminal]
ExtendedCommunity.__init__(
self,
community if community is not None else pack(
'!2sLBB',
self._packedTypeSubtype(),
0,0,bitmask
)
)
def __repr__ (self):
s = []
if self.sample:
s.append('sample')
if self.terminal:
s.append('terminal')
return 'action %s' % '-'.join(s)
@staticmethod
def unpack (data):
bit, = unpack('!B',data[7])
sample = bool(bit & 0x02)
terminal = bool(bit & 0x01)
return TrafficAction(sample,terminal,data[:8])
# ============================================================== TrafficRedirect
# RFC 5575
@ExtendedCommunity.register
class TrafficRedirect (ExtendedCommunity):
COMMUNITY_TYPE = 0x80
COMMUNITY_SUBTYPE = 0x08
__slots__ = ['asn','target']
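	# redirect payload: 2-byte ASN followed by a 4-byte local administrator value (route-target form)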
def __init__ (self, asn, target, community=None):
self.asn = asn
self.target = target
ExtendedCommunity.__init__(
self,
community if community is not None else pack(
"!2sHL",
self._packedTypeSubtype(),
asn,target
)
)
def __repr__ (self):
return "redirect:%s:%s" % (self.asn,self.target)
@staticmethod
def unpack (data):
asn,target = unpack('!HL',data[2:8])
return TrafficRedirect(ASN(asn),target,data[:8])
# ================================================================== TrafficMark
# RFC 5575
@ExtendedCommunity.register
class TrafficMark (ExtendedCommunity):
COMMUNITY_TYPE = 0x80
COMMUNITY_SUBTYPE = 0x09
__slots__ = ['dscp']
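	# traffic-marking: the DSCP value to apply is carried in the last byte of the community value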
def __init__ (self, dscp, community=None):
self.dscp = dscp
ExtendedCommunity.__init__(
self,
community if community is not None else pack(
"!2sLBB",
self._packedTypeSubtype(),
0,0,dscp
)
)
def __repr__ (self):
return "mark %d" % self.dscp
@staticmethod
def unpack (data):
dscp, = unpack('!B',data[7])
return TrafficMark(dscp,data[:8])
# =============================================================== TrafficNextHop
# draft-simpson-idr-flowspec-redirect-02
# XXX: FIXME: I guess this should be a subclass of NextHop or IP ..
@ExtendedCommunity.register
class TrafficNextHop (ExtendedCommunity):
COMMUNITY_TYPE = 0x80
COMMUNITY_SUBTYPE = 0x00
__slots__ = ['copy']
def __init__ (self, copy, community=None):
self.copy = copy
ExtendedCommunity.__init__(
self,
community if community is not None else pack(
"!2sLH",
self._packedTypeSubtype(),
0,1 if copy else 0
)
)
def __repr__ (self):
return "copy-to-nexthop" if self.copy else "redirect-to-nexthop"
@staticmethod
def unpack (data):
bit, = unpack('!B',data[7])
return TrafficNextHop(bool(bit & 0x01),data[:8])
# ============================================================ TrafficRedirectIP
# RFC 5575
# If we need to provide the <IP>:<ASN> form for the FlowSpec Redirect ...
# import socket
# Do not use socket, use IPv4.ntop or pton
# TrafficRedirectASN = TrafficRedirect
# class TrafficRedirectIP (ExtendedCommunity):
# COMMUNITY_TYPE = 0x80
# COMMUNITY_SUBTYPE = 0x08
# def __init__ (self, ip, target, community=None):
# self.ip = ip
# self.target = target
# ExtendedCommunity.__init__(self,community if community is not None else pack("!BB4sH",0x80,0x08,socket.inet_pton(socket.AF_INET,ip),target))
# def __str__ (self):
# return "redirect %s:%d" % (self.ip,self.target)
# @staticmethod
# def unpack (data):
# ip,target = unpack('!4sH',data[2:8])
# return TrafficRedirectIP(socket.inet_ntop(socket.AF_INET,ip),target,data[:8])
| {
"content_hash": "ecc874706d67e5fee8f21fddf9e50f4f",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 144,
"avg_line_length": 23.004716981132077,
"alnum_prop": 0.6188230469550954,
"repo_name": "dneiter/exabgp",
"id": "f50061662a71192f81ab2da3615554323ce7ded5",
"size": "4895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/exabgp/bgp/message/update/attribute/community/extended/traffic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "1191786"
},
{
"name": "Shell",
"bytes": "17690"
}
],
"symlink_target": ""
} |
import sys
import unittest
from PySide import QtGui
from ibl_stuff.libs import qt_helpers
class Foo(QtGui.QDialog):
pass
class Bar():
pass
class SimpleTest(unittest.TestCase):
def test_get_parent_empty(self):
self.assertIsNone(qt_helpers.get_parent())
class TestQt(unittest.TestCase):
PARENT = None
def setUp(self):
try:
QtGui.QApplication(sys.argv)
except RuntimeError:
pass
self.PARENT = QtGui.QMainWindow()
Foo(self.PARENT)
def tearDown(self):
self.PARENT = None
def test_find_instance_success(self):
self.assertIsNotNone(qt_helpers.find_instance(self.PARENT, Foo))
def test_find_instance_fail(self):
self.assertIsNone(qt_helpers.find_instance(self.PARENT, Bar))
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "5df3c1e29c0b2d22242469f8fdcd1925",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 72,
"avg_line_length": 19.72093023255814,
"alnum_prop": 0.6474056603773585,
"repo_name": "csaez/ibl_stuff",
"id": "cdeaa8203890e8d87c825f4adfde7c65d3cf0c44",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibl_stuff/tests/test_qt_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36578"
}
],
"symlink_target": ""
} |
from ..slack.resource import MsgResource
from ..slack.slackbot import SlackerAdapter
class DoNotDisturbManager(object):
def __init__(self):
self.slackbot = SlackerAdapter()
def focus(self, dnd=None):
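        # 'dnd_enabled' in the payload signals Do Not Disturb is active; announce focus start or finish accordingly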
if dnd.get("dnd_enabled", None):
self.slackbot.send_message(text=MsgResource.FOCUS)
else:
self.slackbot.send_message(text=MsgResource.FOCUS_FINISH)
| {
"content_hash": "24981f65da14c17ef746fa18e3f2beae",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 31.307692307692307,
"alnum_prop": 0.6756756756756757,
"repo_name": "DongjunLee/kino-bot",
"id": "aaa7b9e31512a742fec06e81e681d2040c086cee",
"size": "408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kino/dialog/dnd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "259265"
}
],
"symlink_target": ""
} |
import json
import datetime
import os
from random import randint
import feedparser
from _core import *
"""
Pyvona Voices
[ Marlene, Hans, Kendra, Salli ]}
"""
# Get JSON Config
json_data = open(getConfigPath(__file__) + str(guid) + '.json').read()
data = json.loads(json_data)
## define time variable
wakeupTime = ""
# get current time
currentTime = datetime.datetime.strftime(datetime.datetime.now(), '%H:%M')
# get wakeup time
# the executed service code below is expected to define the wakeup time variable (wakeupTime)
# say(currentTime)
try:
#
# The first Service gets executed
exec(data["services"][0]["compiled"])
except Exception:
print("the InitScript (first script of config was not working) ")
#
#
# Check if we should update Wakeup Time on Server
wakeupPath = getConfigPath(__file__) + '.wakeup'
if(os.path.isfile(wakeupPath)):
#
# Read Stored Time
storedTime = open(wakeupPath, 'r').read()
#
# If different, run update
if(storedTime != wakeupTime):
wr = open(wakeupPath, 'w')
wr.write(wakeupTime)
print(requests.put(base_full + "/api/devices/" + guid, json.dumps({"alarmTime": wakeupTime})))
else:
#
# Store Time
wr = open(wakeupPath, 'w')
wr.write(wakeupTime)
#
# Check if its Time for the Wakup Call
if(wakeupTime == currentTime):
#
# Wakeup Procedure started
# Loop Through Wakeup Services
for entry in data["services"]:
#
# Exceute Script
try:
exec(entry["compiled"])
except Exception:
print("The Alarmclock Procedure was going down!")
| {
"content_hash": "14bd934088d1efa99ce68f895043b947",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 98,
"avg_line_length": 20.916666666666668,
"alnum_prop": 0.6759628154050464,
"repo_name": "WakemeHackathon/AlarmPi",
"id": "07480f8af5b98e984e8a829bf2f0fd0a8999f745",
"size": "1552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AlarmProcess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13251"
},
{
"name": "Shell",
"bytes": "649"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
import frappe
import unittest, json, sys, os
import time
import xmlrunner
import importlib
from frappe.modules import load_doctype_module, get_module_name
from frappe.utils import cstr
import frappe.utils.scheduler
import cProfile, pstats
from six import StringIO
from six.moves import reload_module
from frappe.model.naming import revert_series_if_last
unittest_runner = unittest.TextTestRunner
SLOW_TEST_THRESHOLD = 2
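# seconds; TimeLoggingTestResult reports any test that runs longer than this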
def xmlrunner_wrapper(output):
"""Convenience wrapper to keep method signature unchanged for XMLTestRunner and TextTestRunner"""
def _runner(*args, **kwargs):
kwargs['output'] = output
return xmlrunner.XMLTestRunner(*args, **kwargs)
return _runner
def main(app=None, module=None, doctype=None, verbose=False, tests=(),
force=False, profile=False, junit_xml_output=None, ui_tests=False,
doctype_list_path=None, skip_test_records=False, failfast=False):
global unittest_runner
if doctype_list_path:
app, doctype_list_path = doctype_list_path.split(os.path.sep, 1)
with open(frappe.get_app_path(app, doctype_list_path), 'r') as f:
doctype = f.read().strip().splitlines()
xmloutput_fh = None
if junit_xml_output:
xmloutput_fh = open(junit_xml_output, 'wb')
unittest_runner = xmlrunner_wrapper(xmloutput_fh)
else:
unittest_runner = unittest.TextTestRunner
try:
frappe.flags.print_messages = verbose
frappe.flags.in_test = True
if not frappe.db:
frappe.connect()
# if not frappe.conf.get("db_name").startswith("test_"):
# raise Exception, 'db_name must start with "test_"'
# workaround! since there is no separate test db
frappe.clear_cache()
frappe.utils.scheduler.disable_scheduler()
set_test_email_config()
if not frappe.flags.skip_before_tests:
if verbose:
print('Running "before_tests" hooks')
for fn in frappe.get_hooks("before_tests", app_name=app):
frappe.get_attr(fn)()
if doctype:
ret = run_tests_for_doctype(doctype, verbose, tests, force, profile, junit_xml_output=junit_xml_output)
elif module:
ret = run_tests_for_module(module, verbose, tests, profile, junit_xml_output=junit_xml_output)
else:
ret = run_all_tests(app, verbose, profile, ui_tests, failfast=failfast, junit_xml_output=junit_xml_output)
frappe.db.commit()
# workaround! since there is no separate test db
frappe.clear_cache()
return ret
finally:
if xmloutput_fh:
xmloutput_fh.flush()
xmloutput_fh.close()
def set_test_email_config():
frappe.conf.update({
"auto_email_id": "[email protected]",
"mail_server": "smtp.example.com",
"mail_login": "[email protected]",
"mail_password": "test",
"admin_password": "admin"
})
class TimeLoggingTestResult(unittest.TextTestResult):
def startTest(self, test):
self._started_at = time.time()
super(TimeLoggingTestResult, self).startTest(test)
def addSuccess(self, test):
elapsed = time.time() - self._started_at
name = self.getDescription(test)
if elapsed >= SLOW_TEST_THRESHOLD:
self.stream.write("\n{} ({:.03}s)\n".format(name, elapsed))
super(TimeLoggingTestResult, self).addSuccess(test)
def run_all_tests(app=None, verbose=False, profile=False, ui_tests=False, failfast=False, junit_xml_output=False):
import os
apps = [app] if app else frappe.get_installed_apps()
test_suite = unittest.TestSuite()
for app in apps:
for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in ('locals', '.git', 'public'):
if dontwalk in folders:
folders.remove(dontwalk)
# print path
for filename in files:
filename = cstr(filename)
if filename.startswith("test_") and filename.endswith(".py")\
and filename != 'test_runner.py':
# print filename[:-3]
_add_test(app, path, filename, verbose,
test_suite, ui_tests)
if junit_xml_output:
runner = unittest_runner(verbosity=1+(verbose and 1 or 0), failfast=failfast)
else:
runner = unittest_runner(resultclass=TimeLoggingTestResult, verbosity=1+(verbose and 1 or 0), failfast=failfast)
if profile:
pr = cProfile.Profile()
pr.enable()
out = runner.run(test_suite)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return out
def run_tests_for_doctype(doctypes, verbose=False, tests=(), force=False, profile=False, junit_xml_output=False):
modules = []
if not isinstance(doctypes, (list, tuple)):
doctypes = [doctypes]
for doctype in doctypes:
module = frappe.db.get_value("DocType", doctype, "module")
if not module:
print('Invalid doctype {0}'.format(doctype))
sys.exit(1)
test_module = get_module_name(doctype, module, "test_")
if force:
for name in frappe.db.sql_list("select name from `tab%s`" % doctype):
frappe.delete_doc(doctype, name, force=True)
make_test_records(doctype, verbose=verbose, force=force)
modules.append(importlib.import_module(test_module))
return _run_unittest(modules, verbose=verbose, tests=tests, profile=profile, junit_xml_output=junit_xml_output)
def run_tests_for_module(module, verbose=False, tests=(), profile=False, junit_xml_output=False):
module = importlib.import_module(module)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
return _run_unittest(module, verbose=verbose, tests=tests, profile=profile, junit_xml_output=junit_xml_output)
def run_setup_wizard_ui_test(app=None, verbose=False, profile=False):
'''Run setup wizard UI test using test_test_runner'''
frappe.flags.run_setup_wizard_ui_test = 1
return run_ui_tests(app=app, test=None, verbose=verbose, profile=profile)
def run_ui_tests(app=None, test=None, test_list=None, verbose=False, profile=False):
'''Run a single unit test for UI using test_test_runner'''
module = importlib.import_module('frappe.tests.ui.test_test_runner')
frappe.flags.ui_test_app = app
if test_list:
frappe.flags.ui_test_list = test_list
else:
frappe.flags.ui_test_path = test
return _run_unittest(module, verbose=verbose, tests=(), profile=profile)
def _run_unittest(modules, verbose=False, tests=(), profile=False, junit_xml_output=False):
test_suite = unittest.TestSuite()
if not isinstance(modules, (list, tuple)):
modules = [modules]
for module in modules:
module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
if tests:
for each in module_test_cases:
for test_case in each.__dict__["_tests"]:
if test_case.__dict__["_testMethodName"] in tests:
test_suite.addTest(test_case)
else:
test_suite.addTest(module_test_cases)
if junit_xml_output:
runner = unittest_runner(verbosity=1+(verbose and 1 or 0))
else:
runner = unittest_runner(resultclass=TimeLoggingTestResult, verbosity=1+(verbose and 1 or 0))
if profile:
pr = cProfile.Profile()
pr.enable()
frappe.flags.tests_verbose = verbose
out = runner.run(test_suite)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return out
def _add_test(app, path, filename, verbose, test_suite=None, ui_tests=False):
import os
if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
# in /doctype/doctype/boilerplate/
return
app_path = frappe.get_pymodule_path(app)
relative_path = os.path.relpath(path, app_path)
if relative_path=='.':
module_name = app
else:
module_name = '{app}.{relative_path}.{module_name}'.format(app=app,
relative_path=relative_path.replace('/', '.'), module_name=filename[:-3])
module = importlib.import_module(module_name)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
is_ui_test = True if hasattr(module, 'TestDriver') else False
if is_ui_test != ui_tests:
return
if not test_suite:
test_suite = unittest.TestSuite()
if os.path.basename(os.path.dirname(path))=="doctype":
txt_file = os.path.join(path, filename[5:].replace(".py", ".json"))
if os.path.exists(txt_file):
with open(txt_file, 'r') as f:
doc = json.loads(f.read())
doctype = doc["name"]
make_test_records(doctype, verbose)
test_suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
def make_test_records(doctype, verbose=0, force=False):
if not frappe.db:
frappe.connect()
if frappe.flags.skip_test_records:
return
for options in get_dependencies(doctype):
if options == "[Select]":
continue
if not options in frappe.local.test_objects:
frappe.local.test_objects[options] = []
make_test_records(options, verbose, force)
make_test_records_for_doctype(options, verbose, force)
def get_modules(doctype):
module = frappe.db.get_value("DocType", doctype, "module")
try:
test_module = load_doctype_module(doctype, module, "test_")
if test_module:
reload_module(test_module)
except ImportError:
test_module = None
return module, test_module
def get_dependencies(doctype):
module, test_module = get_modules(doctype)
meta = frappe.get_meta(doctype)
link_fields = meta.get_link_fields()
for df in meta.get_table_fields():
link_fields.extend(frappe.get_meta(df.options).get_link_fields())
options_list = [df.options for df in link_fields] + [doctype]
if hasattr(test_module, "test_dependencies"):
options_list += test_module.test_dependencies
options_list = list(set(options_list))
if hasattr(test_module, "test_ignore"):
for doctype_name in test_module.test_ignore:
if doctype_name in options_list:
options_list.remove(doctype_name)
return options_list
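# Example (hypothetical doctype): for a "Blog Post" doctype that links to
# "Blogger" and "Blog Category", get_dependencies("Blog Post") would return
# those link targets plus "Blog Post" itself, minus anything the test module
# lists in test_ignore.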
def make_test_records_for_doctype(doctype, verbose=0, force=False):
if not force and doctype in get_test_record_log():
return
module, test_module = get_modules(doctype)
if verbose:
print("Making for " + doctype)
if hasattr(test_module, "_make_test_records"):
frappe.local.test_objects[doctype] += test_module._make_test_records(verbose)
elif hasattr(test_module, "test_records"):
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_module.test_records, verbose, force)
else:
test_records = frappe.get_test_records(doctype)
if test_records:
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_records, verbose, force)
elif verbose:
print_mandatory_fields(doctype)
add_to_test_record_log(doctype)
def make_test_objects(doctype, test_records=None, verbose=None, reset=False):
'''Make test objects from given list of `test_records` or from `test_records.json`'''
records = []
def revert_naming(d):
if getattr(d, 'naming_series', None):
revert_series_if_last(d.naming_series, d.name)
if test_records is None:
test_records = frappe.get_test_records(doctype)
for doc in test_records:
if not doc.get("doctype"):
doc["doctype"] = doctype
d = frappe.copy_doc(doc)
if d.meta.get_field("naming_series"):
if not d.naming_series:
d.naming_series = "_T-" + d.doctype + "-"
if doc.get('name'):
d.name = doc.get('name')
else:
d.set_new_name()
if frappe.db.exists(d.doctype, d.name) and not reset:
frappe.db.rollback()
# do not create test records, if already exists
continue
# submit if docstatus is set to 1 for test record
docstatus = d.docstatus
d.docstatus = 0
try:
d.run_method("before_test_insert")
d.insert()
if docstatus == 1:
d.submit()
except frappe.NameError:
revert_naming(d)
except Exception as e:
if d.flags.ignore_these_exceptions_in_test and e.__class__ in d.flags.ignore_these_exceptions_in_test:
revert_naming(d)
else:
raise
records.append(d.name)
frappe.db.commit()
return records
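# Illustrative call (hypothetical records): insert two ToDo test documents and
# return their generated names:
#   make_test_objects("ToDo", [{"description": "foo"}, {"description": "bar"}])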
def print_mandatory_fields(doctype):
print("Please setup make_test_records for: " + doctype)
print("-" * 60)
meta = frappe.get_meta(doctype)
print("Autoname: " + (meta.autoname or ""))
print("Mandatory Fields: ")
for d in meta.get("fields", {"reqd":1}):
print(d.parent + ":" + d.fieldname + " | " + d.fieldtype + " | " + (d.options or ""))
print()
def add_to_test_record_log(doctype):
'''Add `doctype` to site/.test_log
`.test_log` is a cache of all doctypes for which test records are created'''
test_record_log = get_test_record_log()
if not doctype in test_record_log:
frappe.flags.test_record_log.append(doctype)
with open(frappe.get_site_path('.test_log'), 'w') as f:
f.write('\n'.join(filter(None, frappe.flags.test_record_log)))
def get_test_record_log():
'''Return the list of doctypes for which test records have been created'''
if 'test_record_log' not in frappe.flags:
if os.path.exists(frappe.get_site_path('.test_log')):
with open(frappe.get_site_path('.test_log'), 'r') as f:
frappe.flags.test_record_log = f.read().splitlines()
else:
frappe.flags.test_record_log = []
return frappe.flags.test_record_log
| {
"content_hash": "76217fe38095678c0f7059a09df0c5dc",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 114,
"avg_line_length": 30.070093457943926,
"alnum_prop": 0.7089355089355089,
"repo_name": "vjFaLk/frappe",
"id": "e7ec13afe6d6b3f36a9bcfa9cd1afdbe78b66d0b",
"size": "12971",
"binary": false,
"copies": "1",
"ref": "refs/heads/parsimony-production",
"path": "frappe/test_runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "290337"
},
{
"name": "HTML",
"bytes": "179507"
},
{
"name": "JavaScript",
"bytes": "2179734"
},
{
"name": "Less",
"bytes": "146135"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2774237"
},
{
"name": "SCSS",
"bytes": "15721"
},
{
"name": "Shell",
"bytes": "3875"
},
{
"name": "Vue",
"bytes": "95109"
}
],
"symlink_target": ""
} |
import pytest
from notifiers.exceptions import BadArguments
from notifiers.exceptions import NotificationError
from notifiers.exceptions import ResourceError
provider = "gitter"
class TestGitter:
def test_metadata(self, provider):
assert provider.metadata == {
"base_url": "https://api.gitter.im/v1/rooms",
"message_url": "/{room_id}/chatMessages",
"name": "gitter",
"site_url": "https://gitter.im",
}
@pytest.mark.parametrize(
"data, message",
[
({}, "message"),
({"message": "foo"}, "token"),
({"message": "foo", "token": "bar"}, "room_id"),
],
)
def test_missing_required(self, provider, data, message):
data["env_prefix"] = "test"
with pytest.raises(BadArguments) as e:
provider.notify(**data)
assert f"'{message}' is a required property" in e.value.message
def test_bad_request(self, provider):
data = {"token": "foo", "room_id": "baz", "message": "bar"}
with pytest.raises(NotificationError) as e:
rsp = provider.notify(**data)
rsp.raise_on_errors()
assert "Unauthorized" in e.value.message
@pytest.mark.online
def test_bad_room_id(self, provider):
data = {"room_id": "baz", "message": "bar"}
with pytest.raises(NotificationError) as e:
rsp = provider.notify(**data)
rsp.raise_on_errors()
assert "Bad Request" in e.value.message
@pytest.mark.online
def test_sanity(self, provider, test_message):
data = {"message": test_message}
rsp = provider.notify(**data)
rsp.raise_on_errors()
def test_gitter_resources(self, provider):
assert provider.resources
for resource in provider.resources:
assert getattr(provider, resource)
assert "rooms" in provider.resources
class TestGitterResources:
resource = "rooms"
def test_gitter_rooms_attribs(self, resource):
assert resource.schema == {
"type": "object",
"properties": {
"token": {"type": "string", "title": "access token"},
"filter": {"type": "string", "title": "Filter results"},
},
"required": ["token"],
"additionalProperties": False,
}
assert resource.name == provider
assert resource.required == {"required": ["token"]}
def test_gitter_rooms_negative(self, resource):
with pytest.raises(BadArguments):
resource(env_prefix="foo")
def test_gitter_rooms_negative_2(self, resource):
with pytest.raises(ResourceError) as e:
resource(token="foo")
assert e.value.errors == ["Unauthorized"]
assert e.value.response.status_code == 401
@pytest.mark.online
def test_gitter_rooms_positive(self, resource):
rsp = resource()
assert isinstance(rsp, list)
@pytest.mark.online
def test_gitter_rooms_positive_with_filter(self, resource):
assert resource(filter="notifiers/testing")
class TestGitterCLI:
"""Test Gitter specific CLI commands"""
def test_gitter_rooms_negative(self, cli_runner):
cmd = "gitter rooms --token bad_token".split()
result = cli_runner(cmd)
assert result.exit_code
assert not result.output
@pytest.mark.online
def test_gitter_rooms_positive(self, cli_runner):
cmd = "gitter rooms".split()
result = cli_runner(cmd)
assert not result.exit_code
assert "notifiers/testing" in result.output
@pytest.mark.online
def test_gitter_rooms_with_query(self, cli_runner):
cmd = "gitter rooms --filter notifiers/testing".split()
result = cli_runner(cmd)
assert not result.exit_code
assert "notifiers/testing" in result.output
| {
"content_hash": "1c904d53d647b66a7a24f9c9017ae928",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 72,
"avg_line_length": 33.08474576271186,
"alnum_prop": 0.5998975409836066,
"repo_name": "liiight/notifiers",
"id": "0da81888a236fc77b0da9e79bca79844e57e062c",
"size": "3904",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/providers/test_gitter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "811"
},
{
"name": "Dockerfile",
"bytes": "106"
},
{
"name": "Makefile",
"bytes": "612"
},
{
"name": "Python",
"bytes": "189594"
}
],
"symlink_target": ""
} |
"""Implement (a generalized version of) the idea from papers [1, 2].
[1] Alexandre Sedoglavic, "A non-commutative algorithm for multiplying (7 × 7)
matrices using 250 multiplications" (2017).
[2] Drevet, Charles-Éric, Md Nazrul Islam, and Éric Schost. "Optimization
techniques for small matrix multiplication." Theoretical Computer Science 412.22
(2011): 2219-2236.
"""
from typing import Any, Dict, Iterator, List, Sequence, Tuple
import numpy as np
from alphatensor.recombination import sota
def _tensor_shape_to_matrix_sizes(
tensor_shape: Tuple[int, int, int]) -> Tuple[int, int, int]:
"""Returns the sizes of the multiplied matrices from the matmul tensor shape.
When multiplying an [a, b] and [b, c] matrix, the size of the corresponding
matrix multiplication tensor T_{a, b, c} is [ab, bc, ca]. This function
computes the inverse mapping from the tensor size to the matrix sizes.
Args:
tensor_shape: Shape of a 3D matrix multiplication tensor T_{a, b, c}.
Returns:
The three integers a, b, c describing the matrix sizes being multiplied.
"""
ab, bc, ca = tensor_shape
a = int(np.sqrt(ab * ca // bc))
b = int(np.sqrt(ab * bc // ca))
c = int(np.sqrt(bc * ca // ab))
assert a * b == ab and b * c == bc and c * a == ca
return a, b, c
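# Worked example: T_{2, 3, 4} has shape [ab, bc, ca] = [6, 12, 8], so
# _tensor_shape_to_matrix_sizes((6, 12, 8)) returns (2, 3, 4).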
def _factorization_2d_to_3d(
factors: Tuple[np.ndarray, np.ndarray, np.ndarray],
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Converts factorization with `u` of shape [a*b, rank] into [a, b, rank]."""
u, v, w = factors
a, b, c = _tensor_shape_to_matrix_sizes((u.shape[0], v.shape[0], w.shape[0]))
rank = u.shape[-1]
u = u.reshape(a, b, rank)
v = v.reshape(b, c, rank)
w = w.reshape(c, a, rank)
return u, v, w
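# e.g. for T_{2, 3, 4} with rank 20, u goes from shape [6, 20] to [2, 3, 20],
# v from [12, 20] to [3, 4, 20], and w from [8, 20] to [4, 2, 20].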
def _block_fillings(num_blocks: int, budget: int) -> Iterator[List[int]]:
"""Iterates over all options of filling `num_blocks` with `budget` balls."""
if num_blocks == 1:
yield [budget]
return
for i in range(budget + 1):
for rest in _block_fillings(num_blocks - 1, budget - i):
yield [i] + rest
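# Example: list(_block_fillings(2, 2)) == [[0, 2], [1, 1], [2, 0]], i.e. all
# ways of distributing 2 balls over 2 blocks.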
def _process_additions(matricized_factor_vector: np.ndarray,
row_nonzeros: Sequence[int],
col_nonzeros: Sequence[int]) -> Tuple[int, int]:
"""Returns the nonzero matrix size after adding multiple matrices together.
Nonzero elements of a factor vector stipulate that the corresponding entries
of the base matrix (which in this case are themselves matrices) are to be
added up. The number of potentially nonzero rows in this sum is the maximum
over the number of nonzero rows in each summand, and similarly for the number
of columns. See Supplementary Information of the paper for an illustrative
example.
Args:
matricized_factor_vector: [x, y]-shaped array representing a single factor
vector (`u`, or `v`, or `w`) in matrix form. For example, [x, y] = [a, b]
when this is a `u` vector.
row_nonzeros: List of length x, with the i-th entry specifying the number of
rows of the target matrix that were allocated to the i-th row of the base
matrix on the first level of recursion.
col_nonzeros: List of length y, with the i-th entry specifying the number of
columns of the target matrix that were allocated to the i-th column of the
base matrix on the first level of recursion.
Returns:
Two integers describing respectively the largest number of nonzero rows and
columns after the submatrices corresponding to nonzero entries of the factor
vector `matricized_factor_vector` are added up.
"""
max_rows = 0
max_cols = 0
for i, j in zip(*np.nonzero(matricized_factor_vector)):
max_rows = max(max_rows, row_nonzeros[i])
max_cols = max(max_cols, col_nonzeros[j])
return max_rows, max_cols
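# Worked example: if the factor vector is nonzero at entries (0, 1) and (1, 0),
# with row_nonzeros = [3, 2] and col_nonzeros = [4, 5], the summed submatrix has
# at most max(3, 2) = 3 nonzero rows and max(5, 4) = 5 nonzero columns, so the
# function returns (3, 5).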
def recombine(target_matrix_sizes: Tuple[int, int, int],
base_factors: Tuple[np.ndarray, np.ndarray, np.ndarray],
) -> Dict[str, Any]:
"""Decomposes T_{a, b, c} using `base_factors` as the 1st level of recursion.
See Supplementary Information of the paper for more details.
Args:
    target_matrix_sizes: Triplet (a, b, c) specifying the matrix multiplication
problem of multiplying an [a, b] matrix by a [b, c] matrix. Equivalently,
specifies a matrix multiplication tensor T_{a, b, c} to be decomposed.
base_factors: Three arrays providing a factorization of a (usually smaller)
matrix multiplication tensor T_{base_a, base_b, base_c}. This algorithm
will be used on the first level of recursion when decomposing T_{a, b, c}.
Returns:
Dictionary with information about the best rank discovered for T_{a, b, c}.
"""
base_rank = base_factors[0].shape[-1]
base_tensor_shape = tuple(v.shape[0] for v in base_factors)
base_a, base_b, base_c = _tensor_shape_to_matrix_sizes(base_tensor_shape)
u, v, w = _factorization_2d_to_3d(base_factors)
# The matrix multiplication tensor T_{a, b, c} by convention represents the
# operation (A, B) -> (AB)^T, i.e. with an additional transposition. Here we
# will work with the non-transposed version for simplicity.
w = w.transpose(1, 0, 2)
best = {}
# To apply an algorithm for (base_a, base_b, base_c) to the target problem
# (target_a, target_b, target_c), we try all possibilities of how to allocate
# the `target_a` rows of the original problem to the `base_a` rows of the
# algorithm to be applied on the first level of recursion; and similarly for
# the `target_b` and `target_c` dimensions.
target_a, target_b, target_c = target_matrix_sizes
for allocation_a in _block_fillings(base_a, target_a):
for allocation_b in _block_fillings(base_b, target_b):
for allocation_c in _block_fillings(base_c, target_c):
total_rank = 0
small_matrix_sizes = []
for r in range(base_rank):
u1, u2 = _process_additions(u[:, :, r], allocation_a, allocation_b)
v1, v2 = _process_additions(v[:, :, r], allocation_b, allocation_c)
w1, w2 = _process_additions(w[:, :, r], allocation_a, allocation_c)
# We now need to compute the product of [u1, u2] and [v1, v2]-shaped
# matrices (with appropriate zero-padding), and then extract the
# [w1, w2] upper-left portion of the resulting product. Note that this
# can be achieved by an algorithm that multiplies matrices of sizes
# [min(u1, w1), min(u2, v1)] and [min(u2, v1), min(v2, w2)] since it
# is not necessary to compute elements that will end up zero/unused.
current_matrix_sizes = min(u1, w1), min(u2, v1), min(v2, w2)
total_rank += sota.get_sota_rank(*current_matrix_sizes)
small_matrix_sizes.append(current_matrix_sizes)
if not best or total_rank < best['rank']:
best = {
'rank': total_rank,
'small_matrix_sizes': small_matrix_sizes,
'allocation_pattern': (allocation_a, allocation_b, allocation_c),
}
return best
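# Illustrative usage (assumed factors): decompose T_{4, 5, 5} by applying a
# rank-7 factorization of T_{2, 2, 2} (e.g. Strassen's algorithm) on the first
# level of recursion; u, v and w would each have shape [4, 7]:
#   best = recombine((4, 5, 5), (strassen_u, strassen_v, strassen_w))
#   print(best['rank'], best['allocation_pattern'])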
| {
"content_hash": "deb90621974e50589868faeb7072c688",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 80,
"avg_line_length": 43.17901234567901,
"alnum_prop": 0.6677626876340244,
"repo_name": "deepmind/alphatensor",
"id": "efa84565ab957f8cd27e403774a7cc3fc6e6fa2b",
"size": "7672",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "recombination/recombination.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "15509"
},
{
"name": "Python",
"bytes": "54224"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import mock
import os.path
import responses
import pytest
from datetime import datetime
from flask import current_app
from uuid import UUID
from changes.config import db
from changes.constants import Status, Result
from changes.models import (
Artifact, TestCase, Patch, LogSource, LogChunk, Job, JobPhase, FileCoverage
)
from changes.backends.jenkins.builder import JenkinsBuilder
from changes.testutils import (
BackendTestCase, eager_tasks, SAMPLE_DIFF, SAMPLE_XUNIT, SAMPLE_COVERAGE
)
class BaseTestCase(BackendTestCase):
provider = 'jenkins'
builder_cls = JenkinsBuilder
builder_options = {
'base_url': 'http://jenkins.example.com',
'job_name': 'server',
}
def setUp(self):
self.project = self.create_project()
super(BaseTestCase, self).setUp()
def get_builder(self, **options):
base_options = self.builder_options.copy()
base_options.update(options)
return self.builder_cls(app=current_app, **base_options)
def load_fixture(self, filename):
filepath = os.path.join(
os.path.dirname(__file__),
filename,
)
with open(filepath, 'rb') as fp:
return fp.read()
# TODO(dcramer): these tests need to ensure we're passing the right parameters
# to jenkins
class CreateBuildTest(BaseTestCase):
@responses.activate
def test_queued_creation(self):
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build/api/json/',
body='',
status=201)
responses.add(
responses.GET, 'http://jenkins.example.com/queue/api/xml/?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%2281d1596fd4d642f4a6bdf86c45e014e8%22%5D%2Fid&wrapper=x',
body=self.load_fixture('fixtures/GET/queue_item_by_job_id.xml'),
match_querystring=True)
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/api/xml/?depth=1&xpath=/queue/item[action/parameter/name=%22CHANGES_BID%22%20and%20action/parameter/value=%2281d1596fd4d642f4a6bdf86c45e014e8%22]/id',
status=404,
match_querystring=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'))
builder = self.get_builder()
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data == {
'build_no': None,
'item_id': '13',
'job_name': 'server',
'queued': True,
'uri': None,
}
@responses.activate
def test_active_creation(self):
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build/api/json/',
body='',
status=201)
responses.add(
responses.GET, 'http://jenkins.example.com/queue/api/xml/?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22f9481a17aac446718d7893b6e1c6288b%22%5D%2Fid&wrapper=x',
status=404,
match_querystring=True)
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/api/xml/?xpath=%2FfreeStyleProject%2Fbuild%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22f9481a17aac446718d7893b6e1c6288b%22%5D%2Fnumber&depth=1&wrapper=x',
body=self.load_fixture('fixtures/GET/build_item_by_job_id.xml'),
match_querystring=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('f9481a17aac446718d7893b6e1c6288b'),
)
builder = self.get_builder()
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data == {
'build_no': '1',
'item_id': None,
'job_name': 'server',
'queued': False,
'uri': None,
}
@responses.activate
@mock.patch.object(JenkinsBuilder, '_find_job')
def test_patch(self, find_job):
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build/api/json/',
body='',
status=201)
find_job.return_value = {
'build_no': '1',
'item_id': None,
'job_name': 'server',
'queued': False,
}
patch = Patch(
repository=self.project.repository,
parent_revision_sha='7ebd1f2d750064652ef5bbff72452cc19e1731e0',
diff=SAMPLE_DIFF,
)
db.session.add(patch)
source = self.create_source(self.project, patch=patch)
build = self.create_build(self.project, source=source)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8')
)
builder = self.get_builder()
builder.create_job(job)
class CancelStepTest(BaseTestCase):
@responses.activate
def test_queued(self):
responses.add(
responses.POST, 'http://jenkins.example.com/queue/cancelItem?id=13',
match_querystring=True, status=302)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'item_id': 13,
'job_name': 'server',
}, status=Status.queued)
builder = self.get_builder()
builder.cancel_step(step)
assert step.result == Result.aborted
assert step.status == Status.finished
@responses.activate
def test_active(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/stop/',
body='', status=302)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': 2,
'job_name': 'server',
}, status=Status.in_progress)
builder = self.get_builder()
builder.cancel_step(step)
assert step.status == Status.finished
assert step.result == Result.aborted
class SyncStepTest(BaseTestCase):
@responses.activate
def test_waiting_in_queue(self):
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_pending.json'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': None,
'item_id': 13,
'job_name': 'server',
'queued': True,
})
builder = self.get_builder()
builder.sync_step(step)
assert step.status == Status.queued
@responses.activate
def test_cancelled_in_queue(self):
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_cancelled.json'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': None,
'item_id': 13,
'job_name': 'server',
'queued': True,
})
builder = self.get_builder()
builder.sync_step(step)
assert step.status == Status.finished
assert step.result == Result.aborted
@responses.activate
def test_queued_to_active(self):
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': None,
'item_id': 13,
'job_name': 'server',
'queued': True,
})
builder = self.get_builder()
builder.sync_step(step)
assert step.data['build_no'] == 2
@responses.activate
def test_success_result(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_success.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
assert step.data['build_no'] == 2
assert step.status == Status.finished
assert step.result == Result.passed
assert step.date_finished is not None
@responses.activate
def test_failed_result(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_failed.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
assert step.data['build_no'] == 2
assert step.status == Status.finished
assert step.result == Result.failed
assert step.date_finished is not None
class SyncGenericResultsTest(BaseTestCase):
@responses.activate
def test_does_sync_log(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_failed.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
source = LogSource.query.filter_by(job=job).first()
assert source.step == step
assert source.name == step.label
assert source.project == self.project
assert source.date_created == step.date_started
chunks = list(LogChunk.query.filter_by(
source=source,
).order_by(LogChunk.date_created.asc()))
assert len(chunks) == 1
assert chunks[0].job_id == job.id
assert chunks[0].project_id == self.project.id
assert chunks[0].offset == 0
assert chunks[0].size == 7
assert chunks[0].text == 'Foo bar'
assert step.data.get('log_offset') == 7
@responses.activate
@mock.patch('changes.backends.jenkins.builder.sync_artifact')
def test_does_fire_sync_artifacts(self, sync_artifact):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_with_artifacts.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
log_artifact = Artifact.query.filter(
Artifact.name == 'foobar.log',
Artifact.step == step,
).first()
assert log_artifact.data == {
"displayPath": "foobar.log",
"fileName": "foobar.log",
"relativePath": "artifacts/foobar.log",
}
sync_artifact.delay_if_needed.assert_any_call(
artifact_id=log_artifact.id.hex,
task_id=log_artifact.id.hex,
parent_task_id=step.id.hex,
skip_checks=False,
)
xunit_artifact = Artifact.query.filter(
Artifact.name == 'tests.xml',
Artifact.step == step,
).first()
assert xunit_artifact.data == {
"displayPath": "tests.xml",
"fileName": "tests.xml",
"relativePath": "artifacts/tests.xml",
}
sync_artifact.delay_if_needed.assert_any_call(
artifact_id=xunit_artifact.id.hex,
task_id=xunit_artifact.id.hex,
parent_task_id=step.id.hex,
skip_checks=False,
)
class SyncPhasedResultsTest(BaseTestCase):
@responses.activate
def test_does_sync_phases(self):
phase_data = {
"retcode": 0,
"command": ["echo", "foo bar"],
"log": "test.log",
"startTime": 1403645499.39586,
"endTime": 1403645500.398765,
"name": "Test"
}
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_with_phase_artifacts.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/test.phase.json',
body=json.dumps(phase_data))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
# the log should still get populated for the existing phase
source = LogSource.query.filter_by(job=job).first()
assert source.step == step
assert source.name == step.label
assert source.project == self.project
assert source.date_created == step.date_started
chunks = list(LogChunk.query.filter_by(
source=source,
).order_by(LogChunk.date_created.asc()))
assert len(chunks) == 1
assert chunks[0].job_id == job.id
assert chunks[0].project_id == self.project.id
assert chunks[0].offset == 0
assert chunks[0].size == 7
assert chunks[0].text == 'Foo bar'
assert step.data.get('log_offset') == 7
other_phases = list(JobPhase.query.filter(
JobPhase.job_id == job.id,
JobPhase.id != phase.id,
))
assert len(other_phases) == 1
test_phase = other_phases[0]
assert test_phase.label == 'Test'
assert test_phase.result == Result.passed
assert test_phase.status == Status.finished
assert test_phase.date_started == datetime(2014, 6, 24, 21, 31, 39, 395860)
assert test_phase.date_finished == datetime(2014, 6, 24, 21, 31, 40, 398765)
assert len(test_phase.steps) == 1
test_step = test_phase.steps[0]
assert test_step.label == step.label
assert test_step.result == test_phase.result
assert test_step.status == test_phase.status
assert test_step.node == step.node
assert test_step.data == {
'job_name': 'server',
'build_no': 2,
'generated': True,
}
assert test_step.date_started == test_phase.date_started
assert test_step.date_finished == test_phase.date_finished
log_artifact = Artifact.query.filter(
Artifact.name == 'test.log',
Artifact.step_id == test_step.id,
).first()
assert log_artifact.data == {
"displayPath": "test.log",
"fileName": "test.log",
"relativePath": "artifacts/test.log",
}
class SyncArtifactTest(BaseTestCase):
@responses.activate
def test_sync_artifact_as_log(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/foobar.log',
body='hello world')
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='foobar.log', data={
"displayPath": "foobar.log",
"fileName": "foobar.log",
"relativePath": "artifacts/foobar.log"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
source = LogSource.query.filter(
LogSource.job_id == job.id,
LogSource.name == 'foobar.log',
).first()
assert source is not None
assert source.step == step
assert source.project == self.project
chunks = list(LogChunk.query.filter_by(
source=source,
).order_by(LogChunk.date_created.asc()))
assert len(chunks) == 1
assert chunks[0].job_id == job.id
assert chunks[0].project_id == self.project.id
assert chunks[0].offset == 0
assert chunks[0].size == 11
assert chunks[0].text == 'hello world'
@responses.activate
def test_sync_artifact_as_xunit(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/xunit.xml',
body=SAMPLE_XUNIT,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='xunit.xml', data={
"displayPath": "xunit.xml",
"fileName": "xunit.xml",
"relativePath": "artifacts/xunit.xml"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
test_list = list(TestCase.query.filter(
TestCase.job_id == job.id
))
assert len(test_list) == 2
@responses.activate
def test_sync_artifact_as_coverage(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/coverage.xml',
body=SAMPLE_COVERAGE,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='coverage.xml', data={
"displayPath": "coverage.xml",
"fileName": "coverage.xml",
"relativePath": "artifacts/coverage.xml"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
cover_list = list(FileCoverage.query.filter(
FileCoverage.job_id == job.id
))
assert len(cover_list) == 2
@responses.activate
def test_sync_artifact_as_file(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/foo.bar',
body=SAMPLE_COVERAGE,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='foo.bar', data={
"displayPath": "foo.bar",
"fileName": "foo.bar",
"relativePath": "artifacts/foo.bar"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
class JenkinsIntegrationTest(BaseTestCase):
"""
This test should ensure a full cycle of tasks completes successfully within
the jenkins builder space.
"""
# it's possible for this test to infinitely hang due to continuous polling,
# so let's ensure we set a timeout
@pytest.mark.timeout(5)
@mock.patch('changes.config.redis.lock', mock.MagicMock())
@eager_tasks
@responses.activate
def test_full(self):
from changes.jobs.create_job import create_job
# TODO: move this out of this file and integrate w/ buildstep
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build/api/json/',
body='',
status=201)
responses.add(
responses.GET, 'http://jenkins.example.com/queue/api/xml/?wrapper=x&xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%2281d1596fd4d642f4a6bdf86c45e014e8%22%5D%2Fid',
body=self.load_fixture('fixtures/GET/queue_item_by_job_id.xml'),
match_querystring=True)
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_success.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'))
plan = self.create_plan()
plan.projects.append(self.project)
self.create_step(
plan, order=0, implementation='changes.backends.jenkins.buildstep.JenkinsBuildStep', data={
'job_name': 'server',
},
)
self.create_job_plan(job, plan)
job_id = job.id.hex
build_id = build.id.hex
create_job.delay(
job_id=job_id,
task_id=job_id,
parent_task_id=build_id,
)
job = Job.query.get(job_id)
assert job.status == Status.finished
assert job.result == Result.passed
assert job.date_created
assert job.date_started
assert job.date_finished
phase_list = job.phases
assert len(phase_list) == 1
assert phase_list[0].status == Status.finished
assert phase_list[0].result == Result.passed
assert phase_list[0].date_created
assert phase_list[0].date_started
assert phase_list[0].date_finished
step_list = phase_list[0].steps
assert len(step_list) == 1
assert step_list[0].status == Status.finished
assert step_list[0].result == Result.passed
assert step_list[0].date_created
assert step_list[0].date_started
assert step_list[0].date_finished
assert step_list[0].data == {
'item_id': '13',
'queued': False,
'log_offset': 7,
'job_name': 'server',
'build_no': 2,
'uri': 'https://jenkins.build.itc.dropbox.com/job/server/2/',
}
node = step_list[0].node
assert node.label == 'server-ubuntu-10.04 (ami-746cf244) (i-836023b7)'
assert [n.label for n in node.clusters] == ['server-runner']
source = LogSource.query.filter_by(job=job).first()
assert source.name == step_list[0].label
assert source.step == step_list[0]
assert source.project == self.project
assert source.date_created == job.date_started
chunks = list(LogChunk.query.filter_by(
source=source,
).order_by(LogChunk.date_created.asc()))
assert len(chunks) == 1
assert chunks[0].job_id == job.id
assert chunks[0].project_id == self.project.id
assert chunks[0].offset == 0
assert chunks[0].size == 7
assert chunks[0].text == 'Foo bar'
| {
"content_hash": "d9ed35a4442706e2a1f919ee852da16b",
"timestamp": "",
"source": "github",
"line_count": 845,
"max_line_length": 260,
"avg_line_length": 35.19644970414201,
"alnum_prop": 0.5763760465350862,
"repo_name": "alex/changes",
"id": "e1ca6cd81369263deb5f10806c40c309f0d6dfe7",
"size": "29741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/changes/backends/jenkins/test_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from django.contrib import admin
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Template
from django.urls import include, path
import treenav.urls
from ..admin import MenuItemAdmin
from ..models import MenuItem
admin.autodiscover()
# create a second Admin site and register MenuItem against it
site2 = admin.AdminSite(name="admin2")
site2.register(MenuItem, MenuItemAdmin)
def test_view(request, item_slug):
pslug = request.POST["pslug"]
N = request.POST["N"]
t = Template("{% load treenav_tags %}{% single_level_menu pslug N %}")
c = Context(
{
"request": request,
"pslug": pslug,
"N": N,
}
)
return HttpResponse(t.render(c))
def test_404(request, exception=None):
return HttpResponseNotFound()
handler404 = test_404 # noqa
urlpatterns = [
path("admin/", admin.site.urls),
path("admin2/", site2.urls),
path("item/<slug:item_slug>/$", test_view, name="test_view"),
path("old/", include(treenav.urls)),
]
| {
"content_hash": "0f218a8d42829c95bdb9d6fd13956f70",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 24.674418604651162,
"alnum_prop": 0.6672950047125353,
"repo_name": "caktus/django-treenav",
"id": "850c8920ca42db928d7fea4cfe529a40f5df4aec",
"size": "1061",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "treenav/tests/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3790"
},
{
"name": "Python",
"bytes": "53243"
}
],
"symlink_target": ""
} |
import glob
import logging
import os
import pyauto_functional # Must be imported before pyauto
import pyauto
class ThemesTest(pyauto.PyUITest):
"""TestCase for Themes."""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
while True:
raw_input('Hit <enter> to dump info.. ')
self.pprint(self.GetThemeInfo())
def _SetThemeAndVerify(self, crx_file, theme_name):
"""Set theme and verify infobar appears and the theme name is correct.
Args:
crx_file: Path to .crx file to be set as theme.
theme_name: String to be compared to GetThemeInfo()['name'].
"""
# Starting infobar count is the number of non-themes infobars.
infobars = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
infobar_count = 0
for infobar in infobars:
if not (('text' in infobar) and
infobar['text'].startswith('Installed theme')):
infobar_count += 1
self.SetTheme(crx_file)
# Verify infobar shows up.
self.assertTrue(self.WaitForInfobarCount(infobar_count + 1))
self.assertTrue(self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars'])
# Verify theme name is correct.
self.assertEqual(theme_name, self.GetThemeInfo()['name'])
def testSetTheme(self):
"""Verify theme install."""
self.assertFalse(self.GetThemeInfo()) # Verify there's no theme at startup
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self._SetThemeAndVerify(crx_file, 'camo theme')
def testThemeInFullScreen(self):
"""Verify theme can be installed in FullScreen mode."""
self.ApplyAccelerator(pyauto.IDC_FULLSCREEN)
self.assertFalse(self.GetThemeInfo()) # Verify there's no theme at startup
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self._SetThemeAndVerify(crx_file, 'camo theme')
def testThemeReset(self):
"""Verify theme reset."""
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self.SetTheme(crx_file)
self.assertTrue(self.ResetToDefaultTheme())
self.assertFalse(self.GetThemeInfo())
def testThemeUndo(self):
"""Verify theme undo."""
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self._SetThemeAndVerify(crx_file, 'camo theme')
# Undo theme install.
infobars = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
for index, infobar in enumerate(infobars):
if (('text' in infobar) and
infobar['text'].startswith('Installed theme')):
theme_index = index
break
self.PerformActionOnInfobar('cancel', infobar_index=theme_index)
self.assertFalse(self.GetThemeInfo())
def testThemeOverInstall(self):
"""Verify that can install a theme over an existing theme."""
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self._SetThemeAndVerify(crx_file, 'camo theme')
# Install a different theme.
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme2.crx'))
self._SetThemeAndVerify(crx_file, 'snowflake theme')
def _ReturnCrashingThemes(self, themes, group_size, urls):
"""Install the given themes in groups of group_size and return the
group of themes that crashes (if any).
Note: restarts the browser at the beginning of the function.
Args:
themes: A list of themes to install.
group_size: The number of themes to install at one time.
urls: The list of urls to visit.
Returns:
Group of themes that crashed (if any).
"""
self.RestartBrowser()
curr_theme = 0
num_themes = len(themes)
while curr_theme < num_themes:
logging.debug('New group of %d themes.' % group_size)
group_end = curr_theme + group_size
this_group = themes[curr_theme:group_end]
# Apply each theme in this group.
for theme in this_group:
logging.debug('Applying theme: %s' % theme)
self.SetTheme(theme)
for url in urls:
self.NavigateToURL(url)
def _LogAndReturnCrashing():
logging.debug('Crashing themes: %s' % this_group)
return this_group
# Assert that there is at least 1 browser window.
try:
num_browser_windows = self.GetBrowserWindowCount()
except:
return _LogAndReturnCrashing()
else:
if not num_browser_windows:
return _LogAndReturnCrashing()
curr_theme = group_end
# None of the themes crashed.
return None
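  # Illustrative flow (assumed numbers): with 40 themes and group_size=20,
  # Runner() below installs them 20 at a time; when a group crashes the
  # browser, only that group is retried with the group size halved, narrowing
  # down the offending theme(s).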
def Runner(self):
"""Apply themes; verify that theme has been applied and browser doesn't
crash.
This does not get run automatically. To run:
python themes.py themes.ThemesTest.Runner
Note: this test requires that a directory of crx files called 'themes'
exists in the data directory.
"""
themes_dir = os.path.join(self.DataDir(), 'themes')
urls_file = os.path.join(self.DataDir(), 'urls.txt')
assert os.path.exists(themes_dir), \
'The dir "%s" must exist' % os.path.abspath(themes_dir)
group_size = 20
num_urls_to_visit = 100
urls = [l.rstrip() for l in
open(urls_file).readlines()[:num_urls_to_visit]]
failed_themes = glob.glob(os.path.join(themes_dir, '*.crx'))
while failed_themes and group_size:
failed_themes = self._ReturnCrashingThemes(failed_themes, group_size,
urls)
group_size = group_size // 2
self.assertFalse(failed_themes,
'Theme(s) in failing group: %s' % failed_themes)
if __name__ == '__main__':
pyauto_functional.Main()
| {
"content_hash": "4ad786e54049e92c44dd91b867fb7fa8",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 79,
"avg_line_length": 33.645348837209305,
"alnum_prop": 0.6476585450146881,
"repo_name": "keishi/chromium",
"id": "e856f04c1de7fea918a9094c26c7ed3c685f283f",
"size": "5976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chrome/test/functional/themes.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1172794"
},
{
"name": "C",
"bytes": "67452317"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "132681259"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "19048"
},
{
"name": "Java",
"bytes": "361412"
},
{
"name": "JavaScript",
"bytes": "16603687"
},
{
"name": "Objective-C",
"bytes": "9609581"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "918683"
},
{
"name": "Python",
"bytes": "6407891"
},
{
"name": "R",
"bytes": "524"
},
{
"name": "Shell",
"bytes": "4192593"
},
{
"name": "Tcl",
"bytes": "277077"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class MediaAuthConfig(AppConfig):
name = "media_management_api.media_auth"
verbose_name = "media_management_api.media_auth"
| {
"content_hash": "2b0c7e3338eb26ba295c9fb270a608a8",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 52,
"avg_line_length": 28,
"alnum_prop": 0.7559523809523809,
"repo_name": "Harvard-ATG/media_management_api",
"id": "9089a31846645e43f3c4fba3cf33eb0280788921",
"size": "168",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "media_management_api/media_auth/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "414"
},
{
"name": "Python",
"bytes": "219422"
},
{
"name": "Shell",
"bytes": "4079"
}
],
"symlink_target": ""
} |
import re
import time
from datetime import datetime
import pytest
import requests.adapters
from responses import matchers
from mollie.api.client import Client, generate_querystring
from mollie.api.error import (
NotFoundError,
RequestError,
RequestSetupError,
ResponseError,
ResponseHandlingError,
UnauthorizedError,
UnprocessableEntityError,
)
from mollie.api.objects.method import Method
from mollie.api.objects.organization import Organization
from .utils import assert_list_object
@pytest.mark.parametrize(
"params, querystring",
[
({}, None),
({"locale": "nl_NL"}, "locale=nl_NL"),
({"locale": "nl_NL", "hoeba": "kek"}, "locale=nl_NL&hoeba=kek"),
({"amount": {"value": "100.00", "currency": "USD"}}, "amount%5Bvalue%5D=100.00&amount%5Bcurrency%5D=USD"),
],
)
def test_generate_querystring(params, querystring):
"""Verify that we can generate querystring that are correctly quoted."""
result = generate_querystring(params)
assert result == querystring
def test_client_querystring(client, response):
"""Verify that we are triggering the correct URL when using querystring with square brackets."""
response.add(
response.GET,
"https://api.mollie.com/v2/methods",
body=response._get_body("methods_list"),
match=[matchers.query_string_matcher("amount%5Bvalue%5D=100.00&amount%5Bcurrency%5D=USD")],
)
params = {"amount": {"currency": "USD", "value": "100.00"}}
methods = client.methods.list(**params)
assert_list_object(methods, Method)
def test_client_api_key():
"""Setting up a valid api key or access token should be possible."""
client = Client()
client.set_access_token("access_123")
assert client.api_key == "access_123"
client.set_api_key("live_123")
assert client.api_key == "live_123"
client.set_api_key("test_123")
assert client.api_key == "test_123"
def test_client_no_api_key():
"""A Request without an API key should raise an error."""
client = Client()
with pytest.raises(RequestSetupError, match="You have not set an API key."):
client.customers.list()
def test_client_invalid_api_key():
"""Setting up an invalid api key raises an error."""
client = Client()
with pytest.raises(RequestSetupError, match="Invalid API key: 'invalid'"):
client.set_api_key("invalid")
with pytest.raises(RequestSetupError, match="Invalid API key: 'access_123'"):
client.set_api_key("access_123")
with pytest.raises(RequestSetupError, match="Invalid access token: 'invalid'"):
client.set_access_token("invalid")
with pytest.raises(RequestSetupError, match="Invalid access token: 'live_123'"):
client.set_access_token("live_123")
with pytest.raises(RequestSetupError, match="Invalid access token: 'test_123'"):
client.set_access_token("test_123")
def test_client_broken_cert_bundle(monkeypatch):
"""A request should raise an error when the certificate bundle is not available.
    Under some circumstances it is possible that the certifi package is not correctly installed, broken,
or just plain too old. Connecting to the Mollie API should fail with an error when the certificate
cannot be verified.
"""
monkeypatch.setenv("REQUESTS_CA_BUNDLE", "/does/not/exist")
client = Client()
client.set_api_key("test_test")
with pytest.raises(OSError) as excinfo:
client.customers.list()
assert "Could not find a suitable TLS CA certificate bundle, invalid path: /does/not/exist" in str(excinfo.value)
def test_client_generic_request_error(response, oauth_client):
"""When the remote server refuses connections or other request issues arise, an error should be raised.
    The 'response' fixture blocks all outgoing connections, even when no actual responses are configured.
"""
client = Client()
client.set_api_key("test_test")
client.set_api_endpoint("https://api.mollie.invalid/")
with pytest.raises(RequestError, match="Unable to communicate with Mollie: Connection refused"):
client.customers.list()
# Same test, but for oauth-based requests
with pytest.raises(RequestError, match="Unable to communicate with Mollie: Connection refused"):
oauth_client.organizations.get("me")
def test_client_invalid_create_data(client):
"""Invalid data for a create command should raise an error."""
data = datetime.now()
with pytest.raises(RequestSetupError, match="Error encoding parameters into JSON"):
client.customers.create(data=data)
def test_client_invalid_update_data(client):
    """Invalid data for an update command should raise an error."""
data = datetime.now()
with pytest.raises(RequestSetupError, match="Error encoding parameters into JSON"):
client.customers.update("cst_12345", data=data)
def test_client_invalid_json_response(client, response):
"""An invalid json response should raise an error."""
response.get("https://api.mollie.com/v2/customers", "invalid_json")
with pytest.raises(ResponseHandlingError, match=r"Unable to decode Mollie API response \(status code: 200\)"):
client.customers.list()
@pytest.mark.parametrize(
"resp_payload, resp_status, exception, errormsg",
[
("error_unauthorized", 401, UnauthorizedError, "Missing authentication, or failed to authenticate"),
("customer_doesnotexist", 404, NotFoundError, "No customer exists with token cst_doesnotexist."),
("payment_rejected", 422, UnprocessableEntityError, "The amount is higher than the maximum"),
("error_teapot", 418, ResponseError, "Just an example error that is not explicitly supported"),
],
)
def test_client_get_received_error_response(client, response, resp_payload, resp_status, exception, errormsg):
"""An error response from the API should raise a matching error."""
response.get("https://api.mollie.com/v2/customers/cst_doesnotexist", resp_payload, status=resp_status)
with pytest.raises(exception, match=errormsg) as excinfo:
client.customers.get("cst_doesnotexist")
assert excinfo.value.status == resp_status
@pytest.mark.parametrize(
"resp_payload, resp_status, exception, errormsg",
[
("error_unauthorized", 401, UnauthorizedError, "Missing authentication, or failed to authenticate"),
("customer_doesnotexist", 404, NotFoundError, "No customer exists with token cst_doesnotexist."),
("error_teapot", 418, ResponseError, "Just an example error that is not explicitly supported"),
],
)
def test_client_delete_received_error_response(client, response, resp_payload, resp_status, exception, errormsg):
"""When deleting, an error response from the API should raise a matching error."""
response.delete("https://api.mollie.com/v2/customers/cst_doesnotexist", resp_payload, status=resp_status)
with pytest.raises(exception, match=errormsg) as excinfo:
client.customers.delete("cst_doesnotexist")
assert excinfo.value.status == resp_status
def test_client_response_404_but_no_payload(response):
"""An error response from the API should raise an error.
When the response returns an error, but no valid error data is available in the response,
we should still raise an error. The API v1 formatted error in the test is missing the required 'status' field.
"""
response.get("https://api.mollie.com/v3/customers", "v1_api_error", status=404)
client = Client()
client.api_version = "v3"
client.set_api_key("test_test")
with pytest.raises(ResponseHandlingError, match="Invalid API version"):
client.customers.list()
def test_client_error_including_field_response(client, response):
"""An error response containing a 'field' value should be reflected in the raised error."""
response.post("https://api.mollie.com/v2/payments", "payment_rejected", status=422)
data = {
"amount": {
"value": "10000000.00",
"currency": "EUR",
},
"method": "ideal",
"description": "My order",
"redirectUrl": "https://webshop.example.org/order/12345/",
"webhookUrl": "https://webshop.example.org/payments/webhook/",
}
with pytest.raises(UnprocessableEntityError, match="The amount is higher than the maximum") as excinfo:
client.payments.create(data)
assert excinfo.value.field == "amount"
def test_client_unicode_error(client, response):
"""An error response containing Unicode characters should also be processed correctly."""
response.post("https://api.mollie.com/v2/orders", "order_error", status=422)
with pytest.raises(UnprocessableEntityError) as err:
# actual POST data for creating an order can be found in test_orders.py
client.orders.create({})
# handling the error should work even when utf-8 characters (€) are in the response.
exception = err.value
expected = (
"Order line 1 is invalid. VAT amount is off. "
"Expected VAT amount to be €3.47 (21.00% over €20.00), got €3.10"
)
assert str(exception) == expected
def test_client_request_timeout(mocker, client):
"""Mock requests.request in the client to be able to read if the timeout is in the request call args."""
mocked_request = mocker.patch("mollie.api.client.requests.Session.request")
# Create a mocked response for the request
response = mocker.Mock(status_code=200)
response.headers.get.return_value = "application/hal+json"
response.json.return_value = {}
mocked_request.return_value = response
client.set_timeout(300)
client.payments.list()
assert mocked_request.call_args[1]["timeout"] == 300
def test_client_request_timed_out(mocker, client):
"""Timeout should raise a RequestError."""
mocker.patch(
"mollie.api.client.requests.Session.request",
side_effect=requests.exceptions.ReadTimeout(
"HTTPSConnectionPool(host='api.mollie.com', port=443): Read timed out. (read timeout=10)"
),
)
with pytest.raises(RequestError, match="Read timed out."):
client.payments.list()
def test_client_will_propagate_retry_setting(response):
response.get("https://api.mollie.com/v2/methods", "methods_list")
client = Client(retry=3)
client.set_api_key("test_test")
client.methods.list()
adapter = client._client.adapters["https://"]
assert adapter.max_retries.connect == 3
def test_client_version_is_pep440_compatible(client):
# PEP 440 specifies how python package versioning needs to look: https://peps.python.org/pep-0440
# Below is the regular expression from PEP 440, Appendix B, for canonical versions.
regex = r"^([1-9][0-9]*!)?(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))?(\.post(0|[1-9][0-9]*))?(\.dev(0|[1-9][0-9]*))?$" # noqa: E501
assert re.match(regex, client.CLIENT_VERSION), "Client version does not match PEP 440 specification"
def test_client_default_user_agent(client, response):
"""Default user-agent should contain some known values."""
version = re.escape(client.CLIENT_VERSION)
regex = re.compile(rf"^Mollie/{version} Python/[\w\.\+]+ OpenSSL/[\w\.]+$")
assert re.match(regex, client.user_agent)
    # perform a request and inspect the actually used headers
response.get("https://api.mollie.com/v2/methods", "methods_list")
client.methods.list()
request = response.calls[0].request
assert re.match(regex, request.headers["User-Agent"])
def test_oauth_client_default_user_agent(oauth_client, response):
"""Default user-agent should contain some known values."""
version = re.escape(oauth_client.CLIENT_VERSION)
regex = re.compile(rf"^Mollie/{version} Python/[\w\.\+]+ OpenSSL/[\w\.]+ OAuth/2\.0$")
assert re.match(regex, oauth_client.user_agent)
    # perform a request and inspect the actually used headers
response.get("https://api.mollie.com/v2/organizations/me", "organization_current")
oauth_client.organizations.get("me")
request = response.calls[0].request
assert re.match(regex, request.headers["User-Agent"])
def test_client_user_agent_with_access_token():
"""When authenticating with an access token, the User-Agent should contain an OAuth component."""
client = Client()
assert "OAuth".lower() not in client.user_agent.lower()
client.set_access_token("access_123")
assert "OAuth/2.0" in client.user_agent
def test_client_set_user_agent_component(response):
"""We should be able to add useragent components.
Note: we don't use the fixture client because it is shared between tests, and we don't want it
to be clobbered with random User-Agent strings.
"""
client = Client()
assert "Hoeba" not in client.user_agent
client.set_user_agent_component("Hoeba", "1.0.0")
assert "Hoeba/1.0.0" in client.user_agent
response.get("https://api.mollie.com/v2/methods", "methods_list")
client.set_api_key("test_123")
client.methods.list()
request = response.calls[0].request
assert "Hoeba/1.0.0" in request.headers["User-Agent"]
@pytest.mark.parametrize(
"key, expected",
[
("lowercase", "Lowercase"),
("UPPERCASE", "Uppercase"),
("multiple words", "MultipleWords"),
("multiple spaces", "MultipleSpaces"),
("trailing space ", "TrailingSpace"),
],
)
def test_client_set_user_agent_component_correct_key_syntax(key, expected):
"""When we receive UA component keys that don't adhere to the proposed syntax, they are corrected."""
client = Client()
client.set_user_agent_component(key, "1.0.0")
assert f"{expected}/1.0.0" in client.user_agent
@pytest.mark.parametrize(
"value, expected",
[
("1.2.3", "1.2.3"),
("singleword", "singleword"),
("MiXedCaSe", "MiXedCaSe"), # should be preserved
("UPPERCASE", "UPPERCASE"), # should be preserved
("with space", "with_space"),
("multiple spaces", "multiple_spaces"),
("trailing space ", "trailing_space"),
],
)
def test_client_set_user_agent_component_correct_value_syntax(value, expected):
"""When we receive UA component values that don't adhere to the proposed syntax, they are corrected."""
client = Client()
client.set_user_agent_component("Something", value)
assert f"Something/{expected}" in client.user_agent
def test_client_update_user_agent_component():
"""We should be able to update the User-Agent component when using the same key."""
client = Client()
client.set_user_agent_component("Test", "1.0.0")
assert "Test/1.0.0" in client.user_agent
# now update the component using the same key
client.set_user_agent_component("Test", "2.0.0")
assert "Test/2.0.0" in client.user_agent
assert "Test/1.0.0" not in client.user_agent
# and update with a key that will be converted to the same value
client.set_user_agent_component("TEST", "3.0.0")
assert "Test/3.0.0" in client.user_agent
assert "Test/2.0.0" not in client.user_agent
assert "Test/1.0.0" not in client.user_agent
def test_oauth_client_will_refresh_token_automatically(mocker, oauth_token, response):
"""Initializing the client with an expired token will trigger a token refresh automatically."""
# expire the token: set expiration time in the past.
oauth_token["expires_at"] = time.time() - 5
set_token_mock = mocker.Mock()
client = Client()
client.setup_oauth(
client_id="client_id",
client_secret="client_secret",
redirect_uri="https://example.com/callback",
scope=("organizations.read",),
token=oauth_token,
set_token=set_token_mock,
)
# setup two request mocks: the token refresh and the actual data request
response.post("https://api.mollie.com/oauth2/tokens", "token_single")
response.get("https://api.mollie.com/v2/organizations/me", "organization_current")
organization = client.organizations.get("me")
assert isinstance(organization, Organization), "Unexpected result from request."
assert response.assert_all_requests_are_fired, "Not all expected requests have been performed."
# verify handling of the new token
set_token_mock.assert_called_once()
args, kwargs = set_token_mock.call_args
assert isinstance(args[0], dict), "set_token() did not receive a dictionary."
def test_unauthorized_oauth_client_should_return_authorization_url(mocker, response):
set_token_mock = mocker.Mock()
client = Client()
is_authorized, authorization_url = client.setup_oauth(
client_id="client_id",
client_secret="client_secret",
redirect_uri="https://example.com/callback",
scope=("organizations.read",),
token=None,
set_token=set_token_mock,
)
    assert not is_authorized, "A client without an initial token should not be authorized"
    assert authorization_url.startswith(
        client.OAUTH_AUTHORIZATION_URL
    ), "A client without an initial token should return a correct authorization url"
| {
"content_hash": "661a01c7ddc7b48f7a267041f460976b",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 154,
"avg_line_length": 40.02570093457944,
"alnum_prop": 0.6870001751211254,
"repo_name": "mollie/mollie-api-python",
"id": "8049da5341a47dc1cfca1d44e5eb3e735bc760dd",
"size": "17139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_api_client.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1357"
},
{
"name": "Python",
"bytes": "231732"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^', include('config.urls_api')),
url(r'^admin/', admin.site.urls),
]
| {
"content_hash": "855d4b2f209fcdfdf29eb8c4541ff34a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 42,
"avg_line_length": 25,
"alnum_prop": 0.68,
"repo_name": "tm-kn/farmers-api",
"id": "89fb44840abe4bb923b5e8784cefb884d80b3cda",
"size": "175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "farmers_api/config/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "12715"
}
],
"symlink_target": ""
} |
"""Template task in which two jars need to be toppled over."""
import phyre.creator as creator_lib
__PLATFORM_X = [val * 0.05 for val in range(6, 16)]
__PLATFORM_Y = [val * 0.1 for val in range(0, 8)]
@creator_lib.define_task_template(
max_tasks=100,
platform1_x=__PLATFORM_X,
platform1_y=__PLATFORM_Y,
platform2_x=__PLATFORM_X,
platform2_y=__PLATFORM_Y,
search_params=dict(require_two_ball_solvable=True),
)
def build_task(C, platform1_x, platform1_y, platform2_x, platform2_y):
# Second platform must be to the right of the first one.
if platform1_x + 0.3 >= platform2_x:
raise creator_lib.SkipTemplateParams
# Platforms should not differ too much in height.
if abs(platform1_y - platform2_y) >= 0.3:
raise creator_lib.SkipTemplateParams
# Create two jars with balls in them (on a platform).
jar1, ball1 = _jar_with_ball(C, platform1_x, platform1_y, right=False)
jar2, ball2 = _jar_with_ball(C, platform2_x, platform2_y, right=True)
# Create task.
C.update_task(body1=ball1,
body2=ball2,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.TWO_BALLS)
def _jar_with_ball(C, x, y, right=False):
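    """Build a static platform with a small obstacle and an upside-down jar
    holding a ball at the given scene coordinates; returns (jar, ball)."""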
# Create platform with obstacle.
platform = C.add('static bar', scale=0.2) \
.set_bottom(y * C.scene.height) \
.set_center_x(x * C.scene.width)
obstacle = C.add('static bar', scale=0.02) \
.set_angle(90.0) \
.set_bottom(platform.top)
if right:
obstacle.set_right(platform.right)
else:
obstacle.set_left(platform.left)
# Create upside down jar.
offset = (platform.right - platform.left) / 2.0
offset += 0.04 * C.scene.width if right else -0.04 * C.scene.height
jar = C.add('dynamic jar', scale=0.2) \
.set_angle(146.0 if right else -146.0) \
.set_bottom(platform.top) \
.set_center_x(platform.left + offset)
# Add ball in jar.
offset = (jar.right - jar.left) * 0.7
ball = C.add('dynamic ball', scale=0.1) \
.set_bottom(jar.bottom) \
.set_center_x(jar.right - offset if right else jar.left + offset)
return jar, ball
| {
"content_hash": "fa69267418e6b4d70e35590a1e3d8f73",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 77,
"avg_line_length": 35.140625,
"alnum_prop": 0.6153846153846154,
"repo_name": "facebookresearch/phyre",
"id": "d3a8d90223ed0a5ccc3de86cf2bd3e590b4ba6f7",
"size": "2847",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "data/task_scripts/main/task00105.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "870"
},
{
"name": "C++",
"bytes": "150849"
},
{
"name": "CMake",
"bytes": "8704"
},
{
"name": "CSS",
"bytes": "3314"
},
{
"name": "Dockerfile",
"bytes": "2111"
},
{
"name": "HTML",
"bytes": "2147"
},
{
"name": "JavaScript",
"bytes": "52489"
},
{
"name": "Makefile",
"bytes": "2775"
},
{
"name": "Python",
"bytes": "653884"
},
{
"name": "Shell",
"bytes": "5674"
},
{
"name": "Thrift",
"bytes": "7384"
}
],
"symlink_target": ""
} |
from lintreview.review import Problems, Comment
from lintreview.tools.pep8 import Pep8
from unittest import TestCase
from tests import root_dir, read_file, read_and_restore_file, requires_image
class TestPep8(TestCase):
fixtures = [
'tests/fixtures/pep8/no_errors.py',
'tests/fixtures/pep8/has_errors.py',
]
def setUp(self):
self.problems = Problems()
self.tool = Pep8(self.problems, {}, root_dir)
def test_match_file(self):
self.assertFalse(self.tool.match_file('test.php'))
self.assertFalse(self.tool.match_file('test.js'))
self.assertFalse(self.tool.match_file('dir/name/test.js'))
self.assertTrue(self.tool.match_file('test.py'))
self.assertTrue(self.tool.match_file('dir/name/test.py'))
@requires_image('python2')
def test_process_files__one_file_pass(self):
self.tool.process_files([self.fixtures[0]])
self.assertEqual([], self.problems.all(self.fixtures[0]))
@requires_image('python2')
def test_process_files__one_file_fail(self):
self.tool.process_files([self.fixtures[1]])
problems = self.problems.all(self.fixtures[1])
self.assertEqual(6, len(problems))
fname = self.fixtures[1]
expected = Comment(fname, 2, 2, 'E401 multiple imports on one line')
self.assertEqual(expected, problems[0])
expected = Comment(fname, 11, 11, "W603 '<>' is deprecated, use '!='")
self.assertEqual(expected, problems[5])
@requires_image('python2')
def test_process_files_two_files(self):
self.tool.process_files(self.fixtures)
self.assertEqual([], self.problems.all(self.fixtures[0]))
problems = self.problems.all(self.fixtures[1])
self.assertEqual(6, len(problems))
expected = Comment(self.fixtures[1], 2, 2,
'E401 multiple imports on one line')
self.assertEqual(expected, problems[0])
expected = Comment(self.fixtures[1], 11, 11,
"W603 '<>' is deprecated, use '!='")
self.assertEqual(expected, problems[5])
@requires_image('python2')
def test_process_files_two_files__python3(self):
self.tool.options['python'] = 3
self.tool.process_files(self.fixtures)
self.assertEqual([], self.problems.all(self.fixtures[0]))
problems = self.problems.all(self.fixtures[1])
assert len(problems) >= 6
self.assertEqual(2, problems[0].line)
self.assertEqual(2, problems[0].position)
self.assertIn('multiple imports on one line', problems[0].body)
@requires_image('python2')
def test_process_absolute_container_path(self):
fixtures = ['/src/' + path for path in self.fixtures]
self.tool.process_files(fixtures)
self.assertEqual([], self.problems.all(self.fixtures[0]))
problems = self.problems.all(self.fixtures[1])
assert len(problems) >= 6
@requires_image('python2')
def test_process_files__ignore(self):
options = {
'ignore': 'E2,W603'
}
self.tool = Pep8(self.problems, options, root_dir)
self.tool.process_files([self.fixtures[1]])
problems = self.problems.all(self.fixtures[1])
self.assertEqual(4, len(problems))
for p in problems:
self.assertNotIn('E2', p.body)
self.assertNotIn('W603', p.body)
@requires_image('python2')
def test_process_files__line_length(self):
options = {
'max-line-length': '10'
}
self.tool = Pep8(self.problems, options, root_dir)
self.tool.process_files([self.fixtures[1]])
problems = self.problems.all(self.fixtures[1])
self.assertEqual(10, len(problems))
expected = Comment(self.fixtures[1], 1, 1,
'E501 line too long (23 > 10 characters)')
self.assertEqual(expected, problems[0])
@requires_image('python2')
def test_process_files__select(self):
options = {
'select': 'W603'
}
self.tool = Pep8(self.problems, options, root_dir)
self.tool.process_files([self.fixtures[1]])
problems = self.problems.all(self.fixtures[1])
self.assertEqual(1, len(problems))
for p in problems:
self.assertIn('W603', p.body)
def test_has_fixer__not_enabled(self):
tool = Pep8(self.problems, {})
self.assertEqual(False, tool.has_fixer())
def test_has_fixer__enabled(self):
tool = Pep8(self.problems, {'fixer': True})
self.assertEqual(True, tool.has_fixer())
@requires_image('python2')
def test_execute_fixer(self):
tool = Pep8(self.problems, {'fixer': True}, root_dir)
original = read_file(self.fixtures[1])
tool.execute_fixer(self.fixtures)
updated = read_and_restore_file(self.fixtures[1], original)
assert original != updated, 'File content should change.'
self.assertEqual(0, len(self.problems.all()),
'No errors should be recorded')
@requires_image('python2')
def test_execute_fixer__options(self):
tool = Pep8(self.problems, {
'fixer': True,
'max-line-length': 120,
'exclude': 'W201'
}, root_dir)
original = read_file(self.fixtures[1])
tool.execute_fixer(self.fixtures)
updated = read_and_restore_file(self.fixtures[1], original)
assert original != updated, 'File content should change.'
self.assertEqual(0, len(self.problems.all()),
'No errors should be recorded')
@requires_image('python2')
def test_execute_fixer__fewer_problems_remain(self):
tool = Pep8(self.problems, {'fixer': True}, root_dir)
# The fixture file can have all problems fixed by autopep8
original = read_file(self.fixtures[1])
tool.execute_fixer(self.fixtures)
tool.process_files(self.fixtures)
read_and_restore_file(self.fixtures[1], original)
self.assertGreaterEqual(len(self.problems.all()), 0,
'Most errors should be fixed')
@requires_image('python2')
def test_execute_fixer__python3(self):
options = {'fixer': True, 'python': 3}
tool = Pep8(self.problems, options, root_dir)
original = read_file(self.fixtures[1])
tool.execute_fixer(self.fixtures)
updated = read_and_restore_file(self.fixtures[1], original)
assert original != updated, 'File content should change.'
self.assertEqual(0, len(self.problems.all()),
'No errors should be recorded')
@requires_image('python2')
def test_execute_fixer__fewer_problems_remain__python3(self):
options = {'fixer': True, 'python': 3}
tool = Pep8(self.problems, options, root_dir)
# The fixture file can have all problems fixed by autopep8
original = read_file(self.fixtures[1])
tool.execute_fixer(self.fixtures)
tool.process_files(self.fixtures)
read_and_restore_file(self.fixtures[1], original)
self.assertLessEqual(1, len(self.problems.all()),
'Most errors should be fixed')
text = [c.body for c in self.problems.all()]
self.assertIn("'<>' is deprecated", ' '.join(text))
| {
"content_hash": "0f3db8f38a5fad6fdd615911d0c3579d",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 78,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.6114718614718615,
"repo_name": "markstory/lint-review",
"id": "b9f862039848ef439f1cfa18acbb7d1990fb6191",
"size": "7392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tools/test_pep8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7815"
},
{
"name": "Makefile",
"bytes": "1499"
},
{
"name": "Python",
"bytes": "406375"
},
{
"name": "Ruby",
"bytes": "136"
},
{
"name": "Shell",
"bytes": "4388"
}
],
"symlink_target": ""
} |
default_app_config = 'wagtail_embed_videos.apps.WagtailEmbedVideosAppConfig'
| {
"content_hash": "b598018efdf7dc383d99dd8bf020da0d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 76,
"avg_line_length": 77,
"alnum_prop": 0.8441558441558441,
"repo_name": "infoportugal/wagtail-embedvideos",
"id": "de6feab266339ab41abd82d0e064bdd7429c53e0",
"size": "77",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail_embed_videos/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "15192"
},
{
"name": "JavaScript",
"bytes": "3815"
},
{
"name": "Python",
"bytes": "32852"
}
],
"symlink_target": ""
} |
"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
import collections
import threading
import grpc
from grpc import _common
from grpc.beta import _metadata
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import logging_pool
from grpc.framework.foundation import stream
from grpc.framework.interfaces.face import face
# pylint: disable=too-many-return-statements
_DEFAULT_POOL_SIZE = 8
class _ServerProtocolContext(interfaces.GRPCServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def peer(self):
return self._servicer_context.peer()
def disable_next_response_compression(self):
pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _FaceServicerContext(face.ServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def is_active(self):
return self._servicer_context.is_active()
def time_remaining(self):
return self._servicer_context.time_remaining()
def add_abortion_callback(self, abortion_callback):
raise NotImplementedError(
'add_abortion_callback no longer supported server-side!')
def cancel(self):
self._servicer_context.cancel()
def protocol_context(self):
return _ServerProtocolContext(self._servicer_context)
def invocation_metadata(self):
return _metadata.beta(self._servicer_context.invocation_metadata())
def initial_metadata(self, initial_metadata):
self._servicer_context.send_initial_metadata(
_metadata.unbeta(initial_metadata))
def terminal_metadata(self, terminal_metadata):
self._servicer_context.set_terminal_metadata(
_metadata.unbeta(terminal_metadata))
def code(self, code):
self._servicer_context.set_code(code)
def details(self, details):
self._servicer_context.set_details(details)
def _adapt_unary_request_inline(unary_request_inline):
def adaptation(request, servicer_context):
return unary_request_inline(request,
_FaceServicerContext(servicer_context))
return adaptation
def _adapt_stream_request_inline(stream_request_inline):
def adaptation(request_iterator, servicer_context):
return stream_request_inline(request_iterator,
_FaceServicerContext(servicer_context))
return adaptation
class _Callback(stream.Consumer):
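    """A stream.Consumer that buffers consumed values behind a condition
    variable so they can be drawn back out synchronously, and that raises
    abandonment.Abandoned from the draw methods once cancelled."""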
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._terminated = False
self._cancelled = False
def consume(self, value):
with self._condition:
self._values.append(value)
self._condition.notify_all()
def terminate(self):
with self._condition:
self._terminated = True
self._condition.notify_all()
def consume_and_terminate(self, value):
with self._condition:
self._values.append(value)
self._terminated = True
self._condition.notify_all()
def cancel(self):
with self._condition:
self._cancelled = True
self._condition.notify_all()
def draw_one_value(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._values:
return self._values.pop(0)
elif self._terminated:
return None
else:
self._condition.wait()
def draw_all_values(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._terminated:
all_values = tuple(self._values)
self._values = None
return all_values
else:
self._condition.wait()
def _run_request_pipe_thread(request_iterator, request_consumer,
servicer_context):
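    """Pipe requests from the iterator into the consumer on a daemon thread,
    stopping early if the RPC is no longer active."""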
thread_joined = threading.Event()
def pipe_requests():
for request in request_iterator:
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.consume(request)
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.terminate()
request_pipe_thread = threading.Thread(target=pipe_requests)
request_pipe_thread.daemon = True
request_pipe_thread.start()
def _adapt_unary_unary_event(unary_unary_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_unary_event(request, callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
return callback.draw_all_values()[0]
return adaptation
def _adapt_unary_stream_event(unary_stream_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_stream_event(request, callback,
_FaceServicerContext(servicer_context))
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
def _adapt_stream_unary_event(stream_unary_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_unary_event(
callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
return callback.draw_all_values()[0]
return adaptation
def _adapt_stream_stream_event(stream_stream_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_stream_event(
callback, _FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
class _SimpleMethodHandler(
collections.namedtuple('_MethodHandler', (
'request_streaming',
'response_streaming',
'request_deserializer',
'response_serializer',
'unary_unary',
'unary_stream',
'stream_unary',
'stream_stream',
)), grpc.RpcMethodHandler):
pass
def _simple_method_handler(implementation, request_deserializer,
response_serializer):
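    # Dispatch on the implementation style (inline vs. event) and cardinality
    # to fill the matching slot of a grpc.RpcMethodHandler.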
if implementation.style is style.Service.INLINE:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(False, False, request_deserializer,
response_serializer,
_adapt_unary_request_inline(
implementation.unary_unary_inline),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(False, True, request_deserializer,
response_serializer, None,
_adapt_unary_request_inline(
implementation.unary_stream_inline),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
_adapt_stream_request_inline(
implementation.stream_unary_inline),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_request_inline(
implementation.stream_stream_inline))
elif implementation.style is style.Service.EVENT:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(False, False, request_deserializer,
response_serializer,
_adapt_unary_unary_event(
implementation.unary_unary_event),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(False, True, request_deserializer,
response_serializer, None,
_adapt_unary_stream_event(
implementation.unary_stream_event),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
_adapt_stream_unary_event(
implementation.stream_unary_event),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(True, True, request_deserializer,
response_serializer, None, None, None,
_adapt_stream_stream_event(
implementation.stream_stream_event))
def _flatten_method_pair_map(method_pair_map):
method_pair_map = method_pair_map or {}
flat_map = {}
for method_pair in method_pair_map:
method = _common.fully_qualified_method(method_pair[0], method_pair[1])
flat_map[method] = method_pair_map[method_pair]
return flat_map
class _GenericRpcHandler(grpc.GenericRpcHandler):
def __init__(self, method_implementations, multi_method_implementation,
request_deserializers, response_serializers):
self._method_implementations = _flatten_method_pair_map(
method_implementations)
self._request_deserializers = _flatten_method_pair_map(
request_deserializers)
self._response_serializers = _flatten_method_pair_map(
response_serializers)
self._multi_method_implementation = multi_method_implementation
def service(self, handler_call_details):
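        # Resolve the beta-style implementation registered for this fully
        # qualified method name and wrap it in a simple method handler.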
method_implementation = self._method_implementations.get(
handler_call_details.method)
if method_implementation is not None:
return _simple_method_handler(method_implementation,
self._request_deserializers.get(
handler_call_details.method),
self._response_serializers.get(
handler_call_details.method))
elif self._multi_method_implementation is None:
return None
else:
try:
return None #TODO(nathaniel): call the multimethod.
except face.NoSuchMethodError:
return None
class _Server(interfaces.Server):
def __init__(self, grpc_server):
self._grpc_server = grpc_server
def add_insecure_port(self, address):
return self._grpc_server.add_insecure_port(address)
def add_secure_port(self, address, server_credentials):
return self._grpc_server.add_secure_port(address, server_credentials)
def start(self):
self._grpc_server.start()
def stop(self, grace):
return self._grpc_server.stop(grace)
def __enter__(self):
self._grpc_server.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._grpc_server.stop(None)
return False
def server(service_implementations, multi_method_implementation,
request_deserializers, response_serializers, thread_pool,
thread_pool_size):
generic_rpc_handler = _GenericRpcHandler(
service_implementations, multi_method_implementation,
request_deserializers, response_serializers)
if thread_pool is None:
effective_thread_pool = logging_pool.pool(_DEFAULT_POOL_SIZE
if thread_pool_size is None
else thread_pool_size)
else:
effective_thread_pool = thread_pool
return _Server(
grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,)))
| {
"content_hash": "08dfcb250b1845e46dc183b7b56eaa36",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 82,
"avg_line_length": 37.44266666666667,
"alnum_prop": 0.587422548251549,
"repo_name": "simonkuang/grpc",
"id": "ccafec8951d1d14889b5826d51fd3beb60c63e4d",
"size": "14618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grpcio/grpc/beta/_server_adaptations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "25583"
},
{
"name": "C",
"bytes": "6254103"
},
{
"name": "C#",
"bytes": "1483282"
},
{
"name": "C++",
"bytes": "1808713"
},
{
"name": "CMake",
"bytes": "77882"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "353380"
},
{
"name": "M4",
"bytes": "38331"
},
{
"name": "Makefile",
"bytes": "734953"
},
{
"name": "Objective-C",
"bytes": "309837"
},
{
"name": "PHP",
"bytes": "152017"
},
{
"name": "Protocol Buffer",
"bytes": "114660"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1247734"
},
{
"name": "Ruby",
"bytes": "620513"
},
{
"name": "Shell",
"bytes": "59691"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
import flask
from flask.testing import FlaskClient
def test_get_index(app: flask.app.Flask, client: FlaskClient) -> None:
res = client.get("/")
assert res.status_code == 200
def test_post_index(app: flask.app.Flask, client: FlaskClient) -> None:
res = client.post("/")
assert res.status_code == 405
| {
"content_hash": "9fca23cbd6e58556f7ba58419bc8d19c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 26.583333333333332,
"alnum_prop": 0.6833855799373041,
"repo_name": "GoogleCloudPlatform/cloud-run-microservice-template-python",
"id": "89a921967eca87617263dba06f414744520eefe7",
"size": "894",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1531"
},
{
"name": "Procfile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "13716"
},
{
"name": "Shell",
"bytes": "2101"
}
],
"symlink_target": ""
} |
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
if __name__ == '__main__':
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(['easy_pdf', 'tests'])
sys.exit(bool(failures))
| {
"content_hash": "18e6a5170899e5151ac88f3d8af70b22",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 64,
"avg_line_length": 27.142857142857142,
"alnum_prop": 0.6894736842105263,
"repo_name": "nigma/django-easy-pdf",
"id": "ed27485d8c392113af2b7701e847271d105828a1",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7018"
},
{
"name": "Makefile",
"bytes": "2468"
},
{
"name": "Python",
"bytes": "17249"
}
],
"symlink_target": ""
} |
import os
import testinfra.utils.ansible_runner
import pytest
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize("package", [
"zsh",
])
def test_packages(host, package):
p = host.package(package)
assert p.is_installed
@pytest.mark.parametrize("name", [
"lorem",
"ipsum",
])
def test_user_shell(host, name):
u = host.user(name)
assert 'zsh' in u.shell
@pytest.mark.parametrize("name", [
"lorem",
"ipsum",
])
def test_oh_my_zsh_install(host, name):
d = host.file("/home/{0}/.oh-my-zsh".format(name))
assert d.exists and d.is_directory
@pytest.mark.parametrize("name", [
"lorem",
])
def test_zshrc_create(host, name):
f = host.file("/home/{0}/.zshrc".format(name))
assert f.exists and f.is_file
assert "export ZSH=/home/{0}/.oh-my-zsh".format(name) in f.content_string
assert "plugins=(autojump git)" in f.content_string
@pytest.mark.parametrize("user,setting", [
("lorem", "PLATFORMSH_CLI_TOKEN=10987654321"),
("ipsum", "ls -AF"),
])
def test_zshrc_settings(host, user, setting):
f = host.file("/home/{0}/.zshrc".format(user))
assert setting in f.content_string
| {
"content_hash": "b8c7c6ea125279cbb052db9c50c42a35",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 21.655172413793103,
"alnum_prop": 0.6552547770700637,
"repo_name": "ctorgalson/ansible-role-oh-my-zsh",
"id": "15ef0569e92b98e8fff8f194974188c1a2a2cd58",
"size": "1256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "molecule/default/tests/test_default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1174"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from google.appengine.ext import ndb
# imports do not work yet (for unit tests)
# add them later
from api import fields
import model
import util
import config
#TODO import Taggable
#class WayPoint(model.Base): # does not work with unit test yet
from .tag import Taggable#, TagStructure, Tag, TagRelation
from .collection import Collection, AddCollection
class WayPoint(Taggable, AddCollection, model.Base):
name = ndb.StringProperty(required=True)
description = ndb.TextProperty()
url = ndb.StringProperty(validator=lambda p, v: v.lower())
geo = ndb.GeoPtProperty(indexed=True) # lat/long coordinates
custom_fields = ndb.GenericProperty(repeated=True)
creator = ndb.KeyProperty(kind="User") # default: current user key
@classmethod
def qry(cls, name=None, collection=None, tag=None, \
url=None, order_by_date='modified', **kwargs):
"""Query for way points"""
qry = cls.query(**kwargs)
if name:
qry_tmp = qry
qry = qry.filter(cls.name==name)
if collection:
qry_tmp = qry
qry = qry.filter(cls.collection==collection)
if tag:
qry_tmp = qry
qry = qry.filter(cls.tags==tag)
if url:
qry_tmp = qry
qry = qry.filter(cls.url==url.lower())
if order_by_date == 'modified':
qry_tmp = qry
qry = qry.order(-cls.modified)
elif order_by_date == 'created':
qry_tmp = qry
qry = qry.order(-cls.created)
#else filter for private True and False
return qry
@classmethod
def get_dbs(
cls, name=None,
tags=None, creator=None, geo=None, **kwargs
):
kwargs = cls.get_col_dbs(**kwargs)
kwargs = cls.get_tag_dbs(**kwargs)
return super(WayPoint, cls).get_dbs(
name=name or util.param('name', str),
creator=creator or util.param('creator', ndb.Key),
geo=geo or util.param('geo', str),
**kwargs
)
@staticmethod
def print_list(dbs):
print "\n+-------------------+-------------------+-------------------+"\
+"-------------------+-------------------+-----------------------"
print "| {:<18}| {:<18}| {:<18}| {:<18}| {:<18}| {:<14} {:<48}".\
format("name", "collection", "description", "url", "geo", "tags", "custom field")
print "+-------------------+-------------------+-------------------+"\
+"-------------------+-------------------+-----------------------"
for db in dbs:
print "| {:<18}| {:<18}| {:<18}| {:<18}| {:<18}|{:<14} {:<48}".\
format(db.name, db.collection, db.description, db.url, db.geo,
db.tags,db.custom_fields)
print "+-------------------+-------------------+-------------------+"\
+"-------------------+-------------------+-----------------------"
print
print
# ADD them later
# @classmethod
# def get_dbs(
# cls, admin=None, active=None, verified=None, permissions=None, **kwargs
# ):
# return super(User, cls).get_dbs(
# admin=admin or util.param('admin', bool),
# active=active or util.param('active', bool),
# verified=verified or util.param('verified', bool),
# permissions=permissions or util.param('permissions', list),
# **kwargs
# )
#
#
# FIELDS = {
# }
#
# FIELDS.update(model.Base.FIELDS)
| {
"content_hash": "18e168b08edf21587559ab4a04275f28",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 89,
"avg_line_length": 32.22549019607843,
"alnum_prop": 0.540614542135686,
"repo_name": "wodore/wodore-gae",
"id": "84ce6d5961933ecb41817fc67379df88983f6762",
"size": "3304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/model/waypoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5926"
},
{
"name": "CoffeeScript",
"bytes": "28017"
},
{
"name": "HTML",
"bytes": "116770"
},
{
"name": "JavaScript",
"bytes": "65"
},
{
"name": "Python",
"bytes": "290702"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
from random import randrange
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
#if req.get("result").get("action") != "yahooWeatherForecast":
# return {}
#baseurl = "https://query.yahooapis.com/v1/public/yql?"
#yql_query = makeYqlQuery(req)
#if yql_query is None:
# return {}
#yql_url = baseurl + urllib.urlencode({'q': yql_query}) + "&format=json"
#result = urllib.urlopen(yql_url).read()
#data = json.loads(result)
#res = makeWebhookResult(data)
#return res
#data = getAlphabet(req)
list1 = ['Give one more alphabet', 'Say one more letter', 'What is your next alphabet', 'Ok give the next alphabet']
    random_number = randrange(4) # from 0 to 3, 4 is not included
rndString = list1[random_number]
data = getAlphabet(req)
print ("Netaji")
print(data)
    return {
        "speech": data + " " + rndString,
"displayText": "test",
# "data": data,
# "contextOut": [],
"source": "mysource"
}
def getAlphabet(req):
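    # Extract the recognised letter from the request parameters and build a
    # spoken phrase naming the next letter of the alphabet.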
result = req.get("result")
parameters = result.get("parameters")
alphabet = parameters.get("user-alphabet")
lowerAlphabet = alphabet.upper()
returnValue=""
list2 = ['Next alphabet is ','Next letter is ']
    random_number = randrange(2) # from 0 to 1, 2 is not included
rndString = list2[random_number]
    # A..Y advance to the next letter; 'Z' and unrecognised input keep their
    # special replies.
    if len(lowerAlphabet) == 1 and 'A' <= lowerAlphabet <= 'Y':
        returnValue = chr(ord(lowerAlphabet) + 1)
    elif lowerAlphabet == 'Z':
        returnValue = 'picked the last one. Ok Ill Start C'
    else:
        returnValue = 'I did not understand. I am starting with B'
if lowerAlphabet != 'Z':
return rndString + " " + returnValue + "."
else:
return returnValue
#return alphabet
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
if city is None:
return None
return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "')"
def makeWebhookResult(data):
query = data.get('query')
if query is None:
return {}
result = query.get('results')
if result is None:
return {}
channel = result.get('channel')
if channel is None:
return {}
item = channel.get('item')
location = channel.get('location')
units = channel.get('units')
if (location is None) or (item is None) or (units is None):
return {}
condition = item.get('condition')
if condition is None:
return {}
# print(json.dumps(item, indent=4))
speech = "Today in " + location.get('city') + ": " + condition.get('text') + \
", the temperature is " + condition.get('temp') + " " + units.get('temperature')
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=False, port=port, host='0.0.0.0')
| {
"content_hash": "c6affaaba1fab1495f1d2bf90619c717",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 120,
"avg_line_length": 27.302702702702703,
"alnum_prop": 0.5810730548406257,
"repo_name": "netajibasa/simplealphabets",
"id": "5bea9ebd89d32c17d50947fe2dc57937f5fe19d9",
"size": "5074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5074"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Inventory', '0013_auto_20151227_1230'),
]
operations = [
migrations.RenameField(
model_name='ecoresproductmodel',
old_name='DefaulLocation',
new_name='DefaultLocation',
),
]
| {
"content_hash": "0571d88878d7c819b4c6c0c575f72397",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 21.88888888888889,
"alnum_prop": 0.6091370558375635,
"repo_name": "alexharmenta/Inventationery",
"id": "68f127669ceb199265353cfaba5517c484d6344d",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Inventationery/apps/Inventory/migrations/0014_auto_20151227_1250.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "127726"
},
{
"name": "HTML",
"bytes": "170879"
},
{
"name": "JavaScript",
"bytes": "118056"
},
{
"name": "Python",
"bytes": "243110"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import multiple_form
from .autocomplete_light_registry import CustomAutocomplete
from .models import ModelOne, ModelTwo
urlpatterns = [
url(r'^$', multiple_form, name='select2_outside_admin_multiple'),
# Autocomplete urls
url(
r'^modelone-autocomplete/$',
CustomAutocomplete.as_view(
model=ModelOne,
create_field='name',
),
name='modelone-autocomplete',
),
url(
r'^modeltwo-autocomplete/$',
CustomAutocomplete.as_view(
model=ModelTwo,
create_field='name'),
name='modeltwo-autocomplete',
),
]
| {
"content_hash": "2bfc70a928862e6bf3af5ef5c30e25c3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 69,
"avg_line_length": 25.692307692307693,
"alnum_prop": 0.6182634730538922,
"repo_name": "shubhamdipt/django-autocomplete-light",
"id": "a563c64157f6723be8722d91abf3897d4511c1f3",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/select2_outside_admin_multiple/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "166"
},
{
"name": "HTML",
"bytes": "5154"
},
{
"name": "JavaScript",
"bytes": "7796"
},
{
"name": "Python",
"bytes": "161638"
},
{
"name": "Shell",
"bytes": "1025"
}
],
"symlink_target": ""
} |
from itertools import chain
from typing import Dict, Iterable, NewType, Union
from attr import attrib, attrs, evolve
from polygraph.types import (
DeferredType,
PolygraphField,
PolygraphInputValue,
PolygraphType,
)
from polygraph.utils.strict_dict import StrictDict
TypeName = NewType("TypeName", str)
TypeMap = NewType("TypeMap", Dict[TypeName, PolygraphType])
UnresolvedType = NewType("UnresolvedType", Union[PolygraphType, DeferredType])
@attrs
class Schema:
query = attrib()
mutation = attrib()
def visit_types(types: Iterable[UnresolvedType], visited=None):
"""
Recursively walks over all types in a depth-first manner
"""
visited = visited or set()
for type_ in (t for t in types if t not in visited):
if isinstance(type_, DeferredType):
continue
yield type_
visited.add(type_)
next_types = (t for t in chain(
[field.type_ for field in type_.fields],
[field.type_ for field in type_.input_fields],
type_.interfaces,
type_.possible_types,
[type_.of_type] if type_.of_type else [],
) if t not in visited)
yield from visit_types(next_types, visited)
def collect_type_names(types: Iterable[UnresolvedType]) -> TypeMap:
"""
Builds a mapping of type names to types
"""
return StrictDict({
type_.name: type_ for type_ in visit_types(types)
if not isinstance(type_, DeferredType)
})
def undefer_type(type_: UnresolvedType, type_map: TypeMap) -> PolygraphType:
if isinstance(type_, DeferredType):
return type_map.get(type_.name)
else:
return type_
def undefer_input_value(
input_value: PolygraphInputValue,
type_map: TypeMap,
) -> PolygraphInputValue:
return evolve(
input_value,
type_=undefer_type(input_value.type_, type_map),
)
def undefer_field(field: PolygraphField, type_map: TypeMap) -> PolygraphField:
return evolve(
field,
type_=undefer_type(field.type_, type_map),
        args=tuple(undefer_input_value(v, type_map) for v in field.args),
)
def undefer_subtypes(type_: UnresolvedType, type_map: TypeMap) -> PolygraphType:
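    # Resolve DeferredType references in the type itself and throughout its
    # fields, interfaces, possible types and of_type.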
type_ = undefer_type(type_, type_map)
return evolve(
type_,
fields=tuple(undefer_field(f, type_map) for f in type_.fields),
interfaces=tuple(undefer_subtypes(i, type_map) for i in type_.interfaces),
possible_types=tuple(undefer_subtypes(p, type_map) for p in type_.possible_types),
of_type=undefer_type(type_.of_type, type_map),
)
def build_schema(
query_type,
mutation_type: PolygraphType=None,
additional_types: Iterable[PolygraphType]=None,
) -> Schema:
types = additional_types or []
types.append(query_type)
if mutation_type:
types.append(mutation_type)
type_map = collect_type_names(types)
types = [undefer_subtypes(t, type_map) for t in types]
return Schema(
        query=undefer_subtypes(query_type, type_map),
        mutation=undefer_subtypes(mutation_type, type_map) if mutation_type else None,
)
| {
"content_hash": "0462ef40aa8170138b8677e66af8c20d",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 90,
"avg_line_length": 28.80373831775701,
"alnum_prop": 0.6537962362102531,
"repo_name": "polygraph-python/polygraph",
"id": "3d4706d8067b2fa7975cb2aa9df27b2ad9f3a759",
"size": "3082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polygraph/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11865"
}
],
"symlink_target": ""
} |
from playlist.models import SlackUser, Song
import simplejson as json
import pickle
from Queue import Queue
class PlaylistService:
def __init__(self, reco_service):
self.reco_service = reco_service
self.load()
def enqueue(self, song):
self.load()
self.reco_service.next_reco_for(song)
self.queue.append(song)
self.dump()
def dequeue(self, song):
self.load()
self.queue.remove(song)
self.dump()
def get_next(self):
self.load()
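        # Fall back to the recommendation service when the queue is empty.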
        if len(self.queue) == 0:
el = self.reco_service.get_reco()
else:
el = self.queue[0]
self.queue.remove(el)
self.dump()
return el
def dump(self):
print 'saving to lemon'
fh = open('lemon.pickle', 'wb')
pickle.dump(self.queue, fh)
def load(self):
print 'loading from lemon'
fh = open('lemon.pickle', 'r')
try:
self.queue = pickle.load(fh)
except:
self.queue = []
self.dump()
class RepeatRecommendationService:
def __init__(self):
self.load()
def next_reco_for(self, song):
self.load()
self.songs.append(song)
self.dump()
def get_reco(self):
self.load()
el = self.songs[self.offset]
if self.offset == len(self.songs) - 1:
self.offset = 0
else:
self.offset += 1
self.dump()
return el
def dump(self):
print 'saving to mango and chili'
fh = open('mango.pickle', 'wb')
fh1 = open('chilli.pickle', 'wb')
pickle.dump(self.songs, fh)
pickle.dump(self.offset, fh1)
def load(self):
print 'loading from mango and chilli'
fh = open('mango.pickle', 'r')
fh1 = open('chilli.pickle', 'r')
try:
self.songs = pickle.load(fh)
self.offset = pickle.load(fh1)
except:
self.songs = []
self.offset = 0
self.dump()
reco_service = RepeatRecommendationService()
playlist_service = PlaylistService(reco_service)
| {
"content_hash": "80997ca5c400ec1ac38aa5d2d3a20256",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 48,
"avg_line_length": 23.966666666666665,
"alnum_prop": 0.5373203523412147,
"repo_name": "schatten/radioslack",
"id": "57d4a55b6235cd4012284df5d98e0456dcff9d68",
"size": "2157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playlist/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14835"
}
],
"symlink_target": ""
} |
"""imake_web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
]
| {
"content_hash": "18f40f4c97fc42bb8d2b24d84b9a4410",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 36.142857142857146,
"alnum_prop": 0.6982872200263505,
"repo_name": "chanita/imake",
"id": "b14c5264cf1a6d62af5228c7a45a3b8c254c345c",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imake_web/imake_web/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4053"
}
],
"symlink_target": ""
} |
import contextlib
import copy
import mock
from webob import exc
from quantum.api import extensions
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.common import constants
from quantum import context
from quantum.db import agents_db
from quantum.db import dhcp_rpc_base
from quantum.db import l3_rpc_base
from quantum.extensions import agentscheduler
from quantum import manager
from quantum.openstack.common import timeutils
from quantum.openstack.common import uuidutils
from quantum.tests.unit import test_agent_ext_plugin
from quantum.tests.unit import test_db_plugin as test_plugin
from quantum.tests.unit import test_extensions
from quantum.tests.unit import test_l3_plugin
from quantum.tests.unit.testlib_api import create_request
from quantum.wsgi import Serializer
L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
class AgentSchedulerTestMixIn(object):
def _request_list(self, path, admin_context=True,
expected_code=exc.HTTPOk.code):
req = self._path_req(path, admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
return self.deserialize(self.fmt, res)
def _path_req(self, path, method='GET', data=None,
query_string=None,
admin_context=True):
content_type = 'application/%s' % self.fmt
body = None
if data is not None: # empty dict is valid
body = Serializer().serialize(data, content_type)
if admin_context:
return create_request(
path, body, content_type, method, query_string=query_string)
else:
return create_request(
path, body, content_type, method, query_string=query_string,
context=context.Context('', 'tenant_id'))
def _path_create_request(self, path, data, admin_context=True):
return self._path_req(path, method='POST', data=data,
admin_context=admin_context)
def _path_show_request(self, path, admin_context=True):
return self._path_req(path, admin_context=admin_context)
def _path_delete_request(self, path, admin_context=True):
return self._path_req(path, method='DELETE',
admin_context=admin_context)
def _path_update_request(self, path, data, admin_context=True):
return self._path_req(path, method='PUT', data=data,
admin_context=admin_context)
def _list_routers_hosted_by_l3_agent(self, agent_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (agent_id,
agentscheduler.L3_ROUTERS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _list_networks_hosted_by_dhcp_agent(self, agent_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (agent_id,
agentscheduler.DHCP_NETS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _list_l3_agents_hosting_router(self, router_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/routers/%s/%s.%s" % (router_id,
agentscheduler.L3_AGENTS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _list_dhcp_agents_hosting_network(self, network_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/networks/%s/%s.%s" % (network_id,
agentscheduler.DHCP_AGENTS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _add_router_to_l3_agent(self, id, router_id,
expected_code=exc.HTTPCreated.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (id,
agentscheduler.L3_ROUTERS,
self.fmt)
req = self._path_create_request(path,
{'router_id': router_id},
admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _add_network_to_dhcp_agent(self, id, network_id,
expected_code=exc.HTTPCreated.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (id,
agentscheduler.DHCP_NETS,
self.fmt)
req = self._path_create_request(path,
{'network_id': network_id},
admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _remove_network_from_dhcp_agent(self, id, network_id,
expected_code=exc.HTTPNoContent.code,
admin_context=True):
path = "/agents/%s/%s/%s.%s" % (id,
agentscheduler.DHCP_NETS,
network_id,
self.fmt)
req = self._path_delete_request(path,
admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _remove_router_from_l3_agent(self, id, router_id,
expected_code=exc.HTTPNoContent.code,
admin_context=True):
path = "/agents/%s/%s/%s.%s" % (id,
agentscheduler.L3_ROUTERS,
router_id,
self.fmt)
req = self._path_delete_request(path, admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _register_one_agent_state(self, agent_state):
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': agent_state},
time=timeutils.strtime())
def _disable_agent(self, agent_id, admin_state_up=False):
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = admin_state_up
self._update('agents', agent_id, new_agent)
def _get_agent_id(self, agent_type, host):
agents = self._list_agents()
for agent in agents['agents']:
if (agent['agent_type'] == agent_type and
agent['host'] == host):
return agent['id']
class OvsAgentSchedulerTestCase(test_l3_plugin.L3NatTestCaseMixin,
test_agent_ext_plugin.AgentDBTestMixIn,
AgentSchedulerTestMixIn,
test_plugin.QuantumDbPluginV2TestCase):
fmt = 'json'
plugin_str = ('quantum.plugins.openvswitch.'
'ovs_quantum_plugin.OVSQuantumPluginV2')
def setUp(self):
super(OvsAgentSchedulerTestCase, self).setUp(self.plugin_str)
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
self.adminContext = context.get_admin_context()
self.agentscheduler_dbMinxin = manager.QuantumManager.get_plugin()
def test_report_states(self):
self._register_agent_states()
agents = self._list_agents()
self.assertEqual(4, len(agents['agents']))
def test_network_scheduling_on_network_creation(self):
self._register_agent_states()
with self.network() as net:
dhcp_agents = self._list_dhcp_agents_hosting_network(
net['network']['id'])
self.assertEqual(0, len(dhcp_agents['agents']))
def test_network_auto_schedule_with_disabled(self):
with contextlib.nested(self.network(),
self.network()):
dhcp_rpc = dhcp_rpc_base.DhcpRpcCallbackMixin()
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTC)
self._disable_agent(hosta_id)
dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA)
# second agent will host all the networks since first is disabled.
dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTC)
networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
num_hostc_nets = len(networks['networks'])
networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
num_hosta_nets = len(networks['networks'])
self.assertEqual(0, num_hosta_nets)
self.assertEqual(2, num_hostc_nets)
def test_network_auto_schedule_with_hosted(self):
# one agent hosts all the networks, other hosts none
with contextlib.nested(self.network(),
self.network()) as (net1, net2):
dhcp_rpc = dhcp_rpc_base.DhcpRpcCallbackMixin()
self._register_agent_states()
dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA)
            # second agent will not host the network since the first already hosts it.
dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTC)
dhcp_agents = self._list_dhcp_agents_hosting_network(
net1['network']['id'])
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTC)
hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id)
num_hosta_nets = len(hosta_nets['networks'])
hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id)
num_hostc_nets = len(hostc_nets['networks'])
self.assertEqual(2, num_hosta_nets)
self.assertEqual(0, num_hostc_nets)
self.assertEqual(1, len(dhcp_agents['agents']))
self.assertEqual(DHCP_HOSTA, dhcp_agents['agents'][0]['host'])
def test_network_auto_schedule_with_hosted_2(self):
# one agent hosts one network
dhcp_rpc = dhcp_rpc_base.DhcpRpcCallbackMixin()
dhcp_hosta = {
'binary': 'quantum-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
dhcp_hostc = copy.deepcopy(dhcp_hosta)
dhcp_hostc['host'] = DHCP_HOSTC
with self.network() as net1:
self._register_one_agent_state(dhcp_hosta)
dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._disable_agent(hosta_id, admin_state_up=False)
with self.network() as net2:
self._register_one_agent_state(dhcp_hostc)
dhcp_rpc.get_active_networks(self.adminContext,
host=DHCP_HOSTC)
dhcp_agents_1 = self._list_dhcp_agents_hosting_network(
net1['network']['id'])
dhcp_agents_2 = self._list_dhcp_agents_hosting_network(
net2['network']['id'])
hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id)
num_hosta_nets = len(hosta_nets['networks'])
hostc_id = self._get_agent_id(
constants.AGENT_TYPE_DHCP,
DHCP_HOSTC)
hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id)
num_hostc_nets = len(hostc_nets['networks'])
self.assertEqual(1, num_hosta_nets)
self.assertEqual(1, num_hostc_nets)
self.assertEqual(1, len(dhcp_agents_1['agents']))
self.assertEqual(1, len(dhcp_agents_2['agents']))
self.assertEqual(DHCP_HOSTA, dhcp_agents_1['agents'][0]['host'])
self.assertEqual(DHCP_HOSTC, dhcp_agents_2['agents'][0]['host'])
def test_network_scheduling_on_port_creation(self):
with self.subnet() as subnet:
dhcp_agents = self._list_dhcp_agents_hosting_network(
subnet['subnet']['network_id'])
result0 = len(dhcp_agents['agents'])
self._register_agent_states()
with self.port(subnet=subnet,
device_owner="compute:test:" + DHCP_HOSTA) as port:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port['port']['network_id'])
result1 = len(dhcp_agents['agents'])
self.assertEqual(0, result0)
self.assertEqual(1, result1)
def test_network_scheduler_with_disabled_agent(self):
dhcp_hosta = {
'binary': 'quantum-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
with self.port() as port1:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port1['port']['network_id'])
self.assertEqual(1, len(dhcp_agents['agents']))
agents = self._list_agents()
self._disable_agent(agents['agents'][0]['id'])
with self.port() as port2:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port2['port']['network_id'])
self.assertEqual(0, len(dhcp_agents['agents']))
def test_network_scheduler_with_down_agent(self):
dhcp_hosta = {
'binary': 'quantum-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
is_agent_down_str = 'quantum.db.agents_db.AgentDbMixin.is_agent_down'
with mock.patch(is_agent_down_str) as mock_is_agent_down:
mock_is_agent_down.return_value = False
with self.port() as port:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port['port']['network_id'])
self.assertEqual(1, len(dhcp_agents['agents']))
with mock.patch(is_agent_down_str) as mock_is_agent_down:
mock_is_agent_down.return_value = True
with self.port() as port:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port['port']['network_id'])
self.assertEqual(0, len(dhcp_agents['agents']))
def test_network_scheduler_with_hosted_network(self):
plugin = manager.QuantumManager.get_plugin()
dhcp_hosta = {
'binary': 'quantum-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
with self.port() as port1:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port1['port']['network_id'])
self.assertEqual(1, len(dhcp_agents['agents']))
with mock.patch.object(plugin,
'get_dhcp_agents_hosting_networks',
autospec=True) as mock_hosting_agents:
mock_hosting_agents.return_value = plugin.get_agents_db(
self.adminContext)
with self.network('test', do_delete=False) as net1:
pass
with self.subnet(network=net1,
cidr='10.0.1.0/24',
do_delete=False) as subnet1:
pass
with self.port(subnet=subnet1, no_delete=True) as port2:
pass
dhcp_agents = self._list_dhcp_agents_hosting_network(
port2['port']['network_id'])
self.assertEqual(0, len(dhcp_agents['agents']))
def test_network_policy(self):
with self.network() as net1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._list_networks_hosted_by_dhcp_agent(
hosta_id, expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._add_network_to_dhcp_agent(
hosta_id, net1['network']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._add_network_to_dhcp_agent(hosta_id,
net1['network']['id'])
self._remove_network_from_dhcp_agent(
hosta_id, net1['network']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._list_dhcp_agents_hosting_network(
net1['network']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
def test_network_add_to_dhcp_agent(self):
with self.network() as net1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
num_before_add = len(
self._list_networks_hosted_by_dhcp_agent(
hosta_id)['networks'])
self._add_network_to_dhcp_agent(hosta_id,
net1['network']['id'])
num_after_add = len(
self._list_networks_hosted_by_dhcp_agent(
hosta_id)['networks'])
self.assertEqual(0, num_before_add)
self.assertEqual(1, num_after_add)
def test_network_remove_from_dhcp_agent(self):
dhcp_hosta = {
'binary': 'quantum-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
with self.port() as port1:
num_before_remove = len(
self._list_networks_hosted_by_dhcp_agent(
hosta_id)['networks'])
self._remove_network_from_dhcp_agent(hosta_id,
port1['port']['network_id'])
num_after_remove = len(
self._list_networks_hosted_by_dhcp_agent(
hosta_id)['networks'])
self.assertEqual(1, num_before_remove)
self.assertEqual(0, num_after_remove)
def test_router_auto_schedule_with_hosted(self):
with self.router() as router:
l3_rpc = l3_rpc_base.L3RpcCallbackMixin()
self._register_agent_states()
l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA)
l3_rpc.sync_routers(self.adminContext, host=L3_HOSTB)
l3_agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1, len(l3_agents['agents']))
self.assertEqual(L3_HOSTA, l3_agents['agents'][0]['host'])
def test_router_auto_schedule_with_hosted_2(self):
# one agent hosts one router
l3_rpc = l3_rpc_base.L3RpcCallbackMixin()
l3_hosta = {
'binary': 'quantum-l3-agent',
'host': L3_HOSTA,
'topic': 'L3_AGENT',
'configurations': {'use_namespaces': True,
'router_id': None,
'handle_internal_only_routers':
True,
'gateway_external_network_id':
None,
'interface_driver': 'interface_driver',
},
'agent_type': constants.AGENT_TYPE_L3}
l3_hostb = copy.deepcopy(l3_hosta)
l3_hostb['host'] = L3_HOSTB
with self.router() as router1:
self._register_one_agent_state(l3_hosta)
l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._disable_agent(hosta_id, admin_state_up=False)
with self.router() as router2:
self._register_one_agent_state(l3_hostb)
l3_rpc.sync_routers(self.adminContext, host=L3_HOSTB)
l3_agents_1 = self._list_l3_agents_hosting_router(
router1['router']['id'])
l3_agents_2 = self._list_l3_agents_hosting_router(
router2['router']['id'])
hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
num_hosta_routers = len(hosta_routers['routers'])
hostb_id = self._get_agent_id(
constants.AGENT_TYPE_L3,
L3_HOSTB)
hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id)
num_hostc_routers = len(hostb_routers['routers'])
self.assertEqual(1, num_hosta_routers)
self.assertEqual(1, num_hostc_routers)
self.assertEqual(1, len(l3_agents_1['agents']))
self.assertEqual(1, len(l3_agents_2['agents']))
self.assertEqual(L3_HOSTA, l3_agents_1['agents'][0]['host'])
self.assertEqual(L3_HOSTB, l3_agents_2['agents'][0]['host'])
def test_router_auto_schedule_with_disabled(self):
with contextlib.nested(self.router(),
self.router()):
l3_rpc = l3_rpc_base.L3RpcCallbackMixin()
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTB)
self._disable_agent(hosta_id)
# first agent will not host router since it is disabled
l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA)
# second agent will host all the routers since first is disabled.
l3_rpc.sync_routers(self.adminContext, host=L3_HOSTB)
hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id)
num_hostb_routers = len(hostb_routers['routers'])
hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
num_hosta_routers = len(hosta_routers['routers'])
self.assertEqual(2, num_hostb_routers)
self.assertEqual(0, num_hosta_routers)
def test_router_auto_schedule_with_candidates(self):
l3_hosta = {
'binary': 'quantum-l3-agent',
'host': L3_HOSTA,
'topic': 'L3_AGENT',
'configurations': {'use_namespaces': False,
'router_id': None,
'handle_internal_only_routers':
True,
'gateway_external_network_id':
None,
'interface_driver': 'interface_driver',
},
'agent_type': constants.AGENT_TYPE_L3}
with contextlib.nested(self.router(),
self.router()) as (router1, router2):
l3_rpc = l3_rpc_base.L3RpcCallbackMixin()
l3_hosta['configurations']['router_id'] = router1['router']['id']
self._register_one_agent_state(l3_hosta)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA)
hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
num_hosta_routers = len(hosta_routers['routers'])
l3_agents_1 = self._list_l3_agents_hosting_router(
router1['router']['id'])
l3_agents_2 = self._list_l3_agents_hosting_router(
router2['router']['id'])
# L3 agent will host only the compatible router.
self.assertEqual(1, num_hosta_routers)
self.assertEqual(1, len(l3_agents_1['agents']))
self.assertEqual(0, len(l3_agents_2['agents']))
def test_router_schedule_with_candidates(self):
l3_hosta = {
'binary': 'quantum-l3-agent',
'host': L3_HOSTA,
'topic': 'L3_AGENT',
'configurations': {'use_namespaces': False,
'router_id': None,
'handle_internal_only_routers':
True,
'gateway_external_network_id':
None,
'interface_driver': 'interface_driver',
},
'agent_type': constants.AGENT_TYPE_L3}
with contextlib.nested(self.router(),
self.router(),
self.subnet(),
self.subnet(cidr='10.0.3.0/24')) as (router1,
router2,
subnet1,
subnet2):
l3_hosta['configurations']['router_id'] = router1['router']['id']
self._register_one_agent_state(l3_hosta)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._router_interface_action('add',
router1['router']['id'],
subnet1['subnet']['id'],
None)
self._router_interface_action('add',
router2['router']['id'],
subnet2['subnet']['id'],
None)
hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
num_hosta_routers = len(hosta_routers['routers'])
l3_agents_1 = self._list_l3_agents_hosting_router(
router1['router']['id'])
l3_agents_2 = self._list_l3_agents_hosting_router(
router2['router']['id'])
# safe cleanup
self._router_interface_action('remove',
router1['router']['id'],
subnet1['subnet']['id'],
None)
self._router_interface_action('remove',
router2['router']['id'],
subnet2['subnet']['id'],
None)
# L3 agent will host only the compatible router.
self.assertEqual(1, num_hosta_routers)
self.assertEqual(1, len(l3_agents_1['agents']))
self.assertEqual(0, len(l3_agents_2['agents']))
def test_router_without_l3_agents(self):
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s['subnet']['network_id']}
router_req = self.new_create_request('routers', data, self.fmt)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
l3agents = (
self.agentscheduler_dbMinxin.get_l3_agents_hosting_routers(
self.adminContext, [router['router']['id']]))
self._delete('routers', router['router']['id'])
self.assertEqual(0, len(l3agents))
def test_router_sync_data(self):
with contextlib.nested(self.subnet(),
self.subnet(cidr='10.0.2.0/24'),
self.subnet(cidr='10.0.3.0/24')) as (
s1, s2, s3):
self._register_agent_states()
self._set_net_external(s1['subnet']['network_id'])
data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s1['subnet']['network_id']}
router_req = self.new_create_request('routers', data, self.fmt)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
self._router_interface_action('add',
router['router']['id'],
s2['subnet']['id'],
None)
self._router_interface_action('add',
router['router']['id'],
s3['subnet']['id'],
None)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1, len(l3agents['agents']))
agents = self._list_agents()
another_l3_agent_id = None
another_l3_agent_host = None
default = l3agents['agents'][0]['id']
for com in agents['agents']:
if (com['id'] != default and
com['agent_type'] == constants.AGENT_TYPE_L3):
another_l3_agent_id = com['id']
another_l3_agent_host = com['host']
break
self.assertTrue(another_l3_agent_id is not None)
self._add_router_to_l3_agent(another_l3_agent_id,
router['router']['id'],
expected_code=exc.HTTPConflict.code)
self._remove_router_from_l3_agent(default,
router['router']['id'])
self._add_router_to_l3_agent(another_l3_agent_id,
router['router']['id'])
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(another_l3_agent_host,
l3agents['agents'][0]['host'])
self._remove_router_from_l3_agent(another_l3_agent_id,
router['router']['id'])
self._router_interface_action('remove',
router['router']['id'],
s2['subnet']['id'],
None)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1,
len(l3agents['agents']))
self._router_interface_action('remove',
router['router']['id'],
s3['subnet']['id'],
None)
self._delete('routers', router['router']['id'])
def test_router_add_to_l3_agent(self):
with self.router() as router1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
num_before_add = len(
self._list_routers_hosted_by_l3_agent(
hosta_id)['routers'])
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTB)
self._add_router_to_l3_agent(hostb_id,
router1['router']['id'],
expected_code=exc.HTTPConflict.code)
num_after_add = len(
self._list_routers_hosted_by_l3_agent(
hosta_id)['routers'])
self.assertEqual(0, num_before_add)
self.assertEqual(1, num_after_add)
def test_router_policy(self):
with self.router() as router1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._list_routers_hosted_by_l3_agent(
hosta_id, expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._add_router_to_l3_agent(
hosta_id, router1['router']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._add_router_to_l3_agent(
hosta_id, router1['router']['id'])
self._remove_router_from_l3_agent(
hosta_id, router1['router']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._list_l3_agents_hosting_router(
router1['router']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
class OvsDhcpAgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin,
test_agent_ext_plugin.AgentDBTestMixIn,
AgentSchedulerTestMixIn,
test_plugin.QuantumDbPluginV2TestCase):
plugin_str = ('quantum.plugins.openvswitch.'
'ovs_quantum_plugin.OVSQuantumPluginV2')
def setUp(self):
self.dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.dhcp_notifier_cls_p = mock.patch(
'quantum.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
'DhcpAgentNotifyAPI')
self.dhcp_notifier_cls = self.dhcp_notifier_cls_p.start()
self.dhcp_notifier_cls.return_value = self.dhcp_notifier
super(OvsDhcpAgentNotifierTestCase, self).setUp(self.plugin_str)
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
self.adminContext = context.get_admin_context()
self.addCleanup(self.dhcp_notifier_cls_p.stop)
def test_network_add_to_dhcp_agent_notification(self):
with mock.patch.object(self.dhcp_notifier, 'cast') as mock_dhcp:
with self.network() as net1:
network_id = net1['network']['id']
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._add_network_to_dhcp_agent(hosta_id,
network_id)
mock_dhcp.assert_called_with(
mock.ANY,
self.dhcp_notifier.make_msg(
'network_create_end',
payload={'network': {'id': network_id}}),
topic='dhcp_agent.' + DHCP_HOSTA)
def test_network_remove_from_dhcp_agent_notification(self):
with self.network(do_delete=False) as net1:
network_id = net1['network']['id']
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._add_network_to_dhcp_agent(hosta_id,
network_id)
with mock.patch.object(self.dhcp_notifier, 'cast') as mock_dhcp:
self._remove_network_from_dhcp_agent(hosta_id,
network_id)
mock_dhcp.assert_called_with(
mock.ANY,
self.dhcp_notifier.make_msg(
'network_delete_end',
payload={'network_id': network_id}),
topic='dhcp_agent.' + DHCP_HOSTA)
def test_agent_updated_dhcp_agent_notification(self):
with mock.patch.object(self.dhcp_notifier, 'cast') as mock_dhcp:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._disable_agent(hosta_id, admin_state_up=False)
mock_dhcp.assert_called_with(
mock.ANY, self.dhcp_notifier.make_msg(
'agent_updated',
payload={'admin_state_up': False}),
topic='dhcp_agent.' + DHCP_HOSTA)
def test_network_port_create_notification(self):
dhcp_hosta = {
'binary': 'quantum-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'dhcp_agent',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
with mock.patch.object(self.dhcp_notifier, 'cast') as mock_dhcp:
with self.network(do_delete=False) as net1:
with self.subnet(network=net1,
do_delete=False) as subnet1:
with self.port(subnet=subnet1, no_delete=True) as port:
network_id = port['port']['network_id']
expected_calls = [
mock.call(
mock.ANY,
self.dhcp_notifier.make_msg(
'network_create_end',
payload={'network': {'id': network_id}}),
topic='dhcp_agent.' + DHCP_HOSTA),
mock.call(
mock.ANY,
self.dhcp_notifier.make_msg(
'port_create_end',
payload={'port': port['port']}),
topic='dhcp_agent.' + DHCP_HOSTA)]
self.assertEqual(mock_dhcp.call_args_list, expected_calls)
class OvsL3AgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin,
test_agent_ext_plugin.AgentDBTestMixIn,
AgentSchedulerTestMixIn,
test_plugin.QuantumDbPluginV2TestCase):
plugin_str = ('quantum.plugins.openvswitch.'
'ovs_quantum_plugin.OVSQuantumPluginV2')
def setUp(self):
self.dhcp_notifier_cls_p = mock.patch(
'quantum.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
'DhcpAgentNotifyAPI')
self.dhcp_notifier = mock.Mock(name='dhcp_notifier')
self.dhcp_notifier_cls = self.dhcp_notifier_cls_p.start()
self.dhcp_notifier_cls.return_value = self.dhcp_notifier
super(OvsL3AgentNotifierTestCase, self).setUp(self.plugin_str)
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
self.adminContext = context.get_admin_context()
self.addCleanup(self.dhcp_notifier_cls_p.stop)
def test_router_add_to_l3_agent_notification(self):
plugin = manager.QuantumManager.get_plugin()
with mock.patch.object(plugin.l3_agent_notifier, 'cast') as mock_l3:
with self.router() as router1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
routers = plugin.get_sync_data(self.adminContext,
[router1['router']['id']])
mock_l3.assert_called_with(
mock.ANY,
plugin.l3_agent_notifier.make_msg(
'router_added_to_agent',
payload=routers),
topic='l3_agent.hosta')
def test_router_remove_from_l3_agent_notification(self):
plugin = manager.QuantumManager.get_plugin()
with mock.patch.object(plugin.l3_agent_notifier, 'cast') as mock_l3:
with self.router() as router1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
self._remove_router_from_l3_agent(hosta_id,
router1['router']['id'])
mock_l3.assert_called_with(
mock.ANY, plugin.l3_agent_notifier.make_msg(
'router_removed_from_agent',
payload={'router_id': router1['router']['id']}),
topic='l3_agent.hosta')
def test_agent_updated_l3_agent_notification(self):
plugin = manager.QuantumManager.get_plugin()
with mock.patch.object(plugin.l3_agent_notifier, 'cast') as mock_l3:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._disable_agent(hosta_id, admin_state_up=False)
mock_l3.assert_called_with(
mock.ANY, plugin.l3_agent_notifier.make_msg(
'agent_updated',
payload={'admin_state_up': False}),
topic='l3_agent.hosta')
class OvsAgentSchedulerTestCaseXML(OvsAgentSchedulerTestCase):
fmt = 'xml'
| {
"content_hash": "c63437072f5c80b695fee951a2a6d516",
"timestamp": "",
"source": "github",
"line_count": 899,
"max_line_length": 79,
"avg_line_length": 49.07230255839822,
"alnum_prop": 0.5095656904524436,
"repo_name": "yamt/neutron",
"id": "4ab15a88a8de20b8a60307784ced21dffd4e8226",
"size": "44707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/tests/unit/openvswitch/test_agent_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "4078056"
},
{
"name": "Shell",
"bytes": "10023"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import abc
class OutputMatch(object):
def __init__(self, key, item, words):
self.key = key
self.item = item
self.words = words
class AbstractOutputItem(object):
"""
Represents one target or context word
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def id(self):
"""
        Returns the unique id of the word
"""
raise NotImplementedError()
class AbstractOutputList(object):
"""
    List of target or context words,
    labels of the columns/rows of a matrix
"""
__metaclass__ = abc.ABCMeta
@classmethod
@abc.abstractmethod
def build(cls, stream_rules, stream_data):
raise NotImplementedError()
@abc.abstractmethod
def __len__(self):
"""
Return the number of elements in the list
"""
raise NotImplementedError()
@abc.abstractmethod
def find_matches(self, word, unwanted_words = []):
raise NotImplementedError()
@abc.abstractmethod
def test_compatibility(self, attributes):
"""
        Test if all the attributes needed by this list are present in the argument
Args:
attributes: a list of attributes
"""
raise NotImplementedError()
@abc.abstractmethod
def to_list(self):
"""
        Returns a list containing all the string ids of the elements of this OutputList.
Used to create the semantic space object
"""
raise NotImplementedError()
@abc.abstractmethod
def __getitem__(self, key):
raise NotImplementedError()
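# Illustrative sketch of a minimal concrete list satisfying this interface;
# the in-memory storage and the exact-id matching rule below are assumptions
# for illustration, not part of anatomize itself:
#
#   class SimpleOutputList(AbstractOutputList):
#       def __init__(self, items):
#           self._items = items  # list of AbstractOutputItem instances
#
#       @classmethod
#       def build(cls, stream_rules, stream_data):
#           return cls(list(stream_data))
#
#       def __len__(self):
#           return len(self._items)
#
#       def __getitem__(self, key):
#           return self._items[key]
#
#       def to_list(self):
#           return [item.id for item in self._items]
#
#       def test_compatibility(self, attributes):
#           return True
#
#       def find_matches(self, word, unwanted_words=[]):
#           return [OutputMatch(i, item, [word])
#                   for i, item in enumerate(self._items)
#                   if item.id == word and word not in unwanted_words]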
| {
"content_hash": "33b4033e3bae88110b89feaf1b84c9c0",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 87,
"avg_line_length": 22.9,
"alnum_prop": 0.6076107298814722,
"repo_name": "FilippoC/anatomize",
"id": "7ab95b0215bdfe1abce378f0adfb17692764aac9",
"size": "1603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/anatomize/output/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90939"
}
],
"symlink_target": ""
} |
import abc
from oslo_config import cfg
from oslo_log import log as logging
from pkg_resources import iter_entry_points
import requests
import six
from magnum.common import clients
from magnum.common import exception
from magnum.common import paths
from magnum.i18n import _
from magnum.i18n import _LW
LOG = logging.getLogger(__name__)
KUBE_SECURE_PORT = '6443'
KUBE_INSECURE_PORT = '8080'
DOCKER_PORT = '2376'
template_def_opts = [
cfg.StrOpt('k8s_atomic_template_path',
default=paths.basedir_def('templates/kubernetes/'
'kubecluster.yaml'),
deprecated_name='template_path',
deprecated_group='bay_heat',
help=_(
'Location of template to build a k8s cluster on atomic.')),
cfg.StrOpt('k8s_coreos_template_path',
default=paths.basedir_def('templates/kubernetes/'
'kubecluster-coreos.yaml'),
help=_(
'Location of template to build a k8s cluster on CoreOS.')),
cfg.StrOpt('etcd_discovery_service_endpoint_format',
default='https://discovery.etcd.io/new?size=%(size)d',
help=_('Url for etcd public discovery endpoint.')),
cfg.StrOpt('swarm_atomic_template_path',
default=paths.basedir_def('templates/swarm/'
'swarmcluster.yaml'),
help=_('Location of template to build a swarm '
'cluster on atomic.')),
cfg.StrOpt('mesos_ubuntu_template_path',
default=paths.basedir_def('templates/mesos/'
'mesoscluster.yaml'),
help=_('Location of template to build a Mesos cluster '
'on Ubuntu.')),
cfg.ListOpt('enabled_definitions',
default=['magnum_vm_atomic_k8s', 'magnum_vm_coreos_k8s',
'magnum_vm_atomic_swarm', 'magnum_vm_ubuntu_mesos'],
help=_('Enabled bay definition entry points.')),
]
docker_registry_opts = [
cfg.StrOpt('swift_region',
help=_('Region name of Swift')),
cfg.StrOpt('swift_registry_container',
default='docker_registry',
help=_('Name of the container in Swift which docker registry '
'stores images in'))
]
CONF = cfg.CONF
CONF.register_opts(template_def_opts, group='bay')
CONF.register_opts(docker_registry_opts, group='docker_registry')
CONF.import_opt('trustee_domain_id', 'magnum.common.keystone', group='trust')
class ParameterMapping(object):
"""A mapping associating heat param and bay/baymodel attr.
A ParameterMapping is an association of a Heat parameter name with
an attribute on a Bay, Baymodel, or both.
In the case of both baymodel_attr and bay_attr being set, the Baymodel
will be checked first and then Bay if the attribute isn't set on the
Baymodel.
Parameters can also be set as 'required'. If a required parameter
    isn't set, a RequiredParameterNotProvided exception will be raised.
"""
def __init__(self, heat_param, baymodel_attr=None,
bay_attr=None, required=False,
param_type=lambda x: x):
self.heat_param = heat_param
self.baymodel_attr = baymodel_attr
self.bay_attr = bay_attr
self.required = required
self.param_type = param_type
def set_param(self, params, baymodel, bay):
value = None
if (self.baymodel_attr and
getattr(baymodel, self.baymodel_attr, None) is not None):
value = getattr(baymodel, self.baymodel_attr)
elif (self.bay_attr and
getattr(bay, self.bay_attr, None) is not None):
value = getattr(bay, self.bay_attr)
elif self.required:
kwargs = dict(heat_param=self.heat_param)
raise exception.RequiredParameterNotProvided(**kwargs)
if value is not None:
value = self.param_type(value)
params[self.heat_param] = value
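# Resolution-order sketch for the mapping above (the flavor attribute names
# are only illustrative; any Baymodel/Bay attributes behave the same way):
#
#   mapping = ParameterMapping('master_flavor',
#                              baymodel_attr='master_flavor_id',
#                              bay_attr='master_flavor_id',
#                              required=True)
#   params = {}
#   mapping.set_param(params, baymodel, bay)
#   # baymodel.master_flavor_id wins when set, otherwise bay.master_flavor_id
#   # is used; if neither is set, RequiredParameterNotProvided is raised
#   # because required=True.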
class OutputMapping(object):
"""A mapping associating heat outputs and bay attr.
An OutputMapping is an association of a Heat output with a key
Magnum understands.
"""
def __init__(self, heat_output, bay_attr=None):
self.bay_attr = bay_attr
self.heat_output = heat_output
def set_output(self, stack, baymodel, bay):
if self.bay_attr is None:
return
output_value = self.get_output_value(stack)
if output_value is not None:
setattr(bay, self.bay_attr, output_value)
def matched(self, output_key):
return self.heat_output == output_key
def get_output_value(self, stack):
for output in stack.to_dict().get('outputs', []):
if output['output_key'] == self.heat_output:
return output['output_value']
LOG.warning(_LW('stack does not have output_key %s'), self.heat_output)
return None
@six.add_metaclass(abc.ABCMeta)
class TemplateDefinition(object):
'''A mapping between Magnum objects and Heat templates.
A TemplateDefinition is essentially a mapping between Magnum objects
and Heat templates. Each TemplateDefinition has a mapping of Heat
parameters.
'''
definitions = None
provides = list()
def __init__(self):
self.param_mappings = list()
self.output_mappings = list()
@staticmethod
def load_entry_points():
for entry_point in iter_entry_points('magnum.template_definitions'):
yield entry_point, entry_point.load(require=False)
@classmethod
def get_template_definitions(cls):
'''Retrieves bay definitions from python entry_points.
Example:
With the following classes:
class TemplateDefinition1(TemplateDefinition):
provides = [
('server_type1', 'os1', 'coe1')
]
class TemplateDefinition2(TemplateDefinition):
provides = [
('server_type2', 'os2', 'coe2')
]
And the following entry_points:
magnum.template_definitions =
template_name_1 = some.python.path:TemplateDefinition1
template_name_2 = some.python.path:TemplateDefinition2
get_template_definitions will return:
{
(server_type1, os1, coe1):
{'template_name_1': TemplateDefinition1},
(server_type2, os2, coe2):
{'template_name_2': TemplateDefinition2}
}
:return: dict
'''
if not cls.definitions:
cls.definitions = dict()
for entry_point, def_class in cls.load_entry_points():
for bay_type in def_class.provides:
bay_type_tuple = (bay_type['server_type'],
bay_type['os'],
bay_type['coe'])
providers = cls.definitions.setdefault(bay_type_tuple,
dict())
providers[entry_point.name] = def_class
return cls.definitions
@classmethod
def get_template_definition(cls, server_type, os, coe):
'''Get enabled TemplateDefinitions.
Returns the enabled TemplateDefinition class for the provided
bay_type.
With the following classes:
class TemplateDefinition1(TemplateDefinition):
provides = [
('server_type1', 'os1', 'coe1')
]
class TemplateDefinition2(TemplateDefinition):
provides = [
('server_type2', 'os2', 'coe2')
]
And the following entry_points:
magnum.template_definitions =
template_name_1 = some.python.path:TemplateDefinition1
template_name_2 = some.python.path:TemplateDefinition2
            get_template_definition('server_type2', 'os2', 'coe2')
will return: TemplateDefinition2
:param server_type: The server_type the bay definition
will build on
        :param os: The operating system the bay definition will build on
:param coe: The Container Orchestration Environment the bay will
produce
:return: class
'''
definition_map = cls.get_template_definitions()
bay_type = (server_type, os, coe)
if bay_type not in definition_map:
raise exception.BayTypeNotSupported(
server_type=server_type,
os=os,
coe=coe)
type_definitions = definition_map[bay_type]
for name in cfg.CONF.bay.enabled_definitions:
if name in type_definitions:
return type_definitions[name]()
raise exception.BayTypeNotEnabled(
server_type=server_type, os=os, coe=coe)
def add_parameter(self, *args, **kwargs):
param = ParameterMapping(*args, **kwargs)
self.param_mappings.append(param)
def add_output(self, *args, **kwargs):
mapping_type = kwargs.pop('mapping_type', OutputMapping)
output = mapping_type(*args, **kwargs)
self.output_mappings.append(output)
def get_output(self, *args, **kwargs):
for output in self.output_mappings:
if output.matched(*args, **kwargs):
return output
return None
def get_params(self, context, baymodel, bay, **kwargs):
"""Pulls template parameters from Baymodel and/or Bay.
:param context: Context to pull template parameters for
:param baymodel: Baymodel to pull template parameters from
:param bay: Bay to pull template parameters from
:param extra_params: Any extra params to be provided to the template
:return: dict of template parameters
"""
template_params = dict()
for mapping in self.param_mappings:
mapping.set_param(template_params, baymodel, bay)
if 'extra_params' in kwargs:
template_params.update(kwargs.get('extra_params'))
return template_params
def get_heat_param(self, bay_attr=None, baymodel_attr=None):
"""Returns stack param name.
Return stack param name using bay and baymodel attributes
:param bay_attr bay attribute from which it maps to stack attribute
:param baymodel_attr baymodel attribute from which it maps
to stack attribute
:return stack parameter name or None
"""
for mapping in self.param_mappings:
if (mapping.bay_attr == bay_attr and
mapping.baymodel_attr == baymodel_attr):
return mapping.heat_param
return None
def update_outputs(self, stack, baymodel, bay):
for output in self.output_mappings:
output.set_output(stack, baymodel, bay)
@abc.abstractproperty
def template_path(self):
pass
def extract_definition(self, context, baymodel, bay, **kwargs):
return self.template_path, self.get_params(context, baymodel, bay,
**kwargs)
class BaseTemplateDefinition(TemplateDefinition):
def __init__(self):
super(BaseTemplateDefinition, self).__init__()
self.add_parameter('ssh_key_name',
baymodel_attr='keypair_id',
required=True)
self.add_parameter('server_image',
baymodel_attr='image_id')
self.add_parameter('dns_nameserver',
baymodel_attr='dns_nameserver')
self.add_parameter('http_proxy',
baymodel_attr='http_proxy')
self.add_parameter('https_proxy',
baymodel_attr='https_proxy')
self.add_parameter('no_proxy',
baymodel_attr='no_proxy')
self.add_parameter('number_of_masters',
bay_attr='master_count')
@abc.abstractproperty
def template_path(self):
pass
def get_params(self, context, baymodel, bay, **kwargs):
extra_params = kwargs.pop('extra_params', {})
extra_params['trustee_domain_id'] = CONF.trust.trustee_domain_id
extra_params['trustee_user_id'] = bay.trustee_user_id
extra_params['trustee_username'] = bay.trustee_username
extra_params['trustee_password'] = bay.trustee_password
extra_params['trust_id'] = bay.trust_id
extra_params['auth_url'] = context.auth_url
return super(BaseTemplateDefinition,
self).get_params(context, baymodel, bay,
extra_params=extra_params,
**kwargs)
def get_discovery_url(self, bay):
if hasattr(bay, 'discovery_url') and bay.discovery_url:
discovery_url = bay.discovery_url
else:
discovery_endpoint = (
cfg.CONF.bay.etcd_discovery_service_endpoint_format %
{'size': bay.master_count})
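            # With the default endpoint format this resolves to e.g.
            # https://discovery.etcd.io/new?size=3 (a master_count of 3 is
            # only illustrative).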
try:
discovery_url = requests.get(discovery_endpoint).text
except Exception as err:
LOG.error(six.text_type(err))
raise exception.GetDiscoveryUrlFailed(
discovery_endpoint=discovery_endpoint)
if not discovery_url:
raise exception.InvalidDiscoveryURL(
discovery_url=discovery_url,
discovery_endpoint=discovery_endpoint)
else:
bay.discovery_url = discovery_url
return discovery_url
class K8sApiAddressOutputMapping(OutputMapping):
def set_output(self, stack, baymodel, bay):
        # TODO(yuanying): port number is hardcoded, this will be fixed
protocol = 'https'
port = KUBE_SECURE_PORT
if baymodel.tls_disabled:
protocol = 'http'
port = KUBE_INSECURE_PORT
output_value = self.get_output_value(stack)
params = {
'protocol': protocol,
'address': output_value,
'port': port,
}
output_value = "%(protocol)s://%(address)s:%(port)s" % params
if output_value is not None:
setattr(bay, self.bay_attr, output_value)
class SwarmApiAddressOutputMapping(OutputMapping):
def set_output(self, stack, baymodel, bay):
protocol = 'https'
if baymodel.tls_disabled:
protocol = 'tcp'
output_value = self.get_output_value(stack)
params = {
'protocol': protocol,
'address': output_value,
'port': DOCKER_PORT,
}
output_value = "%(protocol)s://%(address)s:%(port)s" % params
if output_value is not None:
setattr(bay, self.bay_attr, output_value)
class K8sTemplateDefinition(BaseTemplateDefinition):
"""Base Kubernetes template."""
def __init__(self):
super(K8sTemplateDefinition, self).__init__()
self.add_parameter('master_flavor',
baymodel_attr='master_flavor_id')
self.add_parameter('minion_flavor',
baymodel_attr='flavor_id')
self.add_parameter('number_of_minions',
bay_attr='node_count')
self.add_parameter('external_network',
baymodel_attr='external_network_id',
required=True)
self.add_parameter('network_driver',
baymodel_attr='network_driver')
self.add_parameter('volume_driver',
baymodel_attr='volume_driver')
self.add_parameter('tls_disabled',
baymodel_attr='tls_disabled',
required=True)
self.add_parameter('registry_enabled',
baymodel_attr='registry_enabled')
self.add_parameter('bay_uuid',
bay_attr='uuid',
param_type=str)
self.add_output('api_address',
bay_attr='api_address',
mapping_type=K8sApiAddressOutputMapping)
self.add_output('kube_minions_private',
bay_attr=None)
self.add_output('kube_minions',
bay_attr='node_addresses')
self.add_output('kube_masters_private',
bay_attr=None)
self.add_output('kube_masters',
bay_attr='master_addresses')
def get_params(self, context, baymodel, bay, **kwargs):
extra_params = kwargs.pop('extra_params', {})
scale_mgr = kwargs.pop('scale_manager', None)
if scale_mgr:
hosts = self.get_output('kube_minions')
extra_params['minions_to_remove'] = (
scale_mgr.get_removal_nodes(hosts))
extra_params['discovery_url'] = self.get_discovery_url(bay)
osc = clients.OpenStackClients(context)
extra_params['magnum_url'] = osc.magnum_url()
if baymodel.tls_disabled:
extra_params['loadbalancing_protocol'] = 'HTTP'
extra_params['kubernetes_port'] = 8080
label_list = ['flannel_network_cidr', 'flannel_backend',
'flannel_network_subnetlen']
for label in label_list:
extra_params[label] = baymodel.labels.get(label)
if baymodel.registry_enabled:
extra_params['swift_region'] = CONF.docker_registry.swift_region
extra_params['registry_container'] = (
CONF.docker_registry.swift_registry_container)
return super(K8sTemplateDefinition,
self).get_params(context, baymodel, bay,
extra_params=extra_params,
**kwargs)
class AtomicK8sTemplateDefinition(K8sTemplateDefinition):
"""Kubernetes template for a Fedora Atomic VM."""
provides = [
{'server_type': 'vm',
'os': 'fedora-atomic',
'coe': 'kubernetes'},
]
def __init__(self):
super(AtomicK8sTemplateDefinition, self).__init__()
self.add_parameter('docker_volume_size',
baymodel_attr='docker_volume_size')
def get_params(self, context, baymodel, bay, **kwargs):
extra_params = kwargs.pop('extra_params', {})
extra_params['username'] = context.user_name
extra_params['tenant_name'] = context.tenant
osc = clients.OpenStackClients(context)
extra_params['region_name'] = osc.cinder_region_name()
return super(AtomicK8sTemplateDefinition,
self).get_params(context, baymodel, bay,
extra_params=extra_params,
**kwargs)
@property
def template_path(self):
return cfg.CONF.bay.k8s_atomic_template_path
class CoreOSK8sTemplateDefinition(K8sTemplateDefinition):
"""Kubernetes template for CoreOS VM."""
provides = [
{'server_type': 'vm', 'os': 'coreos', 'coe': 'kubernetes'},
]
@property
def template_path(self):
return cfg.CONF.bay.k8s_coreos_template_path
class AtomicSwarmTemplateDefinition(BaseTemplateDefinition):
"""Docker swarm template for a Fedora Atomic VM."""
provides = [
{'server_type': 'vm', 'os': 'fedora-atomic', 'coe': 'swarm'},
]
def __init__(self):
super(AtomicSwarmTemplateDefinition, self).__init__()
self.add_parameter('bay_uuid',
bay_attr='uuid',
param_type=str)
self.add_parameter('number_of_nodes',
bay_attr='node_count')
self.add_parameter('master_flavor',
baymodel_attr='master_flavor_id')
self.add_parameter('node_flavor',
baymodel_attr='flavor_id')
self.add_parameter('docker_volume_size',
baymodel_attr='docker_volume_size')
self.add_parameter('external_network',
baymodel_attr='external_network_id',
required=True)
self.add_parameter('network_driver',
baymodel_attr='network_driver')
self.add_parameter('tls_disabled',
baymodel_attr='tls_disabled',
required=True)
self.add_parameter('registry_enabled',
baymodel_attr='registry_enabled')
self.add_output('api_address',
bay_attr='api_address',
mapping_type=SwarmApiAddressOutputMapping)
self.add_output('swarm_master_private',
bay_attr=None)
self.add_output('swarm_masters',
bay_attr='master_addresses')
self.add_output('swarm_nodes_private',
bay_attr=None)
self.add_output('swarm_nodes',
bay_attr='node_addresses')
self.add_output('discovery_url',
bay_attr='discovery_url')
def get_params(self, context, baymodel, bay, **kwargs):
extra_params = kwargs.pop('extra_params', {})
extra_params['discovery_url'] = self.get_discovery_url(bay)
# HACK(apmelton) - This uses the user's bearer token, ideally
# it should be replaced with an actual trust token with only
# access to do what the template needs it to do.
osc = clients.OpenStackClients(context)
extra_params['magnum_url'] = osc.magnum_url()
label_list = ['flannel_network_cidr', 'flannel_backend',
'flannel_network_subnetlen']
for label in label_list:
extra_params[label] = baymodel.labels.get(label)
if baymodel.registry_enabled:
extra_params['swift_region'] = CONF.docker_registry.swift_region
extra_params['registry_container'] = (
CONF.docker_registry.swift_registry_container)
return super(AtomicSwarmTemplateDefinition,
self).get_params(context, baymodel, bay,
extra_params=extra_params,
**kwargs)
@property
def template_path(self):
return cfg.CONF.bay.swarm_atomic_template_path
class UbuntuMesosTemplateDefinition(BaseTemplateDefinition):
"""Mesos template for Ubuntu VM."""
provides = [
{'server_type': 'vm', 'os': 'ubuntu', 'coe': 'mesos'},
]
def __init__(self):
super(UbuntuMesosTemplateDefinition, self).__init__()
self.add_parameter('external_network',
baymodel_attr='external_network_id',
required=True)
self.add_parameter('number_of_slaves',
bay_attr='node_count')
self.add_parameter('master_flavor',
baymodel_attr='master_flavor_id')
self.add_parameter('slave_flavor',
baymodel_attr='flavor_id')
self.add_parameter('cluster_name',
bay_attr='name')
self.add_parameter('volume_driver',
baymodel_attr='volume_driver')
self.add_output('api_address',
bay_attr='api_address')
self.add_output('mesos_master_private',
bay_attr=None)
self.add_output('mesos_master',
bay_attr='master_addresses')
self.add_output('mesos_slaves_private',
bay_attr=None)
self.add_output('mesos_slaves',
bay_attr='node_addresses')
def get_params(self, context, baymodel, bay, **kwargs):
extra_params = kwargs.pop('extra_params', {})
# HACK(apmelton) - This uses the user's bearer token, ideally
# it should be replaced with an actual trust token with only
# access to do what the template needs it to do.
osc = clients.OpenStackClients(context)
extra_params['auth_url'] = context.auth_url
extra_params['username'] = context.user_name
extra_params['tenant_name'] = context.tenant
extra_params['domain_name'] = context.domain_name
extra_params['region_name'] = osc.cinder_region_name()
label_list = ['rexray_preempt', 'mesos_slave_isolation',
'mesos_slave_image_providers',
'mesos_slave_work_dir',
'mesos_slave_executor_environment_variables']
for label in label_list:
extra_params[label] = baymodel.labels.get(label)
return super(UbuntuMesosTemplateDefinition,
self).get_params(context, baymodel, bay,
extra_params=extra_params,
**kwargs)
@property
def template_path(self):
return cfg.CONF.bay.mesos_ubuntu_template_path
| {
"content_hash": "7b5c98ed29e9c2b0a49967bb2a173dc4",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 79,
"avg_line_length": 37.44230769230769,
"alnum_prop": 0.5671052111730078,
"repo_name": "jay-lau/magnum",
"id": "3bc40847011c7ef846fd3cbfaa787dd707650fd9",
"size": "25911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnum/conductor/template_definition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Python",
"bytes": "393112"
}
],
"symlink_target": ""
} |
import contextlib
import logging
import os
import shutil
import subprocess
import socket
import sys
import time
import pywintypes
import win32api
import win32service
import win32serviceutil
import rpc_client
_UPDATER_TEST_SERVICE_NAME = 'UpdaterTestService'
# Errors that might be raised when interacting with the service.
_ServiceErrors = (OSError, pywintypes.error, win32api.error, win32service.error,
WindowsError) # pylint: disable=undefined-variable
def _RunCommand(command, log_error=True):
"""Run a command and logs stdout/stderr if needed.
Args:
command: Command to run.
log_error: Whether to log the stderr.
Returns:
True if the process exits with 0.
"""
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
logging.info('Command %s stdout:\n %s', command, stdout)
if log_error and stderr:
logging.error('Command %s stderr:\n %s', command, stderr)
return process.returncode == 0
def _SetupEnvironmentForVPython():
"""Setup vpython environment."""
if os.getenv('VIRTUAL_ENV') is None:
logging.info('Not running in vpython, no additional setup is needed.')
return
# vpython_spec above brings the pywin32 module we need, but it may not be
# ready to use, run the post install scripts as described by
# https://pypi.org/project/pywin32/.
# This script outputs some error messages to stderr if it has run before.
# So skip logging to avoid this log pollution.
post_install_script = os.path.join(
os.path.dirname(os.path.abspath(sys.executable)),
'pywin32_postinstall.py')
_RunCommand([sys.executable, post_install_script, '-install'],
log_error=False)
  # Make pythonservice.exe explicit for our service. This is to avoid picking
  # up an incompatible interpreter accidentally.
source = os.path.join(
os.environ['VIRTUAL_ENV'], 'Lib',
'site-packages', 'win32', 'pythonservice.exe')
python_service_path = os.path.join(
os.path.dirname(os.path.abspath(sys.executable)), 'pythonservice.exe')
if not os.path.exists(python_service_path):
shutil.copyfile(source, python_service_path)
os.environ['PYTHON_SERVICE_EXE'] = python_service_path
def _IsServiceInStatus(status):
"""Returns the if test service is in the given status."""
try:
return status == win32serviceutil.QueryServiceStatus(
_UPDATER_TEST_SERVICE_NAME)[1]
except _ServiceErrors as err:
return False
def _MainServiceScriptPath():
"""Returns the service main script path."""
# Assumes updater_test_service.py file is in the same directory as this file.
service_main = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'updater_test_service.py')
if not os.path.isfile(service_main):
logging.error('Cannot find service main module: %s', service_main)
return None
return service_main
def _WaitServiceStatus(status, timeout=30):
"""Wait the service to be in the given state."""
check_interval = 0.2
for i in range(int(timeout / check_interval)):
if _IsServiceInStatus(status):
return True
time.sleep(check_interval)
return False
def InstallService():
"""Install updater test service.
If the service was previously installed, it will be updated.
Returns:
True if the service is installed successfully.
"""
_SetupEnvironmentForVPython()
service_main = _MainServiceScriptPath()
if not service_main:
logging.error('Cannot find the service main script [%s].', service_main)
return False
try:
if _IsServiceInStatus(win32service.SERVICE_RUNNING) and not StopService():
logging.error('Cannot stop existing test service.')
return False
logging.info('Installing service with script: %s', service_main)
command = [
sys.executable, service_main, '--interactive', '--startup', 'auto',
'install'
]
if _RunCommand(command):
logging.info('Service [%s] installed.', _UPDATER_TEST_SERVICE_NAME)
return True
else:
logging.error('Failed to install [%s].', _UPDATER_TEST_SERVICE_NAME)
return False
except _ServiceErrors as err:
logging.exception(err)
return False
def UninstallService():
"""Uninstall the service."""
service_main = _MainServiceScriptPath()
if not service_main:
logging.error('Unexpected: missing service main script [%s].', service_main)
return False
try:
if _IsServiceInStatus(win32service.SERVICE_RUNNING) and not StopService():
logging.error('Cannot stop test service for uninstall.')
return False
command = [sys.executable, service_main, 'remove']
if _RunCommand(command):
      logging.info('Service [%s] uninstalled.', _UPDATER_TEST_SERVICE_NAME)
return True
else:
logging.error('Failed to uninstall [%s].', _UPDATER_TEST_SERVICE_NAME)
return False
except _ServiceErrors as err:
    logging.error('Failed to uninstall service.')
logging.exception(err)
return False
def StartService(timeout=30):
"""Start updater test service and make sure it is reachable.
Args:
timeout: How long to wait for service to be ready.
Returns:
True if the service is started successfully.
"""
logging.info('Starting service [%s].', _UPDATER_TEST_SERVICE_NAME)
if _IsServiceInStatus(win32service.SERVICE_RUNNING):
logging.info('Test service is already running.')
return True
try:
win32serviceutil.StartService(_UPDATER_TEST_SERVICE_NAME)
if not _WaitServiceStatus(win32service.SERVICE_RUNNING, timeout):
logging.error('Wait for service start failed.')
return False
    logging.info('Service %s started.', _UPDATER_TEST_SERVICE_NAME)
return rpc_client.TestConnection()
except _ServiceErrors as err:
logging.error('Failed to start service.')
logging.exception(err)
return False
def StopService(timeout=30):
"""Stop test service if it is running.
Returns:
True if the service is stopped successfully.
"""
logging.info('Stopping service [%s]...', _UPDATER_TEST_SERVICE_NAME)
try:
if not _IsServiceInStatus(win32service.SERVICE_RUNNING):
return True
win32serviceutil.StopService(_UPDATER_TEST_SERVICE_NAME)
if not _WaitServiceStatus(win32service.SERVICE_STOPPED, timeout):
logging.error('Wait for service stop failed.')
return False
logging.info('Service [%s] stopped.', _UPDATER_TEST_SERVICE_NAME)
return True
except _ServiceErrors as err:
logging.error('Failed to stop service.')
logging.exception(err)
return False
@contextlib.contextmanager
def OpenService():
"""Open the service as a managed resource."""
try:
if InstallService() and StartService():
yield _UPDATER_TEST_SERVICE_NAME
else:
yield None
finally:
UninstallService()
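# Example usage of OpenService(), as an illustrative sketch (this snippet is
# not one of the module's entry points):
#
#   with OpenService() as service_name:
#     if service_name:
#       rpc_client.TestConnection()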
if __name__ == '__main__':
if len(sys.argv) == 1:
logging.error('Must provide an action.')
sys.exit(-1)
command = sys.argv[1]
if command == 'setup':
result = InstallService() and StartService()
elif command == 'teardown':
result = UninstallService()
else:
logging.error('Unknown command: %s.', command)
sys.exit(-2)
sys.exit(0 if result else 1)
| {
"content_hash": "e36ad78edf92b7f783d4e9bd4d1667ed",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 80,
"avg_line_length": 29.69795918367347,
"alnum_prop": 0.6944749862561848,
"repo_name": "ric2b/Vivaldi-browser",
"id": "3389bdf1da31f59f18910139c725f72358e4df53",
"size": "7599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromium/chrome/updater/test/service/win/updater_test_service_control.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import sys
import pytest
try:
import clr
except ImportError:
clr = None
from msl import loadlib
def add_py4j_in_eggs():
# if py4j is located in the .eggs directory and not in the site-packages directory
# then the py4j*.jar file cannot be found, so we need to create a PY4J_JAR env variable
import py4j
os.environ['PY4J_JAR'] = os.path.join(
'.eggs',
'py4j-{}-py{}.{}.egg'.format(py4j.__version__, sys.version_info.major, sys.version_info.minor),
'share',
'py4j',
'py4j{}.jar'.format(py4j.__version__)
)
def has_labview_runtime():
if loadlib.IS_PYTHON_64BIT:
root = r'C:\Program Files\National Instruments\Shared\LabVIEW Run-Time'
else:
root = r'C:\Program Files (x86)\National Instruments\Shared\LabVIEW Run-Time'
if not os.path.isdir(root):
return False
for item in os.listdir(root):
path = os.path.join(root, item, 'lvrt.dll')
if os.path.isfile(path):
return True
return False
@pytest.fixture(autouse=True)
def doctest_skipif(doctest_namespace):
if loadlib.IS_PYTHON2:
py2 = lambda: pytest.skip('requires Python 3')
else:
py2 = lambda: None
if sys.version_info[:2] < (3, 6):
less_36 = lambda: pytest.skip('ignore Python <3.6 since dict does not preserve insertion order')
else:
less_36 = lambda: None
if not loadlib.IS_WINDOWS:
not_windows = lambda: pytest.skip('not Windows')
readme_com = lambda: pytest.skip('skipped at COM test')
else:
not_windows = lambda: None
readme_com = lambda: None
if loadlib.IS_MAC:
is_mac = lambda: pytest.skip('is macOS')
else:
is_mac = lambda: None
if loadlib.IS_PYTHON_64BIT:
bit64 = lambda: pytest.skip('requires 32-bit Python')
bit32 = lambda: None
readme_all = lambda: None
else:
bit64 = lambda: None
bit32 = lambda: pytest.skip('requires 64-bit Python')
readme_all = lambda: pytest.skip('skipped all tests')
if loadlib.IS_PYTHON_64BIT and has_labview_runtime():
no_labview64 = lambda: None
else:
no_labview64 = lambda: pytest.skip('requires 64-bit LabVIEW Run-Time Engine')
no_labview32 = lambda: pytest.skip('not checking if 32-bit LabVIEW is installed')
if clr is None:
readme_dotnet = lambda: pytest.skip('skipped at .NET test')
no_pythonnet = lambda: pytest.skip('pythonnet is not installed')
else:
readme_dotnet = lambda: None
no_pythonnet = lambda: None
doctest_namespace['SKIP_IF_PYTHON_2'] = py2
doctest_namespace['SKIP_IF_PYTHON_LESS_THAN_3_6'] = less_36
doctest_namespace['SKIP_IF_NOT_WINDOWS'] = not_windows
doctest_namespace['SKIP_IF_MACOS'] = is_mac
doctest_namespace['SKIP_IF_64BIT'] = bit64
doctest_namespace['SKIP_IF_32BIT'] = bit32
doctest_namespace['SKIP_IF_LABVIEW64_NOT_INSTALLED'] = no_labview64
doctest_namespace['SKIP_LABVIEW32'] = no_labview32
doctest_namespace['SKIP_README_DOTNET'] = readme_dotnet
doctest_namespace['SKIP_README_COM'] = readme_com
doctest_namespace['SKIP_README_ALL'] = readme_all
doctest_namespace['SKIP_IF_NO_PYTHONNET'] = no_pythonnet
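# Illustrative note (an assumption about how these names are consumed, not
# taken from this file): pytest's doctest_namespace makes the callables above
# available inside collected doctests, so a platform-specific example can be
# guarded with something like:
#
#   >>> SKIP_IF_NOT_WINDOWS()
#   >>> # ... Windows-only doctest statements follow ...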
skipif_no_comtypes = pytest.mark.skipif(
not loadlib.IS_WINDOWS,
reason='comtypes is only supported on Windows'
)
skipif_no_labview_runtime = pytest.mark.skipif(
not has_labview_runtime(),
reason='requires LabVIEW Run-Time Engine'
)
skipif_no_pythonnet = pytest.mark.skipif(
clr is None,
reason='pythonnet is not installed'
)
skipif_no_server32 = pytest.mark.skipif(
loadlib.IS_MAC,
reason='32-bit server does not exist'
)
skipif_not_windows = pytest.mark.skipif(
not loadlib.IS_WINDOWS,
reason='not Windows'
)
| {
"content_hash": "794ba169e496f6387e2a1fe1d035d5a2",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 104,
"avg_line_length": 30.862903225806452,
"alnum_prop": 0.651685393258427,
"repo_name": "MSLNZ/msl-loadlib",
"id": "9cd783d385f421b7f799c4b3af8abe494750aeea",
"size": "3827",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1669"
},
{
"name": "C#",
"bytes": "5516"
},
{
"name": "C++",
"bytes": "2630"
},
{
"name": "Fortran",
"bytes": "4961"
},
{
"name": "Java",
"bytes": "15092"
},
{
"name": "Python",
"bytes": "286889"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('../src')
import getopt
opts, _ = getopt.getopt(sys.argv[1:], 't:', ['test'])
n_night_shifts = 5
n_day_shifts = 5
n_tasks = n_night_shifts+n_day_shifts
horizon = n_tasks
from pyschedule import Scenario, solvers, plotters
S = Scenario('shift_bounds',horizon=horizon)
R = S.Resource('P')
for i in range(n_night_shifts):
	# added some delay cost, so without any constraint,
	# the 5 night shifts would all be scheduled first
	# and then the 5 day shifts
T = S.Task('N%i'%i,delay_cost=2)
# the shift type of night shifts is -1
T.shift_type = -1
T += R
for i in range(n_day_shifts):
T = S.Task('D%i'%i,delay_cost=1)
	# the shift type of day shifts is 1
T.shift_type = 1
T += R
for i in range(horizon):
# for every set of periods 1..i, make sure that
# there is always at most one more night shift than
# day shifts and vice versa. Each capacity constraint
# limits the sum of 'shift_types' in the range
S += R[:i]['shift_type'] <= 1
S += R[:i]['shift_type'] >= -1
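# Worked illustration of the constraints above: with i = 3, R[:3]['shift_type']
# sums the shift_type values of the tasks scheduled on P in that initial range.
# Each night shift contributes -1 and each day shift +1, so bounding the sum to
# [-1, 1] means any prefix of the schedule never contains more than one extra
# shift of either type, which forces night and day shifts to alternate.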
if solvers.mip.solve(S,msg=0,kind='CBC'):
if ('--test','') in opts:
assert( set( T.start_value % 2 for T in S.tasks() if T.name.startswith('N') ) == {0} )
assert( set( T.start_value % 2 for T in S.tasks() if T.name.startswith('D') ) == {1} )
print('test passed')
else:
plotters.matplotlib.plot(S)
else:
print('no solution found')
assert(1==0)
| {
"content_hash": "41853a924e177494bd8639bbfadda863",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 88,
"avg_line_length": 29.217391304347824,
"alnum_prop": 0.6555059523809523,
"repo_name": "timnon/pyschedule",
"id": "f489c8e357b676ad2e0bdc06344fbc7a252a41cd",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/alternating-shifts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "4908"
},
{
"name": "Jupyter Notebook",
"bytes": "1727870"
},
{
"name": "Python",
"bytes": "109000"
}
],
"symlink_target": ""
} |
import _index
from energy.libs.MongoStock import Feed
from energy.libs.eAlgoLib import eAlgoLib as eal
from pyalgotrade import strategy
from pyalgotrade import bar
#from pyalgotrade.technical import stoch
from pyalgotrade.technical import ma
from pyalgotrade.talibext import indicator
from pyalgotrade.technical import atr
import pandas as pd
import sys
class pyAlgoSMASTOCH(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, bBandsPeriod):
strategy.BacktestingStrategy.__init__(self, feed)
self.setDebugMode(False)
self.__instrument = instrument
self.__feed = feed
self.__position = None
#self.__stochK = stoch.StochasticOscillator(feed[instrument], 5, 3)
self.__mafast = ma.EMA(feed[instrument].getCloseDataSeries(), 5)
self.__maslow = ma.EMA(feed[instrument].getCloseDataSeries(), 30)
self.__atr = atr.ATR(feed[instrument], 15)
self.__col = ["buyPrice","buyTime","sellPrice","sellTime", "returns"]
self.__msdf = pd.DataFrame(columns=self.__col)
self.__buyPrice = 0
self.__buyTime = None
self.setUseAdjustedValues(True)
def EchoDF(self):
return self.__msdf
def onEnterOk(self, position):
execInfo = position.getEntryOrder().getExecutionInfo()
#self.info("BUY at $%.2f"%(execInfo.getPrice()))
self.__buyPrice = execInfo.getPrice()
self.__buyTime = execInfo.getDateTime()
def onEnterCanceled(self, position):
#self.info("onEnterCanceled")
self.__position = None
def onExitOk(self, position):
execInfo = position.getExitOrder().getExecutionInfo()
#self.info("SELL at $%.2f"%(execInfo.getPrice()))
self.__position = None
pdser = pd.Series([self.__buyPrice, str(self.__buyTime)[:10],
execInfo.getPrice(),str(execInfo.getDateTime())[:10], (execInfo.getPrice() -self.__buyPrice)],index=self.__col )
self.__msdf = self.__msdf.append(pdser,ignore_index=True)
self.__buyPrice = 0
self.__buyTime = None
def onExitCanceled(self, position):
self.info("onExitCanceled")
self.__position.exitMarket()
def onBars(self, bars):
if self.__atr is None:
return
barDs = self.getFeed().getDataSeries(self.__instrument)
atr_21 = self.__atr[-21:]
if len(atr_21) < 20:
return
#print atr_21[:-1]
maxatr = max(atr_21[:-1])
nowatr = self.__atr[-1]
stochk, stochd = indicator.STOCH(barDs, 100, fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)
#print stochk[-1],"--",stochd[-1]
#print "stochk:%s mfast:%s mslow:%s nowatr:%s maxatr:%s"%(stochk[-1], self.__mafast[-1], self.__maslow[-1], nowatr, maxatr)
if self.__position is None:
if stochk[-1] < 20 and self.__mafast[-1] < self.__maslow[-1] :
# Enter a buy market order for 10 shares. The order is good till canceled.
self.__position = self.enterLong(self.__instrument, 10, True)
#print dir(self.__position)
# Check if we have to exit the position.
elif stochk[-1] > 75 and nowatr < maxatr and not self.__position.exitActive():
self.__position.exitMarket()
def main(i, code):
#code = "000592"
dbfeed = Feed(code, bar.Frequency.DAY, 1024)
dbfeed.loadBars()
myStrategy = pyAlgoSMASTOCH(dbfeed, code, bBandsPeriod=i)
ms = eal()
ms.setDebug(True)
ms.protfolio(myStrategy)
if __name__ == "__main__":
code = sys.argv[1]
#for m in range(10,60,5):
m = 40
main(m, code)
| {
"content_hash": "47e606b907a0e193def155414531fc62",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 139,
"avg_line_length": 36.524752475247524,
"alnum_prop": 0.6177825969097316,
"repo_name": "vyouzhis/energy",
"id": "1a7e60cc016a1eda3628e9166ed63b56d671fa3d",
"size": "3834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epyalgo/pyAlgoSTOCH.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "88797"
}
],
"symlink_target": ""
} |
import pytest
import boto3
import base64
import os
import json
import time
import uuid
from faker import Factory
from dotenv import Dotenv
from api.chalicelib.aws_ir.aws_ir.plugins import tag_host
CFN_TEMPLATE_PATH = "cfn/dummy-machine.yml"
SESSION = boto3.Session(
profile_name='incident-account',
region_name='us-west-2'
)
CFN_CLIENT = SESSION.client('cloudformation')
EC2_CLIENT = SESSION.client('ec2')
STACKNAME="InstanceCompromise-{stack_uuid}".format(stack_uuid=uuid.uuid4().hex)
def setup_module(module):
print ("setup_module:%s" % module.__name__)
with open(CFN_TEMPLATE_PATH) as f:
CFN_CLIENT.create_stack(
StackName = STACKNAME,
TemplateBody = f.read(),
Capabilities = ['CAPABILITY_IAM']
)
def teardown_module(module):
print ("teardown_module:%s" % module.__name__)
CFN_CLIENT.delete_stack(StackName=STACKNAME)
def find_host():
host_instance_id = find_host_id()
response = EC2_CLIENT.describe_instances(
InstanceIds=[host_instance_id]
)
incident_instance = response['Reservations'][0]['Instances'][0]
return {
'vpc_id': incident_instance['VpcId'],
'region': 'us-west-2',
'case_number': '1234567',
'instance_id': incident_instance['InstanceId'],
'compromise_type': 'host',
'private_ip_address': incident_instance.get('PrivateIpAddress', None),
'public_ip_address': incident_instance.get('PublicIpAddress', None),
}
def find_host_id():
host_instance_id = None
retries = 0
    while host_instance_id is None and retries < 30:
try:
response = CFN_CLIENT.describe_stacks(
StackName=STACKNAME
)
host_instance_id = response['Stacks'][0]['Outputs'][0]['OutputValue']
print "found {}".format(host_instance_id)
return host_instance_id
except:
            retries += 1
time.sleep(10)
continue
def test_plugin():
resource = find_host()
plugin = tag_host.Tag(
client=EC2_CLIENT,
compromised_resource=resource,
dry_run=False
)
status = plugin.validate()
assert status == True
| {
"content_hash": "6e109b9ec78963037b6bee5d389b3b43",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 81,
"avg_line_length": 26.452380952380953,
"alnum_prop": 0.621962196219622,
"repo_name": "ThreatResponse/aws_ir-api",
"id": "9487bcf646a2ebaefc010973c8a758ea555778c1",
"size": "2241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_plugin_host_tag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36854"
},
{
"name": "Roff",
"bytes": "17679"
}
],
"symlink_target": ""
} |
import google.api_core.grpc_helpers
from google.cloud.trace_v1.proto import trace_pb2_grpc
class TraceServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.devtools.cloudtrace.v1 TraceService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/trace.append",
"https://www.googleapis.com/auth/trace.readonly",
)
def __init__(
self, channel=None, credentials=None, address="cloudtrace.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive.",
)
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
options={
"grpc.max_send_message_length": -1,
"grpc.max_receive_message_length": -1,
}.items(),
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"trace_service_stub": trace_pb2_grpc.TraceServiceStub(channel),
}
@classmethod
def create_channel(
cls, address="cloudtrace.googleapis.com:443", credentials=None, **kwargs
):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def list_traces(self):
"""Return the gRPC stub for :meth:`TraceServiceClient.list_traces`.
        Returns a list of traces that match the specified filter conditions.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["trace_service_stub"].ListTraces
@property
def get_trace(self):
"""Return the gRPC stub for :meth:`TraceServiceClient.get_trace`.
Gets a single trace by its ID.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["trace_service_stub"].GetTrace
@property
def patch_traces(self):
"""Return the gRPC stub for :meth:`TraceServiceClient.patch_traces`.
Sends new traces to Stackdriver Trace or updates existing traces. If the ID
of a trace that you send matches that of an existing trace, any fields
in the existing trace and its spans are overwritten by the provided values,
and any new fields provided are merged with the existing trace data. If the
ID does not match, a new trace is created.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["trace_service_stub"].PatchTraces
| {
"content_hash": "96f5465915db0a397d3f7f4ebe2820f9",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 87,
"avg_line_length": 37.44927536231884,
"alnum_prop": 0.6178405572755418,
"repo_name": "tswast/google-cloud-python",
"id": "63239085778e5a4961a2279fc61dd034c07a7ebb",
"size": "5771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trace/google/cloud/trace_v1/gapic/transports/trace_service_grpc_transport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
"""
Test plugin infrastructure and hooks.
"""
import argparse
import sys
from unittest import (
mock,
)
import pytest
import cmd2
from cmd2 import (
Cmd2ArgumentParser,
exceptions,
plugin,
with_argparser,
)
class Plugin:
"""A mixin class for testing hook registration and calling"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_counters()
def reset_counters(self):
self.called_preparse = 0
self.called_postparsing = 0
self.called_precmd = 0
self.called_postcmd = 0
self.called_cmdfinalization = 0
###
#
# preloop and postloop hooks
    # which share the same signature and are thus interchangeable
#
###
def prepost_hook_one(self) -> None:
"""Method used for preloop or postloop hooks"""
self.poutput("one")
def prepost_hook_two(self) -> None:
"""Another method used for preloop or postloop hooks"""
self.poutput("two")
def prepost_hook_too_many_parameters(self, param) -> None:
"""A preloop or postloop hook with too many parameters"""
pass
def prepost_hook_with_wrong_return_annotation(self) -> bool:
"""A preloop or postloop hook with incorrect return type"""
pass
###
#
# preparse hook
#
###
def preparse(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""Preparsing hook"""
self.called_preparse += 1
return data
###
#
# Postparsing hooks
#
###
def postparse_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook"""
self.called_postparsing += 1
return data
def postparse_hook_stop(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with requests application exit"""
self.called_postparsing += 1
data.stop = True
return data
def postparse_hook_emptystatement(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with raises an EmptyStatement exception"""
self.called_postparsing += 1
raise exceptions.EmptyStatement
def postparse_hook_exception(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook which raises an exception"""
self.called_postparsing += 1
raise ValueError
def postparse_hook_too_many_parameters(self, data1, data2) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with too many parameters"""
pass
def postparse_hook_undeclared_parameter_annotation(self, data) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with an undeclared parameter type"""
pass
def postparse_hook_wrong_parameter_annotation(self, data: str) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with the wrong parameter type"""
pass
def postparse_hook_undeclared_return_annotation(self, data: cmd2.plugin.PostparsingData):
"""A postparsing hook with an undeclared return type"""
pass
def postparse_hook_wrong_return_annotation(self, data: cmd2.plugin.PostparsingData) -> str:
"""A postparsing hook with the wrong return type"""
pass
###
#
# precommand hooks, some valid, some invalid
#
###
def precmd(self, statement: cmd2.Statement) -> cmd2.Statement:
"""Override cmd.Cmd method"""
self.called_precmd += 1
return statement
def precmd_hook(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook"""
self.called_precmd += 1
return data
def precmd_hook_emptystatement(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook which raises an EmptyStatement exception"""
self.called_precmd += 1
raise exceptions.EmptyStatement
def precmd_hook_exception(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook which raises an exception"""
self.called_precmd += 1
raise ValueError
def precmd_hook_not_enough_parameters(self) -> plugin.PrecommandData:
"""A precommand hook with no parameters"""
pass
def precmd_hook_too_many_parameters(self, one: plugin.PrecommandData, two: str) -> plugin.PrecommandData:
"""A precommand hook with too many parameters"""
return one
def precmd_hook_no_parameter_annotation(self, data) -> plugin.PrecommandData:
"""A precommand hook with no type annotation on the parameter"""
return data
def precmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PrecommandData:
"""A precommand hook with the incorrect type annotation on the parameter"""
return data
def precmd_hook_no_return_annotation(self, data: plugin.PrecommandData):
"""A precommand hook with no type annotation on the return value"""
return data
def precmd_hook_wrong_return_annotation(self, data: plugin.PrecommandData) -> cmd2.Statement:
return self.statement_parser.parse('hi there')
###
#
# postcommand hooks, some valid, some invalid
#
###
def postcmd(self, stop: bool, statement: cmd2.Statement) -> bool:
"""Override cmd.Cmd method"""
self.called_postcmd += 1
return stop
def postcmd_hook(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
"""A postcommand hook"""
self.called_postcmd += 1
return data
def postcmd_hook_exception(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
"""A postcommand hook with raises an exception"""
self.called_postcmd += 1
raise ZeroDivisionError
def postcmd_hook_not_enough_parameters(self) -> plugin.PostcommandData:
"""A precommand hook with no parameters"""
pass
def postcmd_hook_too_many_parameters(self, one: plugin.PostcommandData, two: str) -> plugin.PostcommandData:
"""A precommand hook with too many parameters"""
return one
def postcmd_hook_no_parameter_annotation(self, data) -> plugin.PostcommandData:
"""A precommand hook with no type annotation on the parameter"""
return data
def postcmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PostcommandData:
"""A precommand hook with the incorrect type annotation on the parameter"""
return data
def postcmd_hook_no_return_annotation(self, data: plugin.PostcommandData):
"""A precommand hook with no type annotation on the return value"""
return data
def postcmd_hook_wrong_return_annotation(self, data: plugin.PostcommandData) -> cmd2.Statement:
return self.statement_parser.parse('hi there')
###
#
# command finalization hooks, some valid, some invalid
#
###
def cmdfinalization_hook(self, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:
"""A command finalization hook."""
self.called_cmdfinalization += 1
return data
def cmdfinalization_hook_stop(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which requests application exit"""
self.called_cmdfinalization += 1
data.stop = True
return data
def cmdfinalization_hook_exception(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which raises an exception"""
self.called_cmdfinalization += 1
raise ValueError
def cmdfinalization_hook_system_exit(
self, data: cmd2.plugin.CommandFinalizationData
) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which raises a SystemExit"""
self.called_cmdfinalization += 1
raise SystemExit(5)
def cmdfinalization_hook_keyboard_interrupt(
self, data: cmd2.plugin.CommandFinalizationData
) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which raises a KeyboardInterrupt"""
self.called_cmdfinalization += 1
raise KeyboardInterrupt
def cmdfinalization_hook_passthrough_exception(
self, data: cmd2.plugin.CommandFinalizationData
) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which raises a PassThroughException"""
self.called_cmdfinalization += 1
wrapped_ex = OSError("Pass me up")
raise exceptions.PassThroughException(wrapped_ex=wrapped_ex)
def cmdfinalization_hook_not_enough_parameters(self) -> plugin.CommandFinalizationData:
"""A command finalization hook with no parameters."""
pass
def cmdfinalization_hook_too_many_parameters(
self, one: plugin.CommandFinalizationData, two: str
) -> plugin.CommandFinalizationData:
"""A command finalization hook with too many parameters."""
return one
def cmdfinalization_hook_no_parameter_annotation(self, data) -> plugin.CommandFinalizationData:
"""A command finalization hook with no type annotation on the parameter."""
return data
def cmdfinalization_hook_wrong_parameter_annotation(self, data: str) -> plugin.CommandFinalizationData:
"""A command finalization hook with the incorrect type annotation on the parameter."""
return data
def cmdfinalization_hook_no_return_annotation(self, data: plugin.CommandFinalizationData):
"""A command finalizationhook with no type annotation on the return value."""
return data
def cmdfinalization_hook_wrong_return_annotation(self, data: plugin.CommandFinalizationData) -> cmd2.Statement:
"""A command finalization hook with the wrong return type annotation."""
return self.statement_parser.parse('hi there')
class PluggedApp(Plugin, cmd2.Cmd):
"""A sample app with a plugin mixed in"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_say(self, statement):
"""Repeat back the arguments"""
self.poutput(statement)
def do_skip_postcmd_hooks(self, _):
self.poutput("In do_skip_postcmd_hooks")
raise exceptions.SkipPostcommandHooks
parser = Cmd2ArgumentParser(description="Test parser")
parser.add_argument("my_arg", help="some help text")
@with_argparser(parser)
def do_argparse_cmd(self, namespace: argparse.Namespace):
"""Repeat back the arguments"""
self.poutput(namespace.cmd2_statement.get())
###
#
# test pre and postloop hooks
#
###
def test_register_preloop_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_preloop_hook(app.prepost_hook_too_many_parameters)
def test_register_preloop_hook_with_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_preloop_hook(app.prepost_hook_with_wrong_return_annotation)
def test_preloop_hook(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_preloop_hook(app.prepost_hook_one)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'one\nhello\n'
assert not err
def test_preloop_hooks(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_preloop_hook(app.prepost_hook_one)
app.register_preloop_hook(app.prepost_hook_two)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'one\ntwo\nhello\n'
assert not err
def test_register_postloop_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postloop_hook(app.prepost_hook_too_many_parameters)
def test_register_postloop_hook_with_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postloop_hook(app.prepost_hook_with_wrong_return_annotation)
def test_postloop_hook(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_postloop_hook(app.prepost_hook_one)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'hello\none\n'
assert not err
def test_postloop_hooks(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_postloop_hook(app.prepost_hook_one)
app.register_postloop_hook(app.prepost_hook_two)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'hello\none\ntwo\n'
assert not err
###
#
# test preparse hook
#
###
def test_preparse(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.preparse)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_preparse == 1
###
#
# test postparsing hooks
#
###
def test_postparsing_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_too_many_parameters)
def test_postparsing_hook_undeclared_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_undeclared_parameter_annotation)
def test_postparsing_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_wrong_parameter_annotation)
def test_postparsing_hook_undeclared_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_undeclared_return_annotation)
def test_postparsing_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_wrong_return_annotation)
def test_postparsing_hook(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert not app.called_postparsing
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 1
# register the function again, so it should be called twice
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 2
def test_postparsing_hook_stop_first(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert stop
# register another function but it shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert stop
def test_postparsing_hook_stop_second(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert not stop
# register another function and make sure it gets called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 2
assert stop
# register a third function which shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 2
assert stop
def test_postparsing_hook_emptystatement_first(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 1
# register another function but it shouldn't be called
app.reset_counters()
stop = app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 1
def test_postparsing_hook_emptystatement_second(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 1
# register another function and make sure it gets called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 2
# register a third function which shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 2
def test_postparsing_hook_exception(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert err
assert app.called_postparsing == 1
# register another function, but it shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert err
assert app.called_postparsing == 1
###
#
# test precmd hooks
#
#####
def test_register_precmd_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_too_many_parameters)
def test_register_precmd_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_no_parameter_annotation)
def test_register_precmd_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_wrong_parameter_annotation)
def test_register_precmd_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_no_return_annotation)
def test_register_precmd_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_wrong_return_annotation)
def test_precmd_hook(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# without registering any hooks, precmd() should be called
assert app.called_precmd == 1
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with one hook registered, we should get precmd() and the hook
assert app.called_precmd == 2
# register the function again, so it should be called twice
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with two hooks registered, we should get precmd() and both hooks
assert app.called_precmd == 3
def test_precmd_hook_emptystatement_first(capsys):
app = PluggedApp()
app.register_precmd_hook(app.precmd_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# since the registered hooks are called before precmd(), if a registered
# hook throws an exception, precmd() is never called
assert app.called_precmd == 1
# register another function but it shouldn't be called
app.reset_counters()
stop = app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents precmd() from being
# called
assert app.called_precmd == 1
def test_precmd_hook_emptystatement_second(capsys):
app = PluggedApp()
app.register_precmd_hook(app.precmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
# with one hook registered, we should get precmd() and the hook
assert app.called_precmd == 2
# register another function and make sure it gets called
app.reset_counters()
app.register_precmd_hook(app.precmd_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# since the registered hooks are called before precmd(), if a registered
# hook throws an exception, precmd() is never called
assert app.called_precmd == 2
# register a third function which shouldn't be called
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# the exception raised by the second hook should prevent the third
# hook from being called. since the registered hooks are called before precmd(),
# if a registered hook throws an exception, precmd() is never called
assert app.called_precmd == 2
###
#
# test postcmd hooks
#
####
def test_register_postcmd_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_too_many_parameters)
def test_register_postcmd_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_no_parameter_annotation)
def test_register_postcmd_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_wrong_parameter_annotation)
def test_register_postcmd_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_no_return_annotation)
def test_register_postcmd_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_wrong_return_annotation)
def test_postcmd(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# without registering any hooks, postcmd() should be called
assert app.called_postcmd == 1
app.reset_counters()
app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
    # with one hook registered, we should get postcmd() and the hook
assert app.called_postcmd == 2
# register the function again, so it should be called twice
app.reset_counters()
app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
    # with two hooks registered, we should get postcmd() and both hooks
assert app.called_postcmd == 3
def test_postcmd_exception_first(capsys):
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# since the registered hooks are called before postcmd(), if a registered
# hook throws an exception, postcmd() is never called. So we should have
# a count of one because we called the hook that raised the exception
assert app.called_postcmd == 1
# register another function but it shouldn't be called
app.reset_counters()
stop = app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents postcmd() from being
# called
assert app.called_postcmd == 1
def test_postcmd_exception_second(capsys):
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
# with one hook registered, we should get the hook and postcmd()
assert app.called_postcmd == 2
# register another function which should be called
app.reset_counters()
stop = app.register_postcmd_hook(app.postcmd_hook_exception)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents postcmd() from being
# called. So we have the first hook, and the second hook, which raised
# the exception
assert app.called_postcmd == 2
##
#
# command finalization
#
###
def test_register_cmdfinalization_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_too_many_parameters)
def test_register_cmdfinalization_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_no_parameter_annotation)
def test_register_cmdfinalization_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_wrong_parameter_annotation)
def test_register_cmdfinalization_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_no_return_annotation)
def test_register_cmdfinalization_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_wrong_return_annotation)
def test_cmdfinalization(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 0
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 1
# register the function again, so it should be called twice
app.reset_counters()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
def test_cmdfinalization_stop_first(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_stop)
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
assert stop
def test_cmdfinalization_stop_second(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.register_cmdfinalization_hook(app.cmdfinalization_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
assert stop
def test_cmdfinalization_hook_exception(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
assert app.called_cmdfinalization == 1
# register another function, but it shouldn't be called
app.reset_counters()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
assert app.called_cmdfinalization == 1
def test_cmdfinalization_hook_system_exit():
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_system_exit)
stop = app.onecmd_plus_hooks('say hello')
assert stop
assert app.called_cmdfinalization == 1
assert app.exit_code == 5
def test_cmdfinalization_hook_keyboard_interrupt():
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_keyboard_interrupt)
# First make sure KeyboardInterrupt isn't raised unless told to
stop = app.onecmd_plus_hooks('say hello', raise_keyboard_interrupt=False)
assert not stop
assert app.called_cmdfinalization == 1
# Now enable raising the KeyboardInterrupt
app.reset_counters()
with pytest.raises(KeyboardInterrupt):
stop = app.onecmd_plus_hooks('say hello', raise_keyboard_interrupt=True)
assert not stop
assert app.called_cmdfinalization == 1
# Now make sure KeyboardInterrupt isn't raised if stop is already True
app.reset_counters()
stop = app.onecmd_plus_hooks('quit', raise_keyboard_interrupt=True)
assert stop
assert app.called_cmdfinalization == 1
def test_cmdfinalization_hook_passthrough_exception():
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_passthrough_exception)
with pytest.raises(OSError) as excinfo:
app.onecmd_plus_hooks('say hello')
assert 'Pass me up' in str(excinfo.value)
assert app.called_cmdfinalization == 1
def test_skip_postcmd_hooks(capsys):
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook)
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
# Cause a SkipPostcommandHooks exception and verify no postcmd stuff runs but cmdfinalization_hook still does
app.onecmd_plus_hooks('skip_postcmd_hooks')
out, err = capsys.readouterr()
assert "In do_skip_postcmd_hooks" in out
assert app.called_postcmd == 0
assert app.called_cmdfinalization == 1
def test_cmd2_argparse_exception(capsys):
"""
Verify Cmd2ArgparseErrors raised after calling a command prevent postcmd events from
running but do not affect cmdfinalization events
"""
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook)
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
# First generate no exception and make sure postcmd_hook, postcmd, and cmdfinalization_hook run
app.onecmd_plus_hooks('argparse_cmd arg_val')
out, err = capsys.readouterr()
assert out == 'arg_val\n'
assert not err
assert app.called_postcmd == 2
assert app.called_cmdfinalization == 1
app.reset_counters()
# Next cause an argparse exception and verify no postcmd stuff runs but cmdfinalization_hook still does
app.onecmd_plus_hooks('argparse_cmd')
out, err = capsys.readouterr()
assert not out
assert "Error: the following arguments are required: my_arg" in err
assert app.called_postcmd == 0
assert app.called_cmdfinalization == 1
| {
"content_hash": "2d6222728f580709868c29b2d1342dcb",
"timestamp": "",
"source": "github",
"line_count": 1005,
"max_line_length": 127,
"avg_line_length": 33.41492537313433,
"alnum_prop": 0.6915311774164731,
"repo_name": "python-cmd2/cmd2",
"id": "61b140ab07d7f37ad8c9daa18e4d0fc710f34064",
"size": "33617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1090127"
},
{
"name": "Shell",
"bytes": "3446"
}
],
"symlink_target": ""
} |
from injector import singleton, inject
from .. import DataBuilder
from ..measurement import Measurements
try:
import configparser
except ImportError:
    import ConfigParser as configparser
from sht1x.Sht1x import Sht1x as SHT1x
@singleton
class Sensor:
name = "SHT1x"
priority = 1
@inject
def __init__(self, config_parser: configparser.ConfigParser):
data_pin = int(config_parser.get('sht1x_sensor', 'data_pin'))
sck_pin = int(config_parser.get('sht1x_sensor', 'sck_pin'))
self.sht1x = SHT1x(dataPin=data_pin, sckPin=sck_pin, gpioMode=SHT1x.GPIO_BCM)
def measure(self, data_builder: DataBuilder, measurements: Measurements) -> None:
(temperature, humidity) = self.sht1x.read_temperature_C_and_humidity()
if temperature > -40.0:
try:
dew_point = self.sht1x.calculate_dew_point(temperature, humidity)
dew_point = round(dew_point, 2)
except ValueError:
dew_point = None
temperature = round(temperature, 2)
humidity = round(humidity, 2)
else:
temperature = None
humidity = None
dew_point = None
if temperature and humidity and dew_point and -30 < temperature < 80 and 5 < humidity <= 100:
measurements.temperature = temperature
measurements.relative_humidity = humidity
data_builder.add(self.name, "temperature", "°C", temperature)
if dew_point:
data_builder.add(self.name, "dew point", "°C", dew_point, True)
data_builder.add(self.name, "relative humidity", "%", humidity)
| {
"content_hash": "2cd58d9f1d9566b011cb6abfca4de600",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 101,
"avg_line_length": 33.42,
"alnum_prop": 0.6223818073010173,
"repo_name": "wuan/klimalogger",
"id": "a801ddf70ae76b8f78c85860a1448888454ef682",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "klimalogger/sensor/sht1x_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31507"
}
],
"symlink_target": ""
} |
"""
Create a profile object from a `numpy.ndarray` of data
======================================================
Use the TAMOC ambient module to create profiles in netCDF format for use by
TAMOC from idealized laboratory data. This file demonstrates working with the
data input directly by the user as a `numpy.ndarray`.
This script demonstrates the new version of the `ambient.Profile` object, which uses `xarray`. For the older version, which used netCDF datasets, see the script with the same file name but prepended by 'nc'.
Notes
-----
Much of the input data in this script (e.g., columns to extract, column names,
lat and lon location data, date and time, etc.) must be known from the user
(e.g., in this case mostly fictitious) and is hand-coded in the script
text.
Returns
-------
This script generates an `ambient.Profile` object, whose netCDF file is written
to the file::
./Profiles/Profiles/Lab.nc
"""
# S. Socolofsky, July 2013, Texas A&M University <[email protected]>.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from tamoc import ambient
from tamoc import seawater
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Create the synthetic temperature and salinity profiles from idealized
# laboratory conditions
z = np.array([0.0, 2.4])
T = np.array([21.0, 20.0]) + 273.15
S = np.array([0.0, 30.0])
data = np.vstack((z, T, S)).transpose()
ztsp_units = ['m', 'K', 'psu', 'Pa']
# Create an ambient.Profile object for this dataset
lab = ambient.Profile(data, ztsp_units=ztsp_units)
# Plot the density profile using the interpolation function
z = np.linspace(lab.z_min,
lab.z_max, 250)
rho = np.zeros(z.shape)
tsp = lab.get_values(z, ['temperature', 'salinity', 'pressure'])
for i in range(len(z)):
rho[i] = seawater.density(tsp[i,0], tsp[i,1], tsp[i,2])
fig = plt.figure()
ax1 = plt.subplot(121)
ax1.plot(rho, z)
ax1.set_xlabel('Density (kg/m^3)')
ax1.set_ylabel('Depth (m)')
ax1.invert_yaxis()
ax1.set_title('Computed data')
plt.show()
| {
"content_hash": "e2122b93f74b2a5449a5a72e61e920ec",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 210,
"avg_line_length": 32.2463768115942,
"alnum_prop": 0.647191011235955,
"repo_name": "socolofs/tamoc",
"id": "4b557cf9a0c75fd2dbe41822f14230964f3e3542",
"size": "2225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/ambient/np_profile_from_lab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "70820"
},
{
"name": "Jupyter Notebook",
"bytes": "11853"
},
{
"name": "Python",
"bytes": "1346822"
}
],
"symlink_target": ""
} |
'''
Given an input string, reverse the string word by word.
For example,
Given s = "the sky is blue",
return "blue is sky the".
Update (2015-02-12):
For C programmers: Try to solve it in-place in O(1) space.
click to show clarification.
Clarification:
What constitutes a word?
A sequence of non-space characters constitutes a word.
Could the input string contain leading or trailing spaces?
Yes. However, your reversed string should not contain leading or trailing spaces.
How about multiple spaces between two words?
Reduce them to a single space in the reversed string.
'''
class Solution:
# @param s, a string
# @return a string
def reverseWords_1(self, s):
return ' '.join(s.split()[::-1])
def reverseWords(self, s):
result = ''
word = ''
for char in s:
if char !=' ':
word += char
elif len(word) > 0:
if result == '':
result = word + result
else:
result = word + ' '+ result
                word = ''  # Note: reset word to '' here
        if len(word) > 0:  # This extra check handles the last word, e.g. "a" ---> 'a'
if result != '':
result = ' ' + result
result = word + result
return result
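# Example usage (illustrative, not part of the original solution):
#   s = Solution()
#   assert s.reverseWords("the sky is blue") == "blue is sky the"
#   assert s.reverseWords_1("  hello   world  ") == "world hello"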
| {
"content_hash": "37490ce64ae6852c22df0d455846ae28",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 87,
"avg_line_length": 29.933333333333334,
"alnum_prop": 0.562731997030438,
"repo_name": "UmassJin/Leetcode",
"id": "2adaae4dfcc4202d772e34ab21be2d191d615c05",
"size": "1347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Array/Reverse_Words_in_a_String.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "717672"
}
],
"symlink_target": ""
} |
"""Base class for linear operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperator"]
# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.
@tf_export("linalg.LinearOperator")
@six.add_metaclass(abc.ABCMeta)
class LinearOperator(object):
"""Base class defining a [batch of] linear operator[s].
Subclasses of `LinearOperator` provide access to common methods on a
(batch) matrix, without the need to materialize the matrix. This allows:
* Matrix free computations
* Operators that take advantage of special structure, while providing a
consistent API to users.
#### Subclassing
To enable a public method, subclasses should implement the leading-underscore
version of the method. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable
`matmul(x, adjoint=False, name="matmul")` a subclass should implement
`_matmul(x, adjoint=False)`.
#### Performance contract
Subclasses should only implement the assert methods
(e.g. `assert_non_singular`) if they can be done in less than `O(N^3)`
time.
Class docstrings should contain an explanation of computational complexity.
Since this is a high-performance library, attention should be paid to detail,
and explanations can include constants as well as Big-O notation.
#### Shape compatibility
`LinearOperator` subclasses should operate on a [batch] matrix with
compatible shape. Class docstrings should define what is meant by compatible
shape. Some subclasses may not support batching.
Examples:
`x` is a batch matrix with compatible shape for `matmul` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
x.shape = [B1,...,Bb] + [N, R]
```
`rhs` is a batch matrix with compatible shape for `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
rhs.shape = [B1,...,Bb] + [M, R]
```
#### Example docstring for subclasses.
This operator acts like a (batch) matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `m x n` matrix. Again, this matrix `A` may not be materialized, but for
purposes of identifying and working with compatible arguments the shape is
relevant.
Examples:
```python
some_tensor = ... shape = ????
operator = MyLinOp(some_tensor)
operator.shape()
==> [2, 4, 4]
operator.log_abs_determinant()
==> Shape [2] Tensor
x = ... Shape [2, 4, 5] Tensor
operator.matmul(x)
==> Shape [2, 4, 5] Tensor
```
#### Shape compatibility
This operator acts on batch matrices with compatible shape.
FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE
#### Performance
FILL THIS IN
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
dtype,
graph_parents=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize the `LinearOperator`.
**This is a private method for subclass use.**
**Subclasses should copy-paste this `__init__` documentation.**
Args:
dtype: The type of the this `LinearOperator`. Arguments to `matmul` and
`solve` will have to be this type.
graph_parents: Python list of graph prerequisites of this `LinearOperator`
Typically tensors that are passed during initialization.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `dtype` is real, this is equivalent to being symmetric.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If any member of graph_parents is `None` or not a `Tensor`.
ValueError: If hints are set incorrectly.
"""
# Check and auto-set flags.
if is_positive_definite:
if is_non_singular is False:
raise ValueError("A positive definite matrix is always non-singular.")
is_non_singular = True
if is_non_singular:
if is_square is False:
raise ValueError("A non-singular matrix is always square.")
is_square = True
if is_self_adjoint:
if is_square is False:
raise ValueError("A self-adjoint matrix is always square.")
is_square = True
self._is_square_set_or_implied_by_hints = is_square
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._graph_parents = graph_parents
self._is_non_singular = is_non_singular
self._is_self_adjoint = is_self_adjoint
self._is_positive_definite = is_positive_definite
self._name = name or type(self).__name__
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(
name, values=((values or []) + self._graph_parents)) as scope:
yield scope
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `LinearOperator`."""
return self._dtype
@property
def name(self):
"""Name prepended to all ops created by this `LinearOperator`."""
return self._name
@property
def graph_parents(self):
"""List of graph dependencies of this `LinearOperator`."""
return self._graph_parents
@property
def is_non_singular(self):
return self._is_non_singular
@property
def is_self_adjoint(self):
return self._is_self_adjoint
@property
def is_positive_definite(self):
return self._is_positive_definite
@property
def is_square(self):
"""Return `True/False` depending on if this operator is square."""
# Static checks done after __init__. Why? Because domain/range dimension
# sometimes requires lots of work done in the derived class after init.
auto_square_check = self.domain_dimension == self.range_dimension
if self._is_square_set_or_implied_by_hints is False and auto_square_check:
raise ValueError(
"User set is_square hint to False, but the operator was square.")
if self._is_square_set_or_implied_by_hints is None:
return auto_square_check
return self._is_square_set_or_implied_by_hints
@abc.abstractmethod
def _shape(self):
# Write this in derived class to enable all static shape methods.
raise NotImplementedError("_shape is not implemented.")
@property
def shape(self):
"""`TensorShape` of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb, M, N])`, equivalent to `A.get_shape()`.
Returns:
`TensorShape`, statically determined, may be undefined.
"""
return self._shape()
@abc.abstractmethod
def _shape_tensor(self):
raise NotImplementedError("_shape_tensor is not implemented.")
def shape_tensor(self, name="shape_tensor"):
"""Shape of this `LinearOperator`, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.shape.is_fully_defined():
return linear_operator_util.shape_tensor(self.shape.as_list())
else:
return self._shape_tensor()
@property
def batch_shape(self):
"""`TensorShape` of batch dimensions of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb])`, equivalent to `A.get_shape()[:-2]`
Returns:
`TensorShape`, statically determined, may be undefined.
"""
# Derived classes get this "for free" once .shape is implemented.
return self.shape[:-2]
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of batch dimensions of this operator, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb]`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.batch_shape.is_fully_defined():
return linear_operator_util.shape_tensor(
self.batch_shape.as_list(), name="batch_shape")
else:
return self.shape_tensor()[:-2]
@property
def tensor_rank(self, name="tensor_rank"):
"""Rank (in the sense of tensors) of matrix corresponding to this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
Args:
name: A name for this `Op`.
Returns:
Python integer, or None if the tensor rank is undefined.
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
return self.shape.ndims
def tensor_rank_tensor(self, name="tensor_rank_tensor"):
"""Rank (in the sense of tensors) of matrix corresponding to this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`, determined at runtime.
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.tensor_rank is not None:
return ops.convert_to_tensor(self.tensor_rank)
else:
return array_ops.size(self.shape_tensor())
@property
def domain_dimension(self):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.rank is None:
return tensor_shape.Dimension(None)
else:
return self.shape.dims[-1]
def domain_dimension_tensor(self, name="domain_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
return self.shape_tensor()[-1]
@property
def range_dimension(self):
"""Dimension (in the sense of vector spaces) of the range of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.dims:
return self.shape.dims[-2]
else:
return tensor_shape.Dimension(None)
def range_dimension_tensor(self, name="range_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the range of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
dim_value = tensor_shape.dimension_value(self.range_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
return self.shape_tensor()[-2]
def _assert_non_singular(self):
"""Private default implementation of _assert_non_singular."""
logging.warn(
"Using (possibly slow) default implementation of assert_non_singular."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return self.assert_positive_definite()
else:
singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
# TODO(langmore) Add .eig and .cond as methods.
cond = (math_ops.reduce_max(singular_values, axis=-1) /
math_ops.reduce_min(singular_values, axis=-1))
return check_ops.assert_less(
cond,
self._max_condition_number_to_be_non_singular(),
message="Singular matrix up to precision epsilon.")
def _max_condition_number_to_be_non_singular(self):
"""Return the maximum condition number that we consider nonsingular."""
with ops.name_scope("max_nonsingular_condition_number"):
dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps
eps = math_ops.cast(
math_ops.reduce_max([
100.,
math_ops.cast(self.range_dimension_tensor(), self.dtype),
math_ops.cast(self.domain_dimension_tensor(), self.dtype)
]), self.dtype) * dtype_eps
return 1. / eps
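  # Illustrative arithmetic (not part of the original file): for a float64
  # operator with range and domain dimension 1000, dtype_eps is roughly 2.2e-16,
  # so eps is roughly 1000 * 2.2e-16 = 2.2e-13 and the returned cutoff is about
  # 1 / eps = 4.5e12. Condition numbers above that cutoff cause the default
  # assert_non_singular check to fail.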
def assert_non_singular(self, name="assert_non_singular"):
"""Returns an `Op` that asserts this operator is non singular.
This operator is considered non-singular if
```
ConditionNumber < max{100, range_dimension, domain_dimension} * eps,
eps := np.finfo(self.dtype.as_numpy_dtype).eps
```
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is singular.
"""
with self._name_scope(name):
return self._assert_non_singular()
def _assert_positive_definite(self):
"""Default implementation of _assert_positive_definite."""
logging.warn(
"Using (possibly slow) default implementation of "
"assert_positive_definite."
" Requires conversion to a dense matrix and O(N^3) operations.")
# If the operator is self-adjoint, then checking that
# Cholesky decomposition succeeds + results in positive diag is necessary
# and sufficient.
if self.is_self_adjoint:
return check_ops.assert_positive(
array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())),
message="Matrix was not positive definite.")
# We have no generic check for positive definite.
raise NotImplementedError("assert_positive_definite is not implemented.")
def assert_positive_definite(self, name="assert_positive_definite"):
"""Returns an `Op` that asserts this operator is positive definite.
Here, positive definite means that the quadratic form `x^H A x` has positive
real part for all nonzero `x`. Note that we do not require the operator to
be self-adjoint to be positive definite.
Args:
name: A name to give this `Op`.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not positive definite.
"""
with self._name_scope(name):
return self._assert_positive_definite()
def _assert_self_adjoint(self):
dense = self.to_dense()
logging.warn(
"Using (possibly slow) default implementation of assert_self_adjoint."
" Requires conversion to a dense matrix.")
return check_ops.assert_equal(
dense,
linalg.adjoint(dense),
message="Matrix was not equal to its adjoint.")
def assert_self_adjoint(self, name="assert_self_adjoint"):
"""Returns an `Op` that asserts this operator is self-adjoint.
Here we check that this operator is *exactly* equal to its hermitian
transpose.
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not self-adjoint.
"""
with self._name_scope(name):
return self._assert_self_adjoint()
def _check_input_dtype(self, arg):
"""Check that arg.dtype == self.dtype."""
if arg.dtype != self.dtype:
raise TypeError(
"Expected argument to have dtype %s. Found: %s in tensor %s" %
(self.dtype, arg.dtype, arg))
@abc.abstractmethod
def _matmul(self, x, adjoint=False, adjoint_arg=False):
raise NotImplementedError("_matmul is not implemented.")
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
"""Transform [batch] matrix `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
X = ... # shape [..., N, R], batch matrix, R > 0.
Y = operator.matmul(X)
Y.shape
==> [..., M, R]
Y[..., :, r] = sum_j A[..., :, j] X[j, r]
```
Args:
x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as
`self`. See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
the hermitian transpose (transposition and complex conjugation).
name: A name for this `Op`.
Returns:
A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
as `self`.
"""
if isinstance(x, LinearOperator):
if adjoint or adjoint_arg:
raise ValueError(".matmul not supported with adjoints.")
if (x.range_dimension is not None and
self.domain_dimension is not None and
x.range_dimension != self.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `x` to have dimension"
" {} but got {}.".format(self.domain_dimension, x.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.matmul(self, x)
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
x.get_shape()[arg_dim])
return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _matvec(self, x, adjoint=False):
x_mat = array_ops.expand_dims(x, axis=-1)
y_mat = self.matmul(x_mat, adjoint=adjoint)
return array_ops.squeeze(y_mat, axis=-1)
def matvec(self, x, adjoint=False, name="matvec"):
"""Transform [batch] vector `x` with left multiplication: `x --> Ax`.
```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
X = ... # shape [..., N], batch vector
Y = operator.matvec(X)
Y.shape
==> [..., M]
Y[..., :] = sum_j A[..., :, j] X[..., j]
```
Args:
x: `Tensor` with compatible shape and same `dtype` as `self`.
`x` is treated as a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
name: A name for this `Op`.
Returns:
A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(x.get_shape()[-1])
return self._matvec(x, adjoint=adjoint)
def _determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return math_ops.exp(self.log_abs_determinant())
return linalg_ops.matrix_determinant(self.to_dense())
def determinant(self, name="det"):
"""Determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._determinant()
def _log_abs_determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
_, log_abs_det = linalg.slogdet(self.to_dense())
return log_abs_det
def log_abs_determinant(self, name="log_abs_det"):
"""Log absolute value of determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._log_abs_determinant()
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
"""Default implementation of _solve."""
if self.is_square is False:
raise NotImplementedError(
"Solve is not yet implemented for non-square operators.")
logging.warn(
"Using (possibly slow) default implementation of solve."
" Requires conversion to a dense matrix and O(N^3) operations.")
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
if self._can_use_cholesky():
return linear_operator_util.cholesky_solve_with_broadcast(
linalg_ops.cholesky(self.to_dense()), rhs)
return linear_operator_util.matrix_solve_with_broadcast(
self.to_dense(), rhs, adjoint=adjoint)
def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
"""Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve R > 0 linear systems for every member of the batch.
RHS = ... # shape [..., M, R]
X = operator.solve(RHS)
# X[..., :, r] is the solution to the r'th linear system
# sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]
operator.matmul(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator and compatible shape.
`rhs` is treated like a [batch] matrix meaning for every set of leading
dimensions, the last two dimensions defines a matrix.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
is the hermitian transpose (transposition and complex conjugation).
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
if self.is_non_singular is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"be singular.")
if self.is_square is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name, values=[rhs]):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
rhs.get_shape()[arg_dim])
return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _solvevec(self, rhs, adjoint=False):
"""Default implementation of _solvevec."""
rhs_mat = array_ops.expand_dims(rhs, axis=-1)
solution_mat = self.solve(rhs_mat, adjoint=adjoint)
return array_ops.squeeze(solution_mat, axis=-1)
def solvevec(self, rhs, adjoint=False, name="solve"):
"""Solve single equation with best effort: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve one linear system for every member of the batch.
RHS = ... # shape [..., M]
X = operator.solvevec(RHS)
# X is the solution to the linear system
# sum_j A[..., :, j] X[..., j] = RHS[..., :]
operator.matvec(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator.
`rhs` is treated like a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector. See class docstring
for definition of compatibility regarding batch dimensions.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
with self._name_scope(name, values=[rhs]):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
rhs.get_shape()[-1])
return self._solvevec(rhs, adjoint=adjoint)
def inverse(self, name="inverse"):
"""Returns the Inverse of this `LinearOperator`.
Given `A` representing this `LinearOperator`, return a `LinearOperator`
representing `A^-1`.
Args:
name: A name scope to use for ops added by this method.
Returns:
`LinearOperator` representing inverse of this matrix.
Raises:
ValueError: When the `LinearOperator` is not hinted to be `non_singular`.
"""
if self.is_square is False: # pylint: disable=g-bool-id-comparison
raise ValueError("Cannot take the Inverse: This operator represents "
"a non square matrix.")
if self.is_non_singular is False: # pylint: disable=g-bool-id-comparison
raise ValueError("Cannot take the Inverse: This operator represents "
"a singular matrix.")
with self._name_scope(name):
return linear_operator_algebra.inverse(self)
def cholesky(self, name="cholesky"):
"""Returns a Cholesky factor as a `LinearOperator`.
Given `A` representing this `LinearOperator`, if `A` is positive definite
self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky
decomposition.
Args:
name: A name for this `Op`.
Returns:
`LinearOperator` which represents the lower triangular matrix
in the Cholesky decomposition.
Raises:
ValueError: When the `LinearOperator` is not hinted to be positive
definite and self adjoint.
"""
if not self._can_use_cholesky():
raise ValueError("Cannot take the Cholesky decomposition: "
"Not a positive definite self adjoint matrix.")
with self._name_scope(name):
return linear_operator_algebra.cholesky(self)
def _to_dense(self):
"""Generic and often inefficient implementation. Override often."""
logging.warn("Using (possibly slow) default implementation of to_dense."
" Converts by self.matmul(identity).")
if self.batch_shape.is_fully_defined():
batch_shape = self.batch_shape
else:
batch_shape = self.batch_shape_tensor()
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
n = dim_value
else:
n = self.domain_dimension_tensor()
eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
return self.matmul(eye)
def to_dense(self, name="to_dense"):
"""Return a dense (batch) matrix representing this operator."""
with self._name_scope(name):
return self._to_dense()
def _diag_part(self):
"""Generic and often inefficient implementation. Override often."""
return array_ops.matrix_diag_part(self.to_dense())
def diag_part(self, name="diag_part"):
"""Efficiently get the [batch] diagonal part of this operator.
If this operator has shape `[B1,...,Bb, M, N]`, this returns a
`Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where
`diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.
```
my_operator = LinearOperatorDiag([1., 2.])
# Efficiently get the diagonal
my_operator.diag_part()
==> [1., 2.]
# Equivalent, but inefficient method
tf.matrix_diag_part(my_operator.to_dense())
==> [1., 2.]
```
Args:
name: A name for this `Op`.
Returns:
diag_part: A `Tensor` of same `dtype` as self.
"""
with self._name_scope(name):
return self._diag_part()
def _trace(self):
return math_ops.reduce_sum(self.diag_part(), axis=-1)
def trace(self, name="trace"):
"""Trace of the linear operator, equal to sum of `self.diag_part()`.
If the operator is square, this is also the sum of the eigenvalues.
Args:
name: A name for this `Op`.
Returns:
Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
"""
with self._name_scope(name):
return self._trace()
def _add_to_tensor(self, x):
# Override if a more efficient implementation is available.
return self.to_dense() + x
def add_to_tensor(self, x, name="add_to_tensor"):
"""Add matrix represented by this operator to `x`. Equivalent to `A + x`.
Args:
x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
return self._add_to_tensor(x)
def _can_use_cholesky(self):
return self.is_self_adjoint and self.is_positive_definite
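# Illustrative sketch (not part of the original file): a minimal concrete
# subclass over a dense [batch] matrix, showing the leading-underscore hooks
# the class docstring asks implementers to provide (_shape, _shape_tensor,
# _matmul). It loosely mirrors a full-matrix operator and is meant only as an
# example of the subclassing pattern, not as a supported implementation.
class _LinearOperatorDenseSketch(LinearOperator):

  def __init__(self, matrix, name="LinearOperatorDenseSketch"):
    # `matrix` is assumed to be a [batch] matrix shaped [..., M, N].
    self._matrix = ops.convert_to_tensor(matrix, name="matrix")
    super(_LinearOperatorDenseSketch, self).__init__(
        dtype=self._matrix.dtype,
        graph_parents=[self._matrix],
        name=name)

  def _shape(self):
    return self._matrix.get_shape()

  def _shape_tensor(self):
    return array_ops.shape(self._matrix)

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    return math_ops.matmul(
        self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)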
| {
"content_hash": "4c48fc76244f662d0e76f6209ebccffd",
"timestamp": "",
"source": "github",
"line_count": 978,
"max_line_length": 99,
"avg_line_length": 34.83537832310839,
"alnum_prop": 0.6464821391881183,
"repo_name": "apark263/tensorflow",
"id": "4c99e86dc59a8c39abb57494ae84bcfdc13faa1b",
"size": "34758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/linalg/linear_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "561314"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "54581021"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1373561"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "899393"
},
{
"name": "Jupyter Notebook",
"bytes": "2618454"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "75994"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14340"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "44616385"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "504099"
},
{
"name": "Smarty",
"bytes": "10072"
}
],
"symlink_target": ""
} |
import re
from .exceptions import NotAllowed, LoginRequired
def check_permissions(path, restrictions, request) -> bool:
"""
Enforces the rules specified in `CRM_CONTENT_RESTRICTIONS`.
"""
for rule in restrictions:
        if not (rule['path'] == '*' or (path is not None and re.match(r'^' + rule['path'] + r'.+$', path))):
            continue
if rule.get('in_group', None) and rule['in_group'] not in request.user.groups.values_list('name', flat=True):
raise NotAllowed()
if rule.get('login', False) and not request.user.is_authenticated():
raise LoginRequired()
def get_title(path) -> str:
"""
Parses a filename and returns a formatted title.
"""
name = path.split('/')[-1]
name = re.sub('--', ': ', name)
name = re.sub('-', ' ', name)
return name.title()
def get_slug(path) -> str:
""""""
path = 'index' if path is None else path
path = path[:-1] if path[-1] == '/' else path
return path | {
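# Illustrative sketch (not part of the original module): check_permissions
# expects CRM_CONTENT_RESTRICTIONS-style rules shaped like the list below;
# each rule names a 'path' prefix ('*' matches every page) plus optional
# 'login' and 'in_group' requirements. The names below are examples only, not
# part of the real settings.
_EXAMPLE_RESTRICTIONS = [
    {'path': '*', 'login': True},                # every page requires a login
    {'path': 'internal/', 'in_group': 'staff'},  # internal/* also requires group 'staff'
]
# With these rules, check_permissions('internal/reports', _EXAMPLE_RESTRICTIONS, request)
# raises LoginRequired for an anonymous user (via the '*' rule) and NotAllowed for an
# authenticated user outside the 'staff' group; get_title('internal/quarterly-report--2014')
# returns 'Quarterly Report: 2014'.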
"content_hash": "6533bec3fa3bb55350df17ae2310ee98",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 117,
"avg_line_length": 29.2,
"alnum_prop": 0.5743639921722113,
"repo_name": "zulumarketing/crm",
"id": "cf60d6dcc85cc863f98d3382daa598e5cefe2237",
"size": "1047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crm/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7702"
}
],
"symlink_target": ""
} |
r"""
This script transfers pages from a source wiki to a target wiki.
It also copies edit history to a subpage.
-tolang: The target site code.
-tosite: The target site family.
-prefix: Page prefix on the new site.
-overwrite: Existing pages are skipped by default. Use this option to
overwrite pages.
Internal links are *not* repaired!
Pages to work on can be specified using any of:
¶ms;
Example commands:
Transfer all pages in category "Query service" from the English Wikipedia to
the Arabic Wiktionary, adding "Wiktionary:Import enwp/" as prefix:
python pwb.py transferbot -family:wikipedia -lang:en -cat:"Query service" \
-tofamily:wiktionary -tolang:ar -prefix:"Wiktionary:Import enwp/"
Copy the template "Query service" from the Toolserver wiki to wikitech:
python pwb.py transferbot -family:wikipedia -lang:en \
-tofamily:wiktionary -tolang:ar -page:"Template:Query service"
"""
#
# (C) Merlijn van Deen, 2014
# (C) Pywikibot team, 2015-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import pywikibot
from pywikibot import pagegenerators
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
}
class WikiTransferException(Exception):
"""Base class for exceptions from this script.
Makes it easier for clients to catch all expected exceptions that the
script might throw
"""
pass
class TargetSiteMissing(WikiTransferException):
"""Thrown when the target site is the same as the source site.
Based on the way each are initialized, this is likely to happen when the
target site simply hasn't been specified.
"""
pass
class TargetPagesMissing(WikiTransferException):
"""Thrown if no page range has been specified to operate on."""
pass
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
local_args = pywikibot.handle_args(args)
fromsite = pywikibot.Site()
tolang = fromsite.code
tofamily = fromsite.family.name
prefix = ''
overwrite = False
gen_args = []
genFactory = pagegenerators.GeneratorFactory()
for arg in local_args:
if genFactory.handleArg(arg):
gen_args.append(arg)
continue
if arg.startswith('-tofamily'):
tofamily = arg[len('-tofamily:'):]
elif arg.startswith('-tolang'):
tolang = arg[len('-tolang:'):]
elif arg.startswith('-prefix'):
prefix = arg[len('-prefix:'):]
elif arg == "-overwrite":
overwrite = True
tosite = pywikibot.Site(tolang, tofamily)
if fromsite == tosite:
raise TargetSiteMissing('Target site not different from source site')
gen = genFactory.getCombinedGenerator()
if not gen:
raise TargetPagesMissing('Target pages not specified')
gen_args = ' '.join(gen_args)
pywikibot.output(u"""
Page transfer configuration
---------------------------
Source: %(fromsite)r
Target: %(tosite)r
Pages to transfer: %(gen_args)s
Prefix for transferred pages: %(prefix)s
""" % {'fromsite': fromsite, 'tosite': tosite,
'gen_args': gen_args, 'prefix': prefix})
for page in gen:
summary = 'Moved page from %s' % page.title(asLink=True, insite=tosite)
targetpage = pywikibot.Page(tosite, prefix + page.title())
edithistpage = pywikibot.Page(tosite, prefix + page.title() +
'/edithistory')
if targetpage.exists() and not overwrite:
pywikibot.output(
u"Skipped %s (target page %s exists)" % (
page.title(asLink=True),
targetpage.title(asLink=True)
)
)
continue
pywikibot.output(u"Moving %s to %s..."
% (page.title(asLink=True),
targetpage.title(asLink=True)))
pywikibot.log("Getting page text.")
text = page.get(get_redirect=True)
text += ("<noinclude>\n\n<small>This page was moved from %s. It's "
"edit history can be viewed at %s</small></noinclude>"
% (page.title(asLink=True, insite=targetpage.site),
edithistpage.title(asLink=True, insite=targetpage.site)))
pywikibot.log("Getting edit history.")
historytable = page.getVersionHistoryTable()
pywikibot.log("Putting page text.")
targetpage.put(text, summary=summary)
pywikibot.log("Putting edit history.")
edithistpage.put(historytable, summary=summary)
if __name__ == "__main__":
try:
main()
except TargetSiteMissing as e:
pywikibot.error(u'Need to specify a target site and/or language')
pywikibot.error(u'Try running this script with -help for help/usage')
pywikibot.exception()
except TargetPagesMissing as e:
pywikibot.error(u'Need to specify a page range')
pywikibot.error(u'Try running this script with -help for help/usage')
pywikibot.exception()
| {
"content_hash": "8c7072ba3865ffbe7cac675dd0c121c9",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 79,
"avg_line_length": 29.427777777777777,
"alnum_prop": 0.6299792335284123,
"repo_name": "magul/pywikibot-core",
"id": "e727e644e2bf5ef3c12e77da99f032a0128d48f0",
"size": "5339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/transferbot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4538707"
}
],
"symlink_target": ""
} |
import os
import zope.testrunner
from sparc.testing.fixture import test_suite_mixin
class test_suite(test_suite_mixin):
package = 'sparc.catalog.repoze'
module = 'field'
if __name__ == '__main__':
zope.testrunner.run([
'--path', os.path.dirname(__file__),
'--tests-pattern', os.path.splitext(
os.path.basename(__file__))[0]
]) | {
"content_hash": "a24880200d5ffcd7826a708063d2cc8d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 29.0625,
"alnum_prop": 0.49032258064516127,
"repo_name": "davisd50/sparc.catalog",
"id": "22bc5655b67e46cf0dff95b57792908881c00a57",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sparc/catalog/repoze/tests/test_field.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21673"
}
],
"symlink_target": ""
} |
import glob
import os
import unittest
import warnings
import comtypes.typeinfo
import comtypes.client
import comtypes.client._generate
from comtypes.test import requires
requires("typelibs")
# filter warnings about interfaces without a base interface; they will
# be skipped in the code generation.
warnings.filterwarnings("ignore",
"Ignoring interface .* which has no base interface",
UserWarning)
# don't print messages when typelib wrappers are generated
comtypes.client._generate.__verbose__ = False
sysdir = os.path.join(os.environ["SystemRoot"], "system32")
progdir = os.environ["ProgramFiles"]
common_progdir = os.environ["CommonProgramFiles"]
# This test takes quite some time. It tries to build wrappers for ALL
# .dll, .tlb, and .ocx files in the system directory which contain typelibs.
class Test(unittest.TestCase):
def setUp(self):
"Do not write the generated files into the comtypes.gen directory"
comtypes.client.gen_dir = None
def tearDown(self):
comtypes.client.gen_dir = comtypes.client._find_gen_dir()
number = 0
def add_test(fname):
global number
def test(self):
try:
comtypes.typeinfo.LoadTypeLibEx(fname)
except WindowsError:
return
comtypes.client.GetModule(fname)
test.__doc__ = "test GetModule(%r)" % fname
setattr(Test, "test_%d" % number, test)
number += 1
for fname in glob.glob(os.path.join(sysdir, "*.ocx")):
add_test(fname)
for fname in glob.glob(os.path.join(sysdir, "*.tlb")):
add_test(fname)
for fname in glob.glob(os.path.join(progdir, r"Microsoft Office\Office*\*.tlb")):
if os.path.basename(fname).lower() in (
"grde50.olb", # UnicodeEncodeError
"xl5de32.olb", # UnicodeEncodeError
"grde50.olb", # UnicodeEncodeError
):
continue
add_test(fname)
for fname in glob.glob(os.path.join(progdir, r"Microsoft Office\Office*\*.olb")):
if os.path.basename(fname).lower() in (
"grde50.olb", # UnicodeEncodeError
"xl5de32.olb", # UnicodeEncodeError
"grde50.olb", # UnicodeEncodeError
):
continue
add_test(fname)
path = os.path.join(progdir, r"Microsoft Visual Studio .NET 2003\Visual Studio SDKs\DIA SDK\bin\msdia71.dll")
if os.path.isfile(path):
print "ADD", path
add_test(path)
for fname in glob.glob(os.path.join(common_progdir, r"Microsoft Shared\Speech\*.dll")):
add_test(fname)
for fname in glob.glob(os.path.join(sysdir, "*.dll")):
# these typelibs give errors:
if os.path.basename(fname).lower() in (
"syncom.dll", # interfaces without base interface
"msvidctl.dll", # assignment to None
"scardssp.dll", # assertionerror sizeof()
"sccsccp.dll", # assertionerror sizeof()
# Typeinfo in comsvcs.dll in XP 64-bit SP 1 is broken.
            # Oleview decompiles this code snippet (^ marks are mine):
#[
# odl,
# uuid(C7B67079-8255-42C6-9EC0-6994A3548780)
#]
#interface IAppDomainHelper : IDispatch {
# HRESULT _stdcall pfnShutdownCB(void* pv);
# HRESULT _stdcall Initialize(
# [in] IUnknown* pUnkAD,
# [in] IAppDomainHelper __MIDL_0028,
# ^^^^^^^^^^^^^^^^
# [in] void* pPool);
# HRESULT _stdcall pfnCallbackCB(void* pv);
# HRESULT _stdcall DoCallback(
# [in] IUnknown* pUnkAD,
# [in] IAppDomainHelper __MIDL_0029,
# ^^^^^^^^^^^^^^^^
# [in] void* pPool);
#};
"comsvcs.dll",
):
continue
add_test(fname)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "b763e6577da5864fe9e29cd02c79a520",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 109,
"avg_line_length": 33.66101694915254,
"alnum_prop": 0.5843403826787512,
"repo_name": "ezarko/cfn-init",
"id": "3a36126c628746bb4f757f7d40fd493baa26373e",
"size": "3972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comtypes/test/test_createwrappers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "539671"
}
],
"symlink_target": ""
} |
import json
from google.appengine.ext import ndb
class Insight(ndb.Model):
"""
Insights are the end result of analyzing a batch of data, such as the
average score for all matches in a year.
key_name is like '2012insights_matchavg'
"""
MATCH_HIGHSCORE = 0
MATCH_HIGHSCORE_BY_WEEK = 1
MATCH_AVERAGES_BY_WEEK = 2
ELIM_MATCH_AVERAGES_BY_WEEK = 3
SCORE_DISTRIBUTION = 4
ELIM_SCORE_DISTRIBUTION = 5
NUM_MATCHES = 6
BLUE_BANNERS = 7
CA_WINNER = 8
RCA_WINNERS = 9
WORLD_CHAMPIONS = 10
WORLD_FINALISTS = 11
DIVISION_WINNERS = 12
DIVISION_FINALISTS = 13
REGIONAL_DISTRICT_WINNERS = 14
SUCCESSFUL_ELIM_TEAMUPS = 15
MATCH_PREDICTIONS = 16
MATCH_AVERAGE_MARGINS_BY_WEEK = 17
ELIM_MATCH_AVERAGE_MARGINS_BY_WEEK = 18
WINNING_MARGIN_DISTRIBUTION = 19
ELIM_WINNING_MARGIN_DISTRIBUTION = 20
YEAR_SPECIFIC_BY_WEEK = 999
YEAR_SPECIFIC = 1000
# Used for datastore keys! Don't change unless you know what you're doing.
INSIGHT_NAMES = {
MATCH_HIGHSCORE: "match_highscore",
MATCH_HIGHSCORE_BY_WEEK: "match_highscore_by_week",
MATCH_AVERAGES_BY_WEEK: "match_averages_by_week",
ELIM_MATCH_AVERAGES_BY_WEEK: "elim_match_averages_by_week",
SCORE_DISTRIBUTION: "score_distribution",
ELIM_SCORE_DISTRIBUTION: "elim_score_distribution",
NUM_MATCHES: "num_matches",
BLUE_BANNERS: "blue_banners",
CA_WINNER: "ca_winner",
RCA_WINNERS: "rca_winners",
WORLD_CHAMPIONS: "world_champions",
WORLD_FINALISTS: "world_finalists",
DIVISION_WINNERS: "division_winners",
DIVISION_FINALISTS: "division_finalists",
REGIONAL_DISTRICT_WINNERS: "regional_district_winners",
SUCCESSFUL_ELIM_TEAMUPS: "successful_elim_teamups",
MATCH_PREDICTIONS: "match_predictions",
MATCH_AVERAGE_MARGINS_BY_WEEK: "match_average_margins_by_week",
ELIM_MATCH_AVERAGE_MARGINS_BY_WEEK: "elim_match_average_margins_by_week",
WINNING_MARGIN_DISTRIBUTION: "winning_margin_distribution",
ELIM_WINNING_MARGIN_DISTRIBUTION: "elim_winning_margin_distribution",
YEAR_SPECIFIC_BY_WEEK: "year_specific_by_week",
YEAR_SPECIFIC: "year_specific",
}
name = ndb.StringProperty(required=True) # general name used for sorting
year = ndb.IntegerProperty(
required=True
) # year this insight pertains to. year = 0 for overall insights
data_json = ndb.TextProperty(
required=True, indexed=False
) # JSON dictionary of the data of the insight
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
def __init__(self, *args, **kw):
self._data = None
super(Insight, self).__init__(*args, **kw)
@property
def data(self):
"""
Lazy load data_json as an OrderedDict
"""
if self._data is None:
self._data = json.loads(self.data_json)
return self._data
@property
def key_name(self):
"""
Returns the string of the key_name of the Insight object before writing it.
"""
return self.render_key_name(self.year, self.name)
@classmethod
def render_key_name(cls, year, name):
if year == 0:
return "insights" + "_" + str(name)
else:
return str(year) + "insights" + "_" + str(name)
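# Illustrative usage (not part of the original module): the datastore key name
# combines the year with an entry from INSIGHT_NAMES, e.g.
#   Insight.render_key_name(2012, Insight.INSIGHT_NAMES[Insight.MATCH_HIGHSCORE])
#   == "2012insights_match_highscore"
# while year 0 ("overall") insights drop the year prefix:
#   Insight.render_key_name(0, "blue_banners") == "insights_blue_banners"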
| {
"content_hash": "17e97e467593cb9aec97843bdf700d6e",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 83,
"avg_line_length": 34.79,
"alnum_prop": 0.6407013509629204,
"repo_name": "the-blue-alliance/the-blue-alliance",
"id": "ecfd76c6419faa0354296d95029e950086139408",
"size": "3479",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "src/backend/common/models/insight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "359032"
},
{
"name": "Dockerfile",
"bytes": "2503"
},
{
"name": "HTML",
"bytes": "5877313"
},
{
"name": "JavaScript",
"bytes": "755910"
},
{
"name": "Less",
"bytes": "244218"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "4321885"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "27698"
}
],
"symlink_target": ""
} |
import cgi
import datetime
import html
import logging
import re
import time
import urllib
import urllib.parse
from collections import deque
from dataclasses import dataclass
from typing import (
Any,
Callable,
Dict,
Generic,
List,
Match,
Optional,
Pattern,
Set,
Tuple,
TypeVar,
Union,
)
from urllib.parse import urlencode, urljoin, urlsplit
from xml.etree import ElementTree as etree
from xml.etree.ElementTree import Element, SubElement
import ahocorasick
import dateutil.parser
import dateutil.tz
import lxml.etree
import markdown
import markdown.blockprocessors
import markdown.inlinepatterns
import markdown.postprocessors
import markdown.treeprocessors
import markdown.util
import re2
import requests
from django.conf import settings
from markdown.blockparser import BlockParser
from markdown.extensions import codehilite, nl2br, sane_lists, tables
from soupsieve import escape as css_escape
from tlds import tld_set
from typing_extensions import TypedDict
from zerver.lib import mention as mention
from zerver.lib.cache import NotFoundInCache, cache_with_key
from zerver.lib.camo import get_camo_url
from zerver.lib.emoji import EMOTICON_RE, codepoint_to_name, name_to_codepoint, translate_emoticons
from zerver.lib.exceptions import MarkdownRenderingException
from zerver.lib.markdown import fenced_code
from zerver.lib.markdown.fenced_code import FENCE_RE
from zerver.lib.mention import FullNameInfo, MentionBackend, MentionData
from zerver.lib.outgoing_http import OutgoingSession
from zerver.lib.subdomains import is_static_or_current_realm_url
from zerver.lib.tex import render_tex
from zerver.lib.thumbnail import user_uploads_or_external
from zerver.lib.timeout import TimeoutExpired, timeout
from zerver.lib.timezone import common_timezones
from zerver.lib.types import LinkifierDict
from zerver.lib.url_encoding import encode_stream, hash_util_encode
from zerver.lib.url_preview import preview as link_preview
from zerver.models import EmojiInfo, Message, Realm, linkifiers_for_realm
ReturnT = TypeVar("ReturnT")
# Taken from
# https://html.spec.whatwg.org/multipage/system-state.html#safelisted-scheme
html_safelisted_schemes = (
"bitcoin",
"geo",
"im",
"irc",
"ircs",
"magnet",
"mailto",
"matrix",
"mms",
"news",
"nntp",
"openpgp4fpr",
"sip",
"sms",
"smsto",
"ssh",
"tel",
"urn",
"webcal",
"wtai",
"xmpp",
)
allowed_schemes = ("http", "https", "ftp", "file") + html_safelisted_schemes
def one_time(method: Callable[[], ReturnT]) -> Callable[[], ReturnT]:
"""
Use this decorator with extreme caution.
The function you wrap should have no dependency
on any arguments (no args, no kwargs) nor should
it depend on any global state.
"""
val = None
def cache_wrapper() -> ReturnT:
nonlocal val
if val is None:
val = method()
return val
return cache_wrapper
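# Illustrative note (not part of the original module): one_time simply caches
# the first return value of a zero-argument callable, e.g.
#
#   @one_time
#   def _compiled_pattern() -> Pattern[str]:   # hypothetical example
#       return re.compile("...")               # body runs once; later calls reuse the value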
class LinkInfo(TypedDict):
parent: Element
title: Optional[str]
index: Optional[int]
remove: Optional[Element]
@dataclass
class MessageRenderingResult:
rendered_content: str
mentions_wildcard: bool
mentions_user_ids: Set[int]
mentions_user_group_ids: Set[int]
alert_words: Set[str]
links_for_preview: Set[str]
user_ids_with_alert_words: Set[int]
potential_attachment_path_ids: List[str]
@dataclass
class DbData:
mention_data: MentionData
realm_uri: str
realm_alert_words_automaton: Optional[ahocorasick.Automaton]
active_realm_emoji: Dict[str, EmojiInfo]
sent_by_bot: bool
stream_names: Dict[str, int]
translate_emoticons: bool
# Format version of the Markdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1
_T = TypeVar("_T")
ElementStringNone = Union[Element, Optional[str]]
EMOJI_REGEX = r"(?P<syntax>:[\w\-\+]+:)"
def verbose_compile(pattern: str) -> Pattern[str]:
return re.compile(
f"^(.*?){pattern}(.*?)$",
re.DOTALL | re.VERBOSE,
)
STREAM_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*]+) # stream name can contain anything
\*\* # ends by double asterisks
"""
@one_time
def get_compiled_stream_link_regex() -> Pattern[str]:
# Not using verbose_compile as it adds ^(.*?) and
# (.*?)$ which cause extra overhead of matching
# pattern which is not required.
# With new InlineProcessor these extra patterns
# are not required.
return re.compile(
STREAM_LINK_REGEX,
re.DOTALL | re.VERBOSE,
)
STREAM_TOPIC_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*>]+) # stream name can contain anything except >
> # > acts as separator
(?P<topic_name>[^\*]+) # topic name can contain anything
\*\* # ends by double asterisks
"""
@one_time
def get_compiled_stream_topic_link_regex() -> Pattern[str]:
# Not using verbose_compile as it adds ^(.*?) and
# (.*?)$ which cause extra overhead of matching
# pattern which is not required.
# With new InlineProcessor these extra patterns
# are not required.
return re.compile(
STREAM_TOPIC_LINK_REGEX,
re.DOTALL | re.VERBOSE,
)
LINK_REGEX: Optional[Pattern[str]] = None
def get_web_link_regex() -> Pattern[str]:
# We create this one time, but not at startup. So the
# first message rendered in any process will have some
# extra costs. It's roughly 75ms to run this code, so
# caching the value in LINK_REGEX is super important here.
global LINK_REGEX
if LINK_REGEX is not None:
return LINK_REGEX
tlds = "|".join(list_of_tlds())
# A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
#
# We detect a URL either by the `https?://` or by building around the TLD.
# In lieu of having a recursive regex (which python doesn't support) to match
# arbitrary numbers of nested matching parenthesis, we manually build a regexp that
    # can match up to six levels of nesting.
    # The inner_paren_contents chunk matches the innermost non-parenthesis-holding text,
# and the paren_group matches text with, optionally, a matching set of parens
inner_paren_contents = r"[^\s()\"]*"
paren_group = r"""
[^\s()\"]*? # Containing characters that won't end the URL
(?: \( %s \) # and more characters in matched parens
[^\s()\"]*? # followed by more characters
)* # zero-or-more sets of paired parens
"""
nested_paren_chunk = paren_group
for i in range(6):
nested_paren_chunk = nested_paren_chunk % (paren_group,)
nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)
file_links = r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r""
REGEX = rf"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
# (Double-negative lookbehind to allow start-of-string)
(?P<url> # Main group
(?:(?: # Domain part
https?://[\w.:@-]+? # If it has a protocol, anything goes.
|(?: # Or, if not, be more strict to avoid false-positives
(?:[\w-]+\.)+ # One or more domain components, separated by dots
(?:{tlds}) # TLDs
)
)
(?:/ # A path, beginning with /
{nested_paren_chunk} # zero-to-6 sets of paired parens
)?) # Path is optional
| (?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
        {file_links} # File paths start with file:///; enable them by setting ENABLE_FILE_LINKS=True
| (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{{25,34}}) # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
)
(?= # URL must be followed by (not included in group)
[!:;\?\),\.\'\"\>]* # Optional punctuation characters
(?:\Z|\s) # followed by whitespace or end of string
)
"""
LINK_REGEX = verbose_compile(REGEX)
return LINK_REGEX
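# Illustrative note (not part of the original module): the paren_group pieces
# above exist so that balanced parentheses stay inside a detected URL -- e.g.
# "https://en.wikipedia.org/wiki/Zulip_(software)" is captured whole -- while
# an unpaired trailing ")" after a URL is treated by the lookahead as ordinary
# trailing punctuation and left outside the match.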
def clear_state_for_testing() -> None:
# The link regex never changes in production, but our tests
# try out both sides of ENABLE_FILE_LINKS, so we need
# a way to clear it.
global LINK_REGEX
LINK_REGEX = None
markdown_logger = logging.getLogger()
def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
"""If the link points to a local destination (e.g. #narrow/...),
generate a relative link that will open it in the current window.
"""
if db_data:
realm_uri_prefix = db_data.realm_uri + "/"
if (
link.startswith(realm_uri_prefix)
and urllib.parse.urljoin(realm_uri_prefix, link[len(realm_uri_prefix) :]) == link
):
return link[len(realm_uri_prefix) :]
return link
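# Illustrative example (not part of the original module): with
# db_data.realm_uri == "https://chat.example.com", the link
# "https://chat.example.com/#narrow/stream/1-general" is rewritten to the
# relative "#narrow/stream/1-general", while links to any other host (or calls
# with db_data=None) are returned unchanged.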
def url_embed_preview_enabled(
message: Optional[Message] = None, realm: Optional[Realm] = None, no_previews: bool = False
) -> bool:
if not settings.INLINE_URL_EMBED_PREVIEW:
return False
if no_previews:
return False
if realm is None:
if message is not None:
realm = message.get_realm()
if realm is None:
# realm can be None for odd use cases
# like generating documentation or running
# test code
return True
return realm.inline_url_embed_preview
def image_preview_enabled(
message: Optional[Message] = None, realm: Optional[Realm] = None, no_previews: bool = False
) -> bool:
if not settings.INLINE_IMAGE_PREVIEW:
return False
if no_previews:
return False
if realm is None:
if message is not None:
realm = message.get_realm()
if realm is None:
# realm can be None for odd use cases
# like generating documentation or running
# test code
return True
return realm.inline_image_preview
def list_of_tlds() -> List[str]:
# Skip a few overly-common false-positives from file extensions
common_false_positives = {"java", "md", "mov", "py", "zip"}
tlds = list(tld_set - common_false_positives)
tlds.sort(key=len, reverse=True)
return tlds
def walk_tree(
root: Element, processor: Callable[[Element], Optional[_T]], stop_after_first: bool = False
) -> List[_T]:
results = []
queue = deque([root])
while queue:
currElement = queue.popleft()
for child in currElement:
queue.append(child)
result = processor(child)
if result is not None:
results.append(result)
if stop_after_first:
return results
return results
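# Illustrative sketch (not part of the original module): walk_tree is a
# breadth-first traversal that collects every non-None value returned by the
# processor, e.g. gathering link targets from a rendered fragment:
#
#   fragment = etree.fromstring('<div><p><a href="/a">a</a></p><a href="/b">b</a></div>')
#   hrefs = walk_tree(fragment, lambda e: e.get("href") if e.tag == "a" else None)
#   # hrefs == ["/b", "/a"]  (the shallower <a> is visited first, breadth-first)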
@dataclass
class ElementFamily:
grandparent: Optional[Element]
parent: Element
child: Element
in_blockquote: bool
T = TypeVar("T")
class ResultWithFamily(Generic[T]):
family: ElementFamily
result: T
def __init__(self, family: ElementFamily, result: T):
self.family = family
self.result = result
class ElementPair:
parent: Optional["ElementPair"]
value: Element
def __init__(self, parent: Optional["ElementPair"], value: Element):
self.parent = parent
self.value = value
def walk_tree_with_family(
root: Element,
processor: Callable[[Element], Optional[_T]],
) -> List[ResultWithFamily[_T]]:
results = []
queue = deque([ElementPair(parent=None, value=root)])
while queue:
currElementPair = queue.popleft()
for child in currElementPair.value:
queue.append(ElementPair(parent=currElementPair, value=child))
result = processor(child)
if result is not None:
if currElementPair.parent is not None:
grandparent_element = currElementPair.parent
grandparent: Optional[Element] = grandparent_element.value
else:
grandparent = None
family = ElementFamily(
grandparent=grandparent,
parent=currElementPair.value,
child=child,
in_blockquote=has_blockquote_ancestor(currElementPair),
)
results.append(
ResultWithFamily(
family=family,
result=result,
)
)
return results
def has_blockquote_ancestor(element_pair: Optional[ElementPair]) -> bool:
if element_pair is None:
return False
elif element_pair.value.tag == "blockquote":
return True
else:
return has_blockquote_ancestor(element_pair.parent)
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
if settings.TEST_SUITE:
from . import testing_mocks
res = testing_mocks.twitter(tweet_id)
else:
creds = {
"consumer_key": settings.TWITTER_CONSUMER_KEY,
"consumer_secret": settings.TWITTER_CONSUMER_SECRET,
"access_token_key": settings.TWITTER_ACCESS_TOKEN_KEY,
"access_token_secret": settings.TWITTER_ACCESS_TOKEN_SECRET,
}
if not all(creds.values()):
return None
# We lazily import twitter here because its import process is
# surprisingly slow, and doing so has a significant impact on
# the startup performance of `manage.py` commands.
import twitter
api = twitter.Api(tweet_mode="extended", **creds)
try:
# Sometimes Twitter hangs on responses. Timing out here
# will cause the Tweet to go through as-is with no inline
# preview, rather than having the message be rejected
# entirely. This timeout needs to be less than our overall
# formatting timeout.
tweet = timeout(3, lambda: api.GetStatus(tweet_id))
res = tweet.AsDict()
except TimeoutExpired:
# We'd like to try again later and not cache the bad result,
# so we need to re-raise the exception (just as though
# we were being rate-limited)
raise
except twitter.TwitterError as e:
t = e.args[0]
if len(t) == 1 and ("code" in t[0]):
# https://developer.twitter.com/en/docs/basics/response-codes
code = t[0]["code"]
if code in [34, 144, 421, 422]:
# All these "correspond with HTTP 404," and mean
# that the message doesn't exist; return None so
# that we will cache the error.
return None
elif code in [63, 179]:
# 63 is that the account is suspended, 179 is that
# it is now locked; cache the None.
return None
elif code in [88, 130, 131]:
# Code 88 means that we were rate-limited, 130
# means Twitter is having capacity issues, and 131
# is other 400-equivalent; in these cases, raise
# the error so we don't cache None and will try
# again later.
raise
# It's not clear what to do in cases of other errors,
# but for now it seems reasonable to log at error
# level (so that we get notified), but then cache the
# failure so that we can proceed with our usual work.
markdown_logger.exception("Unknown error fetching tweet data", stack_info=True)
return None
return res
class OpenGraphSession(OutgoingSession):
def __init__(self) -> None:
super().__init__(role="markdown", timeout=1)
def fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]:
og: Dict[str, Optional[str]] = {"image": None, "title": None, "desc": None}
try:
with OpenGraphSession().get(
url, headers={"Accept": "text/html,application/xhtml+xml"}, stream=True
) as res:
if res.status_code != requests.codes.ok:
return None
mimetype, options = cgi.parse_header(res.headers["Content-Type"])
if mimetype not in ("text/html", "application/xhtml+xml"):
return None
html = mimetype == "text/html"
res.raw.decode_content = True
for event, element in lxml.etree.iterparse(
res.raw, events=("start",), no_network=True, remove_comments=True, html=html
):
parent = element.getparent()
if parent is not None:
# Reduce memory usage.
parent.text = None
parent.remove(element)
if element.tag in ("body", "{http://www.w3.org/1999/xhtml}body"):
break
elif element.tag in ("meta", "{http://www.w3.org/1999/xhtml}meta"):
if element.get("property") == "og:image":
content = element.get("content")
if content is not None:
og["image"] = urljoin(res.url, content)
elif element.get("property") == "og:title":
og["title"] = element.get("content")
elif element.get("property") == "og:description":
og["desc"] = element.get("content")
except requests.RequestException:
return None
return None if og["image"] is None else og
def get_tweet_id(url: str) -> Optional[str]:
parsed_url = urllib.parse.urlparse(url)
if not (parsed_url.netloc == "twitter.com" or parsed_url.netloc.endswith(".twitter.com")):
return None
to_match = parsed_url.path
# In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,
# we need to look at the fragment instead
if parsed_url.path == "/" and len(parsed_url.fragment) > 5:
to_match = parsed_url.fragment
tweet_id_match = re.match(
r"^!?/.*?/status(es)?/(?P<tweetid>\d{10,30})(/photo/[0-9])?/?$", to_match
)
if not tweet_id_match:
return None
return tweet_id_match.group("tweetid")
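# Illustrative examples (hypothetical tweet URLs):
#     get_tweet_id("https://twitter.com/wdaher/status/287977969287315456")
#         -> "287977969287315456"
#     get_tweet_id("https://twitter.com/#!/wdaher/status/287977969287315456")
#         -> "287977969287315456"  (old-style fragment URL)
#     get_tweet_id("https://twitter.com/wdaher") -> None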
class InlineImageProcessor(markdown.treeprocessors.Treeprocessor):
"""
Rewrite inline img tags to serve external content via Camo.
This rewrites all images, except ones that are served from the current
realm or global STATIC_URL. This is to ensure that each realm only loads
images that are hosted on that realm or by the global installation,
avoiding information leakage to external domains or between realms. We need
to disable proxying of images hosted on the same realm, because otherwise
we will break images in /user_uploads/, which require authorization to
view.
"""
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)
for img in found_imgs:
url = img.get("src")
assert url is not None
if is_static_or_current_realm_url(url, self.md.zulip_realm):
# Don't rewrite images on our own site (e.g. emoji, user uploads).
continue
img.set("src", get_camo_url(url))
class BacktickInlineProcessor(markdown.inlinepatterns.BacktickInlineProcessor):
"""Return a `<code>` element containing the matching text."""
def handleMatch( # type: ignore[override] # supertype incompatible with supersupertype
self, m: Match[str], data: str
) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]:
# Let upstream's implementation do its job as-is; we just replace
# the text so the matched group is not stripped, since stripping
# would make it impossible to put leading/trailing whitespace in
# an inline code span.
el, start, end = ret = super().handleMatch(m, data)
if el is not None and m.group(3):
# upstream's code here is: m.group(3).strip() rather than m.group(3).
el.text = markdown.util.AtomicString(markdown.util.code_escape(m.group(3)))
return ret
# List from https://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
IMAGE_EXTENSIONS = [".bmp", ".gif", ".jpe", ".jpeg", ".jpg", ".png", ".webp"]
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
TWITTER_MAX_IMAGE_HEIGHT = 400
TWITTER_MAX_TO_PREVIEW = 3
INLINE_PREVIEW_LIMIT_PER_MESSAGE = 10
def add_a(
self,
root: Element,
url: str,
link: str,
title: Optional[str] = None,
desc: Optional[str] = None,
class_attr: str = "message_inline_image",
data_id: Optional[str] = None,
insertion_index: Optional[int] = None,
already_thumbnailed: bool = False,
) -> None:
desc = desc if desc is not None else ""
# Update message.has_image attribute.
if "message_inline_image" in class_attr and self.md.zulip_message:
self.md.zulip_message.has_image = True
if insertion_index is not None:
div = Element("div")
root.insert(insertion_index, div)
else:
div = SubElement(root, "div")
div.set("class", class_attr)
a = SubElement(div, "a")
a.set("href", link)
if title is not None:
a.set("title", title)
if data_id is not None:
a.set("data-id", data_id)
img = SubElement(a, "img")
if (
settings.THUMBNAIL_IMAGES
and (not already_thumbnailed)
and user_uploads_or_external(url)
):
# See docs/thumbnailing.md for some high-level documentation.
#
# We strip leading '/' from relative URLs here to ensure
# consistency in what gets passed to /thumbnail
url = url.lstrip("/")
img.set("src", "/thumbnail?" + urlencode({"url": url, "size": "thumbnail"}))
img.set("data-src-fullsize", "/thumbnail?" + urlencode({"url": url, "size": "full"}))
else:
img.set("src", url)
if class_attr == "message_inline_ref":
summary_div = SubElement(div, "div")
title_div = SubElement(summary_div, "div")
title_div.set("class", "message_inline_image_title")
title_div.text = title
desc_div = SubElement(summary_div, "desc")
desc_div.set("class", "message_inline_image_desc")
def add_oembed_data(self, root: Element, link: str, extracted_data: Dict[str, Any]) -> bool:
oembed_resource_type = extracted_data.get("type", "")
title = extracted_data.get("title")
if oembed_resource_type == "photo":
image = extracted_data.get("image")
if image:
self.add_a(root, image, link, title=title)
return True
elif oembed_resource_type == "video":
html = extracted_data["html"]
image = extracted_data["image"]
title = extracted_data.get("title")
description = extracted_data.get("description")
self.add_a(
root,
image,
link,
title,
description,
"embed-video message_inline_image",
html,
already_thumbnailed=True,
)
return True
return False
def add_embed(self, root: Element, link: str, extracted_data: Dict[str, Any]) -> None:
oembed = extracted_data.get("oembed", False)
if oembed and self.add_oembed_data(root, link, extracted_data):
return
img_link = extracted_data.get("image")
if not img_link:
# Don't add an embed if an image is not found
return
container = SubElement(root, "div")
container.set("class", "message_embed")
img_link = get_camo_url(img_link)
img = SubElement(container, "a")
img.set("style", "background-image: url(" + css_escape(img_link) + ")")
img.set("href", link)
img.set("class", "message_embed_image")
data_container = SubElement(container, "div")
data_container.set("class", "data-container")
title = extracted_data.get("title")
if title:
title_elm = SubElement(data_container, "div")
title_elm.set("class", "message_embed_title")
a = SubElement(title_elm, "a")
a.set("href", link)
a.set("title", title)
a.text = title
description = extracted_data.get("description")
if description:
description_elm = SubElement(data_container, "div")
description_elm.set("class", "message_embed_description")
description_elm.text = description
def get_actual_image_url(self, url: str) -> str:
# Add specific per-site cases to convert image-preview URLs to image URLs.
# See https://github.com/zulip/zulip/issues/4658 for more information
parsed_url = urllib.parse.urlparse(url)
if parsed_url.netloc == "github.com" or parsed_url.netloc.endswith(".github.com"):
# https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png ->
# https://raw.githubusercontent.com/zulip/zulip/main/static/images/logo/zulip-icon-128x128.png
split_path = parsed_url.path.split("/")
if len(split_path) > 3 and split_path[3] == "blob":
return urllib.parse.urljoin(
"https://raw.githubusercontent.com", "/".join(split_path[0:3] + split_path[4:])
)
return url
def is_image(self, url: str) -> bool:
if not self.md.image_preview_enabled:
return False
parsed_url = urllib.parse.urlparse(url)
# Exclude pages whose URLs end with an image extension but actually
# serve HTML, so they cannot be previewed as images.
if parsed_url.netloc == "pasteboard.co":
return False
for ext in IMAGE_EXTENSIONS:
if parsed_url.path.lower().endswith(ext):
return True
return False
def corrected_image_source(self, url: str) -> Optional[str]:
# This function adjusts any URLs from linx.li and
# wikipedia.org to point to the actual image URL. It's
# structurally very similar to dropbox_image, and possibly
# should be rewritten to use open graph, but has some value.
parsed_url = urllib.parse.urlparse(url)
if parsed_url.netloc.lower().endswith(".wikipedia.org"):
# Redirecting from "/wiki/File:" to "/wiki/Special:FilePath/File:"
# A possible alternative, that avoids the redirect after hitting "Special:"
# is using the first characters of md5($filename) to generate the URL
domain = parsed_url.scheme + "://" + parsed_url.netloc
correct_url = domain + parsed_url.path[:6] + "Special:FilePath" + parsed_url.path[5:]
return correct_url
if parsed_url.netloc == "linx.li":
return "https://linx.li/s" + parsed_url.path
return None
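# Illustrative example (hypothetical file name):
#     "https://en.wikipedia.org/wiki/File:Example.png"
#     -> "https://en.wikipedia.org/wiki/Special:FilePath/File:Example.png"
# which redirects to the actual image file.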
def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]:
# TODO: The returned Dict could possibly be a TypedDict in future.
parsed_url = urllib.parse.urlparse(url)
if parsed_url.netloc == "dropbox.com" or parsed_url.netloc.endswith(".dropbox.com"):
is_album = parsed_url.path.startswith("/sc/") or parsed_url.path.startswith("/photos/")
# Only allow preview Dropbox shared links
if not (
parsed_url.path.startswith("/s/") or parsed_url.path.startswith("/sh/") or is_album
):
return None
# Try to retrieve open graph protocol info for a preview
# This might be redundant right now for shared links for images.
# However, we might want to make use of title and description
# in the future. If the actual image is too big, we might also
# want to use the open graph image.
image_info = fetch_open_graph_image(url)
is_image = is_album or self.is_image(url)
# If it is from an album or not an actual image file,
# just use open graph image.
if is_album or not is_image:
# Failed to follow link to find an image preview so
# use placeholder image and guess filename
if image_info is None:
return None
image_info["is_image"] = is_image
return image_info
# Otherwise, try to retrieve the actual image.
# This is because open graph image from Dropbox may have padding
# and gifs do not work.
# TODO: What if image is huge? Should we get headers first?
if image_info is None:
image_info = {}
image_info["is_image"] = True
parsed_url_list = list(parsed_url)
parsed_url_list[4] = "raw=1" # Replaces query
image_info["image"] = urllib.parse.urlunparse(parsed_url_list)
return image_info
return None
def youtube_id(self, url: str) -> Optional[str]:
if not self.md.image_preview_enabled:
return None
# YouTube video id extraction regular expression from https://pastebin.com/KyKAFv1s
# Slightly modified to support URLs of the forms
# - youtu.be/<id>
# - youtube.com/playlist?v=<id>&list=<list-id>
# - youtube.com/watch_videos?video_ids=<id1>,<id2>,<id3>
# If it matches, match.group(2) is the video id.
schema_re = r"(?:https?://)"
host_re = r"(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)"
param_re = (
r"(?:(?:(?:v|embed)/)|"
+ r"(?:(?:(?:watch|playlist)(?:_popup|_videos)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v(?:ideo_ids)?=))"
)
id_re = r"([0-9A-Za-z_-]+)"
youtube_re = r"^({schema_re}?{host_re}{param_re}?)?{id_re}(?(1).+)?$"
youtube_re = youtube_re.format(
schema_re=schema_re, host_re=host_re, id_re=id_re, param_re=param_re
)
match = re.match(youtube_re, url)
# URLs of the form youtube.com/playlist?list=<list-id> are incorrectly matched
if match is None or match.group(2) == "playlist":
return None
return match.group(2)
def youtube_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
title = extracted_data.get("title")
if title is not None:
return f"YouTube - {title}"
return None
def youtube_image(self, url: str) -> Optional[str]:
yt_id = self.youtube_id(url)
if yt_id is not None:
return f"https://i.ytimg.com/vi/{yt_id}/default.jpg"
return None
def vimeo_id(self, url: str) -> Optional[str]:
if not self.md.image_preview_enabled:
return None
# (http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?)
# If it matches, match.group('id') is the video id.
vimeo_re = (
r"^((http|https)?:\/\/(www\.)?vimeo.com\/"
+ r"(?:channels\/(?:\w+\/)?|groups\/"
+ r"([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$"
)
match = re.match(vimeo_re, url)
if match is None:
return None
return match.group(5)
def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
title = extracted_data.get("title")
if title is not None:
return f"Vimeo - {title}"
return None
def twitter_text(
self,
text: str,
urls: List[Dict[str, str]],
user_mentions: List[Dict[str, Any]],
media: List[Dict[str, Any]],
) -> Element:
"""
Use data from the Twitter API to turn links, mentions and media into A
tags. Also convert Unicode emojis to images.
This works by using the URLs, user_mentions and media data from
the twitter API and searching for Unicode emojis in the text using
`UNICODE_EMOJI_RE`.
The first step is finding the locations of the URLs, mentions, media and
emoji in the text. For each match we build a dictionary with type, the start
location, end location, the URL to link to, and the text (codepoint and title
in case of emojis) to be used in the link (image in case of emojis).
Next we sort the matches by start location, and for each one we add the
text from the end of the last link to the start of the current link to
the output. The text needs to be added to the text attribute of the first
node (the P tag) or to the tail of the last link created.
Finally we add any remaining text to the last node.
"""
to_process: List[Dict[str, Any]] = []
# Build dicts for URLs
for url_data in urls:
short_url = url_data["url"]
full_url = url_data["expanded_url"]
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append(
{
"type": "url",
"start": match.start(),
"end": match.end(),
"url": short_url,
"text": full_url,
}
)
# Build dicts for mentions
for user_mention in user_mentions:
screen_name = user_mention["screen_name"]
mention_string = "@" + screen_name
for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
to_process.append(
{
"type": "mention",
"start": match.start(),
"end": match.end(),
"url": "https://twitter.com/" + urllib.parse.quote(screen_name),
"text": mention_string,
}
)
# Build dicts for media
for media_item in media:
short_url = media_item["url"]
expanded_url = media_item["expanded_url"]
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append(
{
"type": "media",
"start": match.start(),
"end": match.end(),
"url": short_url,
"text": expanded_url,
}
)
# Build dicts for emojis
for match in re.finditer(UNICODE_EMOJI_RE, text, re.IGNORECASE):
orig_syntax = match.group("syntax")
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ":" + codepoint_to_name[codepoint] + ":"
to_process.append(
{
"type": "emoji",
"start": match.start(),
"end": match.end(),
"codepoint": codepoint,
"title": display_string,
}
)
to_process.sort(key=lambda x: x["start"])
p = current_node = Element("p")
def set_text(text: str) -> None:
"""
Helper to set the text or the tail of the current_node
"""
if current_node == p:
current_node.text = text
else:
current_node.tail = text
db_data: Optional[DbData] = self.md.zulip_db_data
current_index = 0
for item in to_process:
# If the text we want to link starts inside already-linked text, skip it.
if item["start"] < current_index:
continue
# Add text from the end of last link to the start of the current
# link
set_text(text[current_index : item["start"]])
current_index = item["end"]
if item["type"] != "emoji":
elem = url_to_a(db_data, item["url"], item["text"])
assert isinstance(elem, Element)
else:
elem = make_emoji(item["codepoint"], item["title"])
current_node = elem
p.append(elem)
# Add any unused text
set_text(text[current_index:])
return p
def twitter_link(self, url: str) -> Optional[Element]:
tweet_id = get_tweet_id(url)
if tweet_id is None:
return None
try:
res = fetch_tweet_data(tweet_id)
if res is None:
return None
user: Dict[str, Any] = res["user"]
tweet = Element("div")
tweet.set("class", "twitter-tweet")
img_a = SubElement(tweet, "a")
img_a.set("href", url)
profile_img = SubElement(img_a, "img")
profile_img.set("class", "twitter-avatar")
# For some reason, for some tweets (e.g. tweet 285072525413724161),
# python-twitter does not give us a
# profile_image_url_https, but instead puts that URL in
# profile_image_url. So use _https if available, but fall
# back gracefully.
image_url = user.get("profile_image_url_https", user["profile_image_url"])
profile_img.set("src", image_url)
text = html.unescape(res["full_text"])
urls = res.get("urls", [])
user_mentions = res.get("user_mentions", [])
media: List[Dict[str, Any]] = res.get("media", [])
p = self.twitter_text(text, urls, user_mentions, media)
tweet.append(p)
span = SubElement(tweet, "span")
span.text = "- {} (@{})".format(user["name"], user["screen_name"])
# Add image previews
for media_item in media:
# Only photos have a preview image
if media_item["type"] != "photo":
continue
# Find the image size that is smaller than
# TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest
size_name_tuples = list(media_item["sizes"].items())
size_name_tuples.sort(reverse=True, key=lambda x: x[1]["h"])
for size_name, size in size_name_tuples:
if size["h"] < self.TWITTER_MAX_IMAGE_HEIGHT:
break
media_url = "{}:{}".format(media_item["media_url_https"], size_name)
img_div = SubElement(tweet, "div")
img_div.set("class", "twitter-image")
img_a = SubElement(img_div, "a")
img_a.set("href", media_item["url"])
img = SubElement(img_a, "img")
img.set("src", media_url)
return tweet
except Exception:
# We put this in its own try-except because it requires external
# connectivity. If Twitter flakes out, we don't want to not-render
# the entire message; we just want to not show the Twitter preview.
markdown_logger.warning("Error building Twitter link", exc_info=True)
return None
def get_url_data(self, e: Element) -> Optional[Tuple[str, Optional[str]]]:
if e.tag == "a":
url = e.get("href")
assert url is not None
return (url, e.text)
return None
def get_inlining_information(
self,
root: Element,
found_url: ResultWithFamily[Tuple[str, Optional[str]]],
) -> LinkInfo:
grandparent = found_url.family.grandparent
parent = found_url.family.parent
ahref_element = found_url.family.child
(url, text) = found_url.result
# url != text usually implies a named link, which we opt not to remove
url_eq_text = text is None or url == text
title = None if url_eq_text else text
info: LinkInfo = {
"parent": root,
"title": title,
"index": None,
"remove": None,
}
if parent.tag == "li":
info["parent"] = parent
if not parent.text and not ahref_element.tail and url_eq_text:
info["remove"] = ahref_element
elif parent.tag == "p":
assert grandparent is not None
parent_index = None
for index, uncle in enumerate(grandparent):
if uncle is parent:
parent_index = index
break
# Append to end of list of grandparent's children as normal
info["parent"] = grandparent
if (
len(parent) == 1
and (not parent.text or parent.text == "\n")
and not ahref_element.tail
and url_eq_text
):
info["remove"] = parent
if parent_index is not None:
info["index"] = self.find_proper_insertion_index(grandparent, parent, parent_index)
return info
def handle_image_inlining(
self,
root: Element,
found_url: ResultWithFamily[Tuple[str, Optional[str]]],
) -> None:
info = self.get_inlining_information(root, found_url)
(url, text) = found_url.result
actual_url = self.get_actual_image_url(url)
self.add_a(
info["parent"], actual_url, url, title=info["title"], insertion_index=info["index"]
)
if info["remove"] is not None:
info["parent"].remove(info["remove"])
def handle_tweet_inlining(
self,
root: Element,
found_url: ResultWithFamily[Tuple[str, Optional[str]]],
twitter_data: Element,
) -> None:
info = self.get_inlining_information(root, found_url)
if info["index"] is not None:
div = Element("div")
root.insert(info["index"], div)
else:
div = SubElement(root, "div")
div.set("class", "inline-preview-twitter")
div.insert(0, twitter_data)
def handle_youtube_url_inlining(
self,
root: Element,
found_url: ResultWithFamily[Tuple[str, Optional[str]]],
yt_image: str,
) -> None:
info = self.get_inlining_information(root, found_url)
(url, text) = found_url.result
yt_id = self.youtube_id(url)
self.add_a(
info["parent"],
yt_image,
url,
None,
None,
"youtube-video message_inline_image",
yt_id,
insertion_index=info["index"],
already_thumbnailed=True,
)
def find_proper_insertion_index(
self, grandparent: Element, parent: Element, parent_index_in_grandparent: int
) -> int:
# If there are several inline images from the same paragraph, ensure that
# they are in the correct (and not reversed) order by inserting after the
# last inline image from paragraph 'parent'.
parent_links = [ele.attrib["href"] for ele in parent.iter(tag="a")]
insertion_index = parent_index_in_grandparent
while True:
insertion_index += 1
if insertion_index >= len(grandparent):
return insertion_index
uncle = grandparent[insertion_index]
inline_image_classes = [
"message_inline_image",
"message_inline_ref",
"inline-preview-twitter",
]
if (
uncle.tag != "div"
or "class" not in uncle.keys()
or uncle.attrib["class"] not in inline_image_classes
):
return insertion_index
uncle_link = list(uncle.iter(tag="a"))[0].attrib["href"]
if uncle_link not in parent_links:
return insertion_index
def is_absolute_url(self, url: str) -> bool:
return bool(urllib.parse.urlparse(url).netloc)
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_urls = walk_tree_with_family(root, self.get_url_data)
unique_urls = {found_url.result[0] for found_url in found_urls}
# Collect unique URLs which are not quoted as we don't do
# inline previews for links inside blockquotes.
unique_previewable_urls = {
found_url.result[0] for found_url in found_urls if not found_url.family.in_blockquote
}
# Set has_link and similar flags whenever a message is processed by Markdown
if self.md.zulip_message:
self.md.zulip_message.has_link = len(found_urls) > 0
self.md.zulip_message.has_image = False # This is updated in self.add_a
for url in unique_urls:
# Due to rewrite_local_links_to_relative, we need to
# handle both relative URLs beginning with
# `/user_uploads` and beginning with `user_uploads`.
# This urllib construction converts the latter into
# the former.
parsed_url = urllib.parse.urlsplit(urllib.parse.urljoin("/", url))
host = parsed_url.netloc
if host != "" and host != self.md.zulip_realm.host:
continue
if not parsed_url.path.startswith("/user_uploads/"):
continue
path_id = parsed_url.path[len("/user_uploads/") :]
self.md.zulip_rendering_result.potential_attachment_path_ids.append(path_id)
if len(found_urls) == 0:
return
if len(unique_previewable_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE:
return
processed_urls: Set[str] = set()
rendered_tweet_count = 0
for found_url in found_urls:
(url, text) = found_url.result
if url in unique_previewable_urls and url not in processed_urls:
processed_urls.add(url)
else:
continue
if not self.is_absolute_url(url):
if self.is_image(url):
self.handle_image_inlining(root, found_url)
# We don't have a strong use case for doing URL preview for relative links.
continue
dropbox_image = self.dropbox_image(url)
if dropbox_image is not None:
class_attr = "message_inline_ref"
is_image = dropbox_image["is_image"]
if is_image:
class_attr = "message_inline_image"
# Not making use of title and description of images
self.add_a(
root,
dropbox_image["image"],
url,
title=dropbox_image.get("title"),
desc=dropbox_image.get("desc", ""),
class_attr=class_attr,
already_thumbnailed=True,
)
continue
if self.is_image(url):
image_source = self.corrected_image_source(url)
if image_source is not None:
found_url = ResultWithFamily(
family=found_url.family,
result=(image_source, image_source),
)
self.handle_image_inlining(root, found_url)
continue
if get_tweet_id(url) is not None:
if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
# Only render at most TWITTER_MAX_TO_PREVIEW tweets per message.
continue
twitter_data = self.twitter_link(url)
if twitter_data is None:
# This link is not actually a tweet known to twitter
continue
rendered_tweet_count += 1
self.handle_tweet_inlining(root, found_url, twitter_data)
continue
youtube = self.youtube_image(url)
if youtube is not None:
self.handle_youtube_url_inlining(root, found_url, youtube)
# NOTE: We don't `continue` here, to allow replacing the URL with
# the title, if INLINE_URL_EMBED_PREVIEW feature is enabled.
# The entire preview would ideally be shown only if the feature
# is enabled, but URL previews are a beta feature and YouTube
# previews are pretty stable.
db_data: Optional[DbData] = self.md.zulip_db_data
if db_data and db_data.sent_by_bot:
continue
if not self.md.url_embed_preview_enabled:
continue
try:
extracted_data = link_preview.link_embed_data_from_cache(url)
except NotFoundInCache:
self.md.zulip_rendering_result.links_for_preview.add(url)
continue
if extracted_data:
if youtube is not None:
title = self.youtube_title(extracted_data)
if title is not None:
if url == text:
found_url.family.child.text = title
else:
found_url.family.child.text = text
continue
self.add_embed(root, url, extracted_data)
if self.vimeo_id(url):
title = self.vimeo_title(extracted_data)
if title:
if url == text:
found_url.family.child.text = title
else:
found_url.family.child.text = text
class CompiledInlineProcessor(markdown.inlinepatterns.InlineProcessor):
def __init__(self, compiled_re: Pattern[str], md: markdown.Markdown) -> None:
# This is similar to the superclass's small __init__ function,
# but we skip the compilation step and let the caller give us
# a compiled regex.
self.compiled_re = compiled_re
self.md = md
class Timestamp(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
time_input_string = match.group("time")
timestamp = None
try:
timestamp = dateutil.parser.parse(time_input_string, tzinfos=common_timezones)
except ValueError:
try:
timestamp = datetime.datetime.fromtimestamp(float(time_input_string))
except ValueError:
pass
if not timestamp:
error_element = Element("span")
error_element.set("class", "timestamp-error")
error_element.text = markdown.util.AtomicString(
f"Invalid time format: {time_input_string}"
)
return error_element
# Use HTML5 <time> element for valid timestamps.
time_element = Element("time")
if timestamp.tzinfo:
timestamp = timestamp.astimezone(datetime.timezone.utc)
else:
timestamp = timestamp.replace(tzinfo=datetime.timezone.utc)
time_element.set("datetime", timestamp.isoformat().replace("+00:00", "Z"))
# Set text to initial input, so simple clients translating
# HTML to text will at least display something.
time_element.text = markdown.util.AtomicString(time_input_string)
return time_element
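# Illustrative example: "<time:2023-01-01T12:00:00+00:00>" renders as
#     <time datetime="2023-01-01T12:00:00Z">2023-01-01T12:00:00+00:00</time>
# while unparseable input instead produces a span with class "timestamp-error".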
# All of our emojis (non-ZWJ sequences) belong to one of these Unicode blocks:
# \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
# \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
# \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
# \U0001f600-\U0001f64f - Emoticons (Emoji)
# \U0001f680-\U0001f6ff - Transport and Map Symbols
# \U0001f7e0-\U0001f7eb - Coloured Geometric Shapes (NOTE: Not Unicode standard category name)
# \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
# \u2000-\u206f - General Punctuation
# \u2300-\u23ff - Miscellaneous Technical
# \u2400-\u243f - Control Pictures
# \u2440-\u245f - Optical Character Recognition
# \u2460-\u24ff - Enclosed Alphanumerics
# \u2500-\u257f - Box Drawing
# \u2580-\u259f - Block Elements
# \u25a0-\u25ff - Geometric Shapes
# \u2600-\u26ff - Miscellaneous Symbols
# \u2700-\u27bf - Dingbats
# \u2900-\u297f - Supplemental Arrows-B
# \u2b00-\u2bff - Miscellaneous Symbols and Arrows
# \u3000-\u303f - CJK Symbols and Punctuation
# \u3200-\u32ff - Enclosed CJK Letters and Months
UNICODE_EMOJI_RE = (
"(?P<syntax>["
"\U0001F100-\U0001F64F"
"\U0001F680-\U0001F6FF"
"\U0001F7E0-\U0001F7EB"
"\U0001F900-\U0001F9FF"
"\u2000-\u206F"
"\u2300-\u27BF"
"\u2900-\u297F"
"\u2B00-\u2BFF"
"\u3000-\u303F"
"\u3200-\u32FF"
"])"
)
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend Unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective Unicode blocks.
# For more information, please refer to the following article:
# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript
def make_emoji(codepoint: str, display_string: str) -> Element:
# Replace underscore in emoji's title with space
title = display_string[1:-1].replace("_", " ")
span = Element("span")
span.set("class", f"emoji emoji-{codepoint}")
span.set("title", title)
span.set("role", "img")
span.set("aria-label", title)
span.text = markdown.util.AtomicString(display_string)
return span
def make_realm_emoji(src: str, display_string: str) -> Element:
elt = Element("img")
elt.set("src", src)
elt.set("class", "emoji")
elt.set("alt", display_string)
elt.set("title", display_string[1:-1].replace("_", " "))
return elt
def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
# Unicode codepoints are a minimum of 4 hex digits long, padded with zeroes.
return f"{ord(unicode_emoji):04x}"
class EmoticonTranslation(markdown.inlinepatterns.Pattern):
"""Translates emoticons like `:)` into emoji like `:smile:`."""
def handleMatch(self, match: Match[str]) -> Optional[Element]:
db_data: Optional[DbData] = self.md.zulip_db_data
if db_data is None or not db_data.translate_emoticons:
return None
emoticon = match.group("emoticon")
translated = translate_emoticons(emoticon)
name = translated[1:-1]
return make_emoji(name_to_codepoint[name], translated)
class UnicodeEmoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
orig_syntax = match.group("syntax")
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ":" + codepoint_to_name[codepoint] + ":"
return make_emoji(codepoint, display_string)
else:
return None
class Emoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Union[str, Element]]:
orig_syntax = match.group("syntax")
name = orig_syntax[1:-1]
active_realm_emoji: Dict[str, EmojiInfo] = {}
db_data: Optional[DbData] = self.md.zulip_db_data
if db_data is not None:
active_realm_emoji = db_data.active_realm_emoji
if name in active_realm_emoji:
return make_realm_emoji(active_realm_emoji[name]["source_url"], orig_syntax)
elif name == "zulip":
return make_realm_emoji(
"/static/generated/emoji/images/emoji/unicode/zulip.png", orig_syntax
)
elif name in name_to_codepoint:
return make_emoji(name_to_codepoint[name], orig_syntax)
else:
return orig_syntax
def content_has_emoji_syntax(content: str) -> bool:
return re.search(EMOJI_REGEX, content) is not None
class Tex(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Element:
rendered = render_tex(match.group("body"), is_inline=True)
if rendered is not None:
return self.md.htmlStash.store(rendered)
else: # Something went wrong while rendering
span = Element("span")
span.set("class", "tex-error")
span.text = markdown.util.AtomicString("$$" + match.group("body") + "$$")
return span
def sanitize_url(url: str) -> Optional[str]:
"""
Sanitize a URL against XSS attacks.
See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
"""
try:
parts = urllib.parse.urlparse(url.replace(" ", "%20"))
scheme, netloc, path, params, query, fragment = parts
except ValueError:
# Bad URL - so bad it couldn't be parsed.
return ""
# If there is no scheme or netloc and there is a '@' in the path,
# treat it as a mailto: and set the appropriate scheme
if scheme == "" and netloc == "" and "@" in path:
scheme = "mailto"
elif scheme == "" and netloc == "" and len(path) > 0 and path[0] == "/":
# Allow domain-relative links
return urllib.parse.urlunparse(("", "", path, params, query, fragment))
elif (scheme, netloc, path, params, query) == ("", "", "", "", "") and len(fragment) > 0:
# Allow fragment links
return urllib.parse.urlunparse(("", "", "", "", "", fragment))
# Zulip modification: If scheme is not specified, assume http://
# We re-enter sanitize_url because netloc etc. need to be re-parsed.
if not scheme:
return sanitize_url("http://" + url)
# Upstream code will accept a URL like javascript://foo because it
# appears to have a netloc. Additionally there are plenty of other
# schemes that do weird things like launch external programs. To be
# on the safe side, we allow a fixed set of schemes.
if scheme not in allowed_schemes:
return None
# Upstream code scans path, parameters, and query for colon characters
# because
#
# some aliases [for javascript:] will appear to urllib.parse to have
# no scheme. On top of that relative links (i.e.: "foo/bar.html")
# have no scheme.
#
# We already converted an empty scheme to http:// above, so we skip
# the colon check, which would also forbid a lot of legitimate URLs.
# URL passes all tests. Return URL as-is.
return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
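# Illustrative behavior (a sketch of the rules above; assumes "mailto" is in
# allowed_schemes, which is defined elsewhere, and "javascript" is not):
#     sanitize_url("javascript://example.com/alert(1)") -> None  (scheme not allowed)
#     sanitize_url("zulip.com/some page") -> "http://zulip.com/some%20page"
#     sanitize_url("/user_uploads/foo.png") -> "/user_uploads/foo.png"
#     sanitize_url("foo@example.com") -> "mailto:foo@example.com"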
def url_to_a(
db_data: Optional[DbData], url: str, text: Optional[str] = None
) -> Union[Element, str]:
a = Element("a")
href = sanitize_url(url)
if href is None:
# Rejected by sanitize_url; render it as plain text.
return url
if text is None:
text = markdown.util.AtomicString(url)
href = rewrite_local_links_to_relative(db_data, href)
a.set("href", href)
a.text = text
return a
class CompiledPattern(markdown.inlinepatterns.Pattern):
def __init__(self, compiled_re: Pattern[str], md: markdown.Markdown) -> None:
# This is similar to the superclass's small __init__ function,
# but we skip the compilation step and let the caller give us
# a compiled regex.
self.compiled_re = compiled_re
self.md = md
class AutoLink(CompiledPattern):
def handleMatch(self, match: Match[str]) -> ElementStringNone:
url = match.group("url")
db_data: Optional[DbData] = self.md.zulip_db_data
return url_to_a(db_data, url)
class OListProcessor(sane_lists.SaneOListProcessor):
def __init__(self, parser: BlockParser) -> None:
parser.md.tab_length = 2
super().__init__(parser)
parser.md.tab_length = 4
class UListProcessor(sane_lists.SaneUListProcessor):
"""Unordered lists, but with 2-space indent"""
def __init__(self, parser: BlockParser) -> None:
parser.md.tab_length = 2
super().__init__(parser)
parser.md.tab_length = 4
class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
"""Process unordered list blocks.
Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent
"""
def __init__(self, parser: BlockParser) -> None:
# HACK: Set the tab length to 2 just for the initialization of
# this class, so that bulleted lists (and only bulleted lists)
# work off 2-space indentation.
parser.md.tab_length = 2
super().__init__(parser)
parser.md.tab_length = 4
class HashHeaderProcessor(markdown.blockprocessors.HashHeaderProcessor):
"""Process hash headers.
Based on markdown.blockprocessors.HashHeaderProcessor, but requires a space after the # characters for a heading.
"""
# Original regex for hashheader is
# RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')
RE = re.compile(r"(?:^|\n)(?P<level>#{1,6})\s(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)")
class BlockQuoteProcessor(markdown.blockprocessors.BlockQuoteProcessor):
"""Process block quotes.
Based on markdown.blockprocessors.BlockQuoteProcessor, but with 2-space indent
"""
# Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
RE = re.compile(r"(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))" r"[ ]{0,3}>[ ]?(.*)")
# run() is very slightly forked from the base class; see notes below.
def run(self, parent: Element, blocks: List[str]) -> None:
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[: m.start()] # Lines before blockquote
# Pass lines before blockquote in recursively for parsing first.
self.parser.parseBlocks(parent, [before])
# Remove ``> `` from beginning of each line.
block = "\n".join([self.clean(line) for line in block[m.start() :].split("\n")])
# Zulip modification: The next line is patched to match
# CommonMark rather than original Markdown. In original
# Markdown, blockquotes with a blank line between them were
# merged, which makes it impossible to break a blockquote with
# a blank line intentionally.
#
# This is a new blockquote. Create a new parent element.
quote = etree.SubElement(parent, "blockquote")
# Recursively parse block with blockquote as parent.
# change parser state so blockquotes embedded in lists use p tags
self.parser.state.set("blockquote")
self.parser.parseChunk(quote, block)
self.parser.state.reset()
def clean(self, line: str) -> str:
# Silence all the mentions inside blockquotes
line = mention.MENTIONS_RE.sub(lambda m: "@_**{}**".format(m.group("match")), line)
# Silence all the user group mentions inside blockquotes
line = mention.USER_GROUP_MENTIONS_RE.sub(lambda m: "@_*{}*".format(m.group("match")), line)
# And then run the upstream processor's code for removing the '>'
return super().clean(line)
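# Illustrative example (hypothetical mention): inside a blockquote, the line
# "> @**Iago** look at this" has its mention silenced to "@_**Iago**" before
# the upstream clean() strips the leading "> ".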
@dataclass
class Fence:
fence_str: str
is_code: bool
class MarkdownListPreprocessor(markdown.preprocessors.Preprocessor):
"""Allows list blocks that come directly after another block
to be rendered as a list.
Detects paragraphs that have a matching list item that comes
directly after a line of text, and inserts a newline between them
to satisfy Markdown"""
LI_RE = re.compile(r"^[ ]*([*+-]|\d\.)[ ]+(.*)", re.MULTILINE)
def run(self, lines: List[str]) -> List[str]:
"""Insert a newline between a paragraph and ulist if missing"""
inserts = 0
in_code_fence: bool = False
open_fences: List[Fence] = []
copy = lines[:]
for i in range(len(lines) - 1):
# Ignore anything that is inside a fenced code block but not quoted.
# We ignore all lines where some parent is a non-quote code block.
m = FENCE_RE.match(lines[i])
if m:
fence_str = m.group("fence")
lang: Optional[str] = m.group("lang")
is_code = lang not in ("quote", "quoted")
has_open_fences = not len(open_fences) == 0
matches_last_fence = (
fence_str == open_fences[-1].fence_str if has_open_fences else False
)
closes_last_fence = not lang and matches_last_fence
if closes_last_fence:
open_fences.pop()
else:
open_fences.append(Fence(fence_str, is_code))
in_code_fence = any(fence.is_code for fence in open_fences)
# If we're not in a fenced block and we detect an upcoming list
# hanging off any block (including a list of another type), add
# a newline.
li1 = self.LI_RE.match(lines[i])
li2 = self.LI_RE.match(lines[i + 1])
if not in_code_fence and lines[i]:
if (li2 and not li1) or (
li1 and li2 and (len(li1.group(1)) == 1) != (len(li2.group(1)) == 1)
):
copy.insert(i + inserts + 1, "")
inserts += 1
return copy
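# Illustrative example: the input lines
#     ["Some list:", "* item one", "* item two"]
# become
#     ["Some list:", "", "* item one", "* item two"]
# so that Python-Markdown recognizes the bullets as a list.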
# Name for the outer capture group we use to separate whitespace and
# other delimiters from the actual content. This value won't be an
# option in user-entered capture groups.
BEFORE_CAPTURE_GROUP = "linkifier_before_match"
OUTER_CAPTURE_GROUP = "linkifier_actual_match"
AFTER_CAPTURE_GROUP = "linkifier_after_match"
def prepare_linkifier_pattern(source: str) -> str:
"""Augment a linkifier so it only matches after start-of-string,
whitespace, or opening delimiters, won't match if there are word
characters directly after, and saves what was matched as
OUTER_CAPTURE_GROUP."""
return rf"""(?P<{BEFORE_CAPTURE_GROUP}>^|\s|['"\(,:<])(?P<{OUTER_CAPTURE_GROUP}>{source})(?P<{AFTER_CAPTURE_GROUP}>$|[^\pL\pN])"""
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class LinkifierPattern(CompiledInlineProcessor):
"""Applied a given linkifier to the input"""
def __init__(
self,
source_pattern: str,
format_string: str,
md: markdown.Markdown,
) -> None:
# Do not write errors to stderr (this still raises exceptions)
options = re2.Options()
options.log_errors = False
compiled_re2 = re2.compile(prepare_linkifier_pattern(source_pattern), options=options)
# Find percent-encoded bytes and escape them from the python
# interpolation. That is:
# %(foo)s -> %(foo)s
# %% -> %%
# %ab -> %%ab
# %%ab -> %%ab
# %%%ab -> %%%%ab
#
# We do this here, rather than before storing, to make edits
# to the underlying linkifier more straightforward, and
# because JS does not have a real formatter.
self.format_string = re.sub(
r"(?<!%)(%%)*%([a-fA-F0-9][a-fA-F0-9])", r"\1%%\2", format_string
)
super().__init__(compiled_re2, md)
def handleMatch( # type: ignore[override] # supertype incompatible with supersupertype
self, m: Match[str], data: str
) -> Union[Tuple[Element, int, int], Tuple[None, None, None]]:
db_data: Optional[DbData] = self.md.zulip_db_data
url = url_to_a(
db_data,
self.format_string % m.groupdict(),
markdown.util.AtomicString(m.group(OUTER_CAPTURE_GROUP)),
)
if isinstance(url, str):
return None, None, None
return (
url,
m.start(2),
m.end(2),
)
class UserMentionPattern(CompiledInlineProcessor):
def handleMatch( # type: ignore[override] # supertype incompatible with supersupertype
self, m: Match[str], data: str
) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]:
name = m.group("match")
silent = m.group("silent") == "_"
db_data: Optional[DbData] = self.md.zulip_db_data
if db_data is not None:
wildcard = mention.user_mention_matches_wildcard(name)
# For @**|id** and @**name|id** mention syntaxes.
id_syntax_match = re.match(r"(?P<full_name>.+)?\|(?P<user_id>\d+)$", name)
if id_syntax_match:
full_name = id_syntax_match.group("full_name")
id = int(id_syntax_match.group("user_id"))
user = db_data.mention_data.get_user_by_id(id)
# For @**name|id**, we need to specifically check that
# name matches the full_name of user in mention_data.
# This enforces our decision that
# @**user_1_name|id_for_user_2** should be invalid syntax.
if full_name:
if user and user.full_name != full_name:
return None, None, None
else:
# For @**name** syntax.
user = db_data.mention_data.get_user_by_name(name)
if wildcard:
if not silent:
self.md.zulip_rendering_result.mentions_wildcard = True
user_id = "*"
elif user is not None:
assert isinstance(user, FullNameInfo)
if not silent:
self.md.zulip_rendering_result.mentions_user_ids.add(user.id)
name = user.full_name
user_id = str(user.id)
else:
# Don't highlight @mentions that don't refer to a valid user
return None, None, None
el = Element("span")
el.set("data-user-id", user_id)
text = f"{name}"
if silent:
el.set("class", "user-mention silent")
else:
el.set("class", "user-mention")
text = f"@{text}"
el.text = markdown.util.AtomicString(text)
return el, m.start(), m.end()
return None, None, None
class UserGroupMentionPattern(CompiledInlineProcessor):
def handleMatch( # type: ignore[override] # supertype incompatible with supersupertype
self, m: Match[str], data: str
) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]:
name = m.group("match")
silent = m.group("silent") == "_"
db_data: Optional[DbData] = self.md.zulip_db_data
if db_data is not None:
user_group = db_data.mention_data.get_user_group(name)
if user_group:
if not silent:
self.md.zulip_rendering_result.mentions_user_group_ids.add(user_group.id)
name = user_group.name
user_group_id = str(user_group.id)
else:
# Don't highlight @-mentions that don't refer to a valid user
# group.
return None, None, None
el = Element("span")
el.set("data-user-group-id", user_group_id)
if silent:
el.set("class", "user-group-mention silent")
text = f"{name}"
else:
el.set("class", "user-group-mention")
text = f"@{name}"
el.text = markdown.util.AtomicString(text)
return el, m.start(), m.end()
return None, None, None
class StreamPattern(CompiledInlineProcessor):
def find_stream_id(self, name: str) -> Optional[int]:
db_data: Optional[DbData] = self.md.zulip_db_data
if db_data is None:
return None
stream_id = db_data.stream_names.get(name)
return stream_id
def handleMatch( # type: ignore[override] # supertype incompatible with supersupertype
self, m: Match[str], data: str
) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]:
name = m.group("stream_name")
stream_id = self.find_stream_id(name)
if stream_id is None:
return None, None, None
el = Element("a")
el.set("class", "stream")
el.set("data-stream-id", str(stream_id))
# TODO: We should quite possibly not be specifying the
# href here and instead having the browser auto-add the
# href when it processes a message with one of these, to
# provide more clarity to API clients.
# Also do the same for StreamTopicPattern.
stream_url = encode_stream(stream_id, name)
el.set("href", f"/#narrow/stream/{stream_url}")
text = f"#{name}"
el.text = markdown.util.AtomicString(text)
return el, m.start(), m.end()
class StreamTopicPattern(CompiledInlineProcessor):
def find_stream_id(self, name: str) -> Optional[int]:
db_data: Optional[DbData] = self.md.zulip_db_data
if db_data is None:
return None
stream_id = db_data.stream_names.get(name)
return stream_id
def handleMatch( # type: ignore[override] # supertype incompatible with supersupertype
self, m: Match[str], data: str
) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]:
stream_name = m.group("stream_name")
topic_name = m.group("topic_name")
stream_id = self.find_stream_id(stream_name)
if stream_id is None or topic_name is None:
return None, None, None
el = Element("a")
el.set("class", "stream-topic")
el.set("data-stream-id", str(stream_id))
stream_url = encode_stream(stream_id, stream_name)
topic_url = hash_util_encode(topic_name)
link = f"/#narrow/stream/{stream_url}/topic/{topic_url}"
el.set("href", link)
text = f"#{stream_name} > {topic_name}"
el.text = markdown.util.AtomicString(text)
return el, m.start(), m.end()
def possible_linked_stream_names(content: str) -> Set[str]:
matches = re.findall(STREAM_LINK_REGEX, content, re.VERBOSE)
for match in re.finditer(STREAM_TOPIC_LINK_REGEX, content, re.VERBOSE):
matches.append(match.group("stream_name"))
return set(matches)
class AlertWordNotificationProcessor(markdown.preprocessors.Preprocessor):
allowed_before_punctuation = {" ", "\n", "(", '"', ".", ",", "'", ";", "[", "*", "`", ">"}
allowed_after_punctuation = {
" ",
"\n",
")",
'",',
"?",
":",
".",
",",
"'",
";",
"]",
"!",
"*",
"`",
}
def check_valid_start_position(self, content: str, index: int) -> bool:
if index <= 0 or content[index] in self.allowed_before_punctuation:
return True
return False
def check_valid_end_position(self, content: str, index: int) -> bool:
if index >= len(content) or content[index] in self.allowed_after_punctuation:
return True
return False
def run(self, lines: List[str]) -> List[str]:
db_data: Optional[DbData] = self.md.zulip_db_data
if db_data is not None:
# We check for alert words here, the set of which are
# dependent on which users may see this message.
#
# Our caller passes in the list of possible_words. We
# don't do any special rendering; we just append the alert words
# we find to the set self.md.zulip_rendering_result.user_ids_with_alert_words.
realm_alert_words_automaton = db_data.realm_alert_words_automaton
if realm_alert_words_automaton is not None:
content = "\n".join(lines).lower()
for end_index, (original_value, user_ids) in realm_alert_words_automaton.iter(
content
):
if self.check_valid_start_position(
content, end_index - len(original_value)
) and self.check_valid_end_position(content, end_index + 1):
self.md.zulip_rendering_result.user_ids_with_alert_words.update(user_ids)
return lines
class LinkInlineProcessor(markdown.inlinepatterns.LinkInlineProcessor):
def zulip_specific_link_changes(self, el: Element) -> Union[None, Element]:
href = el.get("href")
assert href is not None
# Sanitize URL or don't parse link. See linkify_tests in markdown_test_cases for banned syntax.
href = sanitize_url(self.unescape(href.strip()))
if href is None:
return None # no-op; the link is not processed.
# Rewrite local links to be relative
db_data: Optional[DbData] = self.md.zulip_db_data
href = rewrite_local_links_to_relative(db_data, href)
# Make changes to <a> tag attributes
el.set("href", href)
# Show link href if title is empty
if not el.text or not el.text.strip():
el.text = href
# Prevent linkifiers from running on the content of a Markdown link, breaking up the link.
# This is a monkey-patch, but it might be worth sending a version of this change upstream.
el.text = markdown.util.AtomicString(el.text)
return el
def handleMatch( # type: ignore[override] # supertype incompatible with supersupertype
self, m: Match[str], data: str
) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]:
ret = super().handleMatch(m, data)
if ret[0] is not None:
el: Optional[Element]
el, match_start, index = ret
el = self.zulip_specific_link_changes(el)
if el is not None:
return el, match_start, index
return None, None, None
def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.util.Registry:
# Registry is a new class added by Python-Markdown to replace OrderedDict.
# Since Registry doesn't support .keys(), it is easier to make a new
# object instead of removing keys from the existing object.
new_r = markdown.util.Registry()
for k in keys:
new_r.register(r[k], k, r.get_index_for_name(k))
return new_r
# These are used as keys ("linkifiers_keys") to md_engines and the respective
# linkifier caches
DEFAULT_MARKDOWN_KEY = -1
ZEPHYR_MIRROR_MARKDOWN_KEY = -2
class Markdown(markdown.Markdown):
zulip_message: Optional[Message]
zulip_realm: Optional[Realm]
zulip_db_data: Optional[DbData]
zulip_rendering_result: Optional[MessageRenderingResult]
image_preview_enabled: bool
url_embed_preview_enabled: bool
def __init__(
self,
linkifiers: List[LinkifierDict],
linkifiers_key: int,
email_gateway: bool,
) -> None:
self.linkifiers = linkifiers
self.linkifiers_key = linkifiers_key
self.email_gateway = email_gateway
super().__init__(
extensions=[
nl2br.makeExtension(),
tables.makeExtension(),
codehilite.makeExtension(
linenums=False,
guess_lang=False,
),
],
)
self.set_output_format("html")
def build_parser(self) -> markdown.Markdown:
# Build the parser using selected default features from Python-Markdown.
# The complete list of all available processors can be found in the
# super().build_parser() function.
#
# Note: for any Python-Markdown updates, manually check if we want any
# of the new features added upstream or not; they wouldn't get
# included by default.
self.preprocessors = self.build_preprocessors()
self.parser = self.build_block_parser()
self.inlinePatterns = self.build_inlinepatterns()
self.treeprocessors = self.build_treeprocessors()
self.postprocessors = self.build_postprocessors()
self.handle_zephyr_mirror()
return self
def build_preprocessors(self) -> markdown.util.Registry:
# We disable the following preprocessors from upstream:
#
# html_block - insecure
# reference - references don't make sense in a chat context.
preprocessors = markdown.util.Registry()
preprocessors.register(MarkdownListPreprocessor(self), "hanging_lists", 35)
preprocessors.register(
markdown.preprocessors.NormalizeWhitespace(self), "normalize_whitespace", 30
)
preprocessors.register(fenced_code.FencedBlockPreprocessor(self), "fenced_code_block", 25)
preprocessors.register(
AlertWordNotificationProcessor(self), "custom_text_notifications", 20
)
return preprocessors
def build_block_parser(self) -> BlockParser:
# We disable the following blockparsers from upstream:
#
# indent - replaced by ours
# setextheader - disabled; we only support hashheaders for headings
# olist - replaced by ours
# ulist - replaced by ours
# quote - replaced by ours
parser = BlockParser(self)
parser.blockprocessors.register(
markdown.blockprocessors.EmptyBlockProcessor(parser), "empty", 95
)
parser.blockprocessors.register(ListIndentProcessor(parser), "indent", 90)
if not self.email_gateway:
parser.blockprocessors.register(
markdown.blockprocessors.CodeBlockProcessor(parser), "code", 85
)
parser.blockprocessors.register(HashHeaderProcessor(parser), "hashheader", 80)
# We get priority 75 from 'table' extension
parser.blockprocessors.register(markdown.blockprocessors.HRProcessor(parser), "hr", 70)
parser.blockprocessors.register(OListProcessor(parser), "olist", 65)
parser.blockprocessors.register(UListProcessor(parser), "ulist", 60)
parser.blockprocessors.register(BlockQuoteProcessor(parser), "quote", 55)
parser.blockprocessors.register(
markdown.blockprocessors.ParagraphProcessor(parser), "paragraph", 50
)
return parser
def build_inlinepatterns(self) -> markdown.util.Registry:
# We disable the following upstream inline patterns:
#
# backtick - replaced by ours
# escape - probably will re-add at some point.
# link - replaced by ours
# image_link - replaced by ours
# autolink - replaced by ours
# automail - replaced by ours
# linebreak - we use nl2br and consider that good enough
# html - insecure
# reference - references not useful
# image_reference - references not useful
# short_reference - references not useful
# ---------------------------------------------------
# strong_em - for these three patterns,
# strong2 - we have our own versions where
# emphasis2 - we disable _ for bold and emphasis
# Declare regexes for clean single line calls to .register().
#
# Custom strikethrough syntax: ~~foo~~
DEL_RE = r"(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)"
# Custom bold syntax: **foo** but not __foo__
# The text inside the asterisks must start and end with a word character;
# this is needed for things like "const char *x = (char *)y".
EMPHASIS_RE = r"(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*"
STRONG_RE = r"(\*\*)([^\n]+?)\2"
STRONG_EM_RE = r"(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*"
TEX_RE = r"\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B"
TIMESTAMP_RE = r"<time:(?P<time>[^>]*?)>"
# Add inline patterns. We use a custom numbering of the
# rules, that preserves the order from upstream but leaves
# space for us to add our own.
reg = markdown.util.Registry()
reg.register(BacktickInlineProcessor(markdown.inlinepatterns.BACKTICK_RE), "backtick", 105)
reg.register(
markdown.inlinepatterns.DoubleTagPattern(STRONG_EM_RE, "strong,em"), "strong_em", 100
)
reg.register(UserMentionPattern(mention.MENTIONS_RE, self), "usermention", 95)
reg.register(Tex(TEX_RE, self), "tex", 90)
reg.register(StreamTopicPattern(get_compiled_stream_topic_link_regex(), self), "topic", 87)
reg.register(StreamPattern(get_compiled_stream_link_regex(), self), "stream", 85)
reg.register(Timestamp(TIMESTAMP_RE), "timestamp", 75)
reg.register(
UserGroupMentionPattern(mention.USER_GROUP_MENTIONS_RE, self), "usergroupmention", 65
)
reg.register(LinkInlineProcessor(markdown.inlinepatterns.LINK_RE, self), "link", 60)
reg.register(AutoLink(get_web_link_regex(), self), "autolink", 55)
# Reserve priority 45-54 for linkifiers
reg = self.register_linkifiers(reg)
reg.register(
markdown.inlinepatterns.HtmlInlineProcessor(markdown.inlinepatterns.ENTITY_RE, self),
"entity",
40,
)
reg.register(markdown.inlinepatterns.SimpleTagPattern(STRONG_RE, "strong"), "strong", 35)
reg.register(markdown.inlinepatterns.SimpleTagPattern(EMPHASIS_RE, "em"), "emphasis", 30)
reg.register(markdown.inlinepatterns.SimpleTagPattern(DEL_RE, "del"), "del", 25)
reg.register(
markdown.inlinepatterns.SimpleTextInlineProcessor(
markdown.inlinepatterns.NOT_STRONG_RE
),
"not_strong",
20,
)
reg.register(Emoji(EMOJI_REGEX, self), "emoji", 15)
reg.register(EmoticonTranslation(EMOTICON_RE, self), "translate_emoticons", 10)
# We get priority 5 from 'nl2br' extension
reg.register(UnicodeEmoji(UNICODE_EMOJI_RE), "unicodeemoji", 0)
return reg
def register_linkifiers(self, inlinePatterns: markdown.util.Registry) -> markdown.util.Registry:
for linkifier in self.linkifiers:
pattern = linkifier["pattern"]
inlinePatterns.register(
LinkifierPattern(pattern, linkifier["url_format"], self),
f"linkifiers/{pattern}",
45,
)
return inlinePatterns
def build_treeprocessors(self) -> markdown.util.Registry:
# Here we build all the processors from upstream, plus a few of our own.
treeprocessors = markdown.util.Registry()
# We get priority 30 from 'hilite' extension
treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), "inline", 25)
treeprocessors.register(markdown.treeprocessors.PrettifyTreeprocessor(self), "prettify", 20)
treeprocessors.register(
InlineInterestingLinkProcessor(self), "inline_interesting_links", 15
)
if settings.CAMO_URI:
treeprocessors.register(InlineImageProcessor(self), "rewrite_images_proxy", 10)
return treeprocessors
def build_postprocessors(self) -> markdown.util.Registry:
# These are the default Python-Markdown processors, unmodified.
postprocessors = markdown.util.Registry()
postprocessors.register(markdown.postprocessors.RawHtmlPostprocessor(self), "raw_html", 20)
postprocessors.register(
markdown.postprocessors.AndSubstitutePostprocessor(), "amp_substitute", 15
)
postprocessors.register(markdown.postprocessors.UnescapePostprocessor(), "unescape", 10)
return postprocessors
def handle_zephyr_mirror(self) -> None:
if self.linkifiers_key == ZEPHYR_MIRROR_MARKDOWN_KEY:
# Disable almost all inline patterns for zephyr mirror
# users' traffic that is mirrored. Note that
# inline_interesting_links is a treeprocessor and thus is
# not removed
self.inlinePatterns = get_sub_registry(self.inlinePatterns, ["autolink"])
self.treeprocessors = get_sub_registry(
self.treeprocessors, ["inline_interesting_links", "rewrite_images_proxy"]
)
# insert new 'inline' processor because we have changed self.inlinePatterns
# but InlineProcessor copies md as self.md in __init__.
self.treeprocessors.register(
markdown.treeprocessors.InlineProcessor(self), "inline", 25
)
self.preprocessors = get_sub_registry(self.preprocessors, ["custom_text_notifications"])
self.parser.blockprocessors = get_sub_registry(
self.parser.blockprocessors, ["paragraph"]
)
md_engines: Dict[Tuple[int, bool], Markdown] = {}
linkifier_data: Dict[int, List[LinkifierDict]] = {}
def make_md_engine(linkifiers_key: int, email_gateway: bool) -> None:
md_engine_key = (linkifiers_key, email_gateway)
if md_engine_key in md_engines:
del md_engines[md_engine_key]
linkifiers = linkifier_data[linkifiers_key]
md_engines[md_engine_key] = Markdown(
linkifiers=linkifiers,
linkifiers_key=linkifiers_key,
email_gateway=email_gateway,
)
# Split the topic name into multiple sections so that we can easily use
# our common single link matching regex on it.
basic_link_splitter = re.compile(r"[ !;\?\),\'\"]")
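# For example (illustrative, not in the original source):
#   basic_link_splitter.split("see https://example.com, thanks")
#   -> ["see", "https://example.com", "", "thanks"]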
# Security note: We don't do any HTML escaping in this
# function on the URLs; they are expected to be HTML-escaped when
# rendered by clients (just as links rendered into message bodies
# are validated and escaped inside `url_to_a`).
def topic_links(linkifiers_key: int, topic_name: str) -> List[Dict[str, str]]:
matches: List[Dict[str, Union[str, int]]] = []
linkifiers = linkifiers_for_realm(linkifiers_key)
options = re2.Options()
options.log_errors = False
for linkifier in linkifiers:
raw_pattern = linkifier["pattern"]
url_format_string = linkifier["url_format"]
try:
pattern = re2.compile(prepare_linkifier_pattern(raw_pattern), options=options)
except re2.error:
# An invalid regex shouldn't be possible here, and logging
# here on an invalid regex would spam the logs with every
# message sent; simply move on.
continue
pos = 0
while pos < len(topic_name):
m = pattern.search(topic_name, pos)
if m is None:
break
match_details = m.groupdict()
match_text = match_details[OUTER_CAPTURE_GROUP]
# Adjust the start point of the match for the next
# iteration -- we rewind the non-word character at the
# end, if there was one, so a potential next match can
# also use it.
pos = m.end() - len(match_details[AFTER_CAPTURE_GROUP])
# We format the linkifier's url string using the matched text.
# Also, we include the matched text in the response, so that our clients
# don't have to implement any logic of their own to get back the text.
matches += [
dict(
url=url_format_string % match_details,
text=match_text,
index=topic_name.find(match_text),
)
]
# Also make raw URLs navigable.
for sub_string in basic_link_splitter.split(topic_name):
link_match = re.match(get_web_link_regex(), sub_string)
if link_match:
actual_match_url = link_match.group("url")
result = urlsplit(actual_match_url)
if not result.scheme:
if not result.netloc:
i = (result.path + "/").index("/")
result = result._replace(netloc=result.path[:i], path=result.path[i:])
url = result._replace(scheme="https").geturl()
else:
url = actual_match_url
matches.append(
dict(url=url, text=actual_match_url, index=topic_name.find(actual_match_url))
)
# In order to preserve the order in which the links occur, we sort the matched text
# based on its starting index in the topic. We pop the index field before returning.
matches = sorted(matches, key=lambda k: k["index"])
return [{k: str(v) for k, v in match.items() if k != "index"} for match in matches]
def maybe_update_markdown_engines(linkifiers_key: int, email_gateway: bool) -> None:
global linkifier_data
linkifiers = linkifiers_for_realm(linkifiers_key)
if linkifiers_key not in linkifier_data or linkifier_data[linkifiers_key] != linkifiers:
# Linkifier data has changed, update `linkifier_data` and any
# of the existing Markdown engines using this set of linkifiers.
linkifier_data[linkifiers_key] = linkifiers
for email_gateway_flag in [True, False]:
if (linkifiers_key, email_gateway_flag) in md_engines:
                # Update only existing engines (if any); don't create new ones.
make_md_engine(linkifiers_key, email_gateway_flag)
if (linkifiers_key, email_gateway) not in md_engines:
        # Markdown engine corresponding to this key doesn't exist, so create one.
make_md_engine(linkifiers_key, email_gateway)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile("\\w")
def privacy_clean_markdown(content: str) -> str:
return repr(_privacy_re.sub("x", content))
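# For example (illustrative): privacy_clean_markdown("Call 555-0100 at 5pm")
# returns "'xxxx xxx-xxxx xx xxx'" -- word characters are masked, whitespace
# and punctuation are kept, and repr() adds the surrounding quotes.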
def do_convert(
content: str,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
message: Optional[Message] = None,
message_realm: Optional[Realm] = None,
sent_by_bot: bool = False,
translate_emoticons: bool = False,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
no_previews: bool = False,
) -> MessageRenderingResult:
"""Convert Markdown to HTML, with Zulip-specific settings and hacks."""
# This logic is a bit convoluted, but the overall goal is to support a range of use cases:
# * Nothing is passed in other than content -> just run default options (e.g. for docs)
# * message is passed, but no realm is -> look up realm from message
# * message_realm is passed -> use that realm for Markdown purposes
if message is not None:
if message_realm is None:
message_realm = message.get_realm()
if message_realm is None:
linkifiers_key = DEFAULT_MARKDOWN_KEY
else:
linkifiers_key = message_realm.id
if message and hasattr(message, "id") and message.id:
logging_message_id = "id# " + str(message.id)
else:
logging_message_id = "unknown"
if message is not None and message_realm is not None:
if message_realm.is_zephyr_mirror_realm:
if message.sending_client.name == "zephyr_mirror":
# Use slightly customized Markdown processor for content
# delivered via zephyr_mirror
linkifiers_key = ZEPHYR_MIRROR_MARKDOWN_KEY
maybe_update_markdown_engines(linkifiers_key, email_gateway)
md_engine_key = (linkifiers_key, email_gateway)
_md_engine = md_engines[md_engine_key]
# Reset the parser; otherwise it will get slower over time.
_md_engine.reset()
# Filters such as UserMentionPattern need a message.
rendering_result: MessageRenderingResult = MessageRenderingResult(
rendered_content="",
mentions_wildcard=False,
mentions_user_ids=set(),
mentions_user_group_ids=set(),
alert_words=set(),
links_for_preview=set(),
user_ids_with_alert_words=set(),
potential_attachment_path_ids=[],
)
_md_engine.zulip_message = message
_md_engine.zulip_rendering_result = rendering_result
_md_engine.zulip_realm = message_realm
_md_engine.zulip_db_data = None # for now
_md_engine.image_preview_enabled = image_preview_enabled(message, message_realm, no_previews)
_md_engine.url_embed_preview_enabled = url_embed_preview_enabled(
message, message_realm, no_previews
)
# Pre-fetch data from the DB that is used in the Markdown thread
if message_realm is not None:
# Here we fetch the data structures needed to render
# mentions/stream mentions from the database, but only
# if there is syntax in the message that might use them, since
# the fetches are somewhat expensive and these types of syntax
# are uncommon enough that it's a useful optimization.
if mention_data is None:
mention_backend = MentionBackend(message_realm.id)
mention_data = MentionData(mention_backend, content)
stream_names = possible_linked_stream_names(content)
stream_name_info = mention_data.get_stream_name_map(stream_names)
if content_has_emoji_syntax(content):
active_realm_emoji = message_realm.get_active_emoji()
else:
active_realm_emoji = {}
_md_engine.zulip_db_data = DbData(
realm_alert_words_automaton=realm_alert_words_automaton,
mention_data=mention_data,
active_realm_emoji=active_realm_emoji,
realm_uri=message_realm.uri,
sent_by_bot=sent_by_bot,
stream_names=stream_name_info,
translate_emoticons=translate_emoticons,
)
try:
# Spend at most 5 seconds rendering; this protects the backend
# from being overloaded by bugs (e.g. Markdown logic that is
# extremely inefficient in corner cases) as well as user
# errors (e.g. a linkifier that makes some syntax
# infinite-loop).
rendering_result.rendered_content = timeout(5, lambda: _md_engine.convert(content))
# Throw an exception if the content is huge; this protects the
# rest of the codebase from any bugs where we end up rendering
# something huge.
MAX_MESSAGE_LENGTH = settings.MAX_MESSAGE_LENGTH
if len(rendering_result.rendered_content) > MAX_MESSAGE_LENGTH * 100:
raise MarkdownRenderingException(
f"Rendered content exceeds {MAX_MESSAGE_LENGTH * 100} characters (message {logging_message_id})"
)
return rendering_result
except Exception:
cleaned = privacy_clean_markdown(content)
# NOTE: Don't change this message without also changing the
# logic in logging_handlers.py or we can create recursive
# exceptions.
markdown_logger.exception(
"Exception in Markdown parser; input (sanitized) was: %s\n (message %s)",
cleaned,
logging_message_id,
)
raise MarkdownRenderingException()
finally:
# These next three lines are slightly paranoid, since
# we always set these right before actually using the
        # engine, but better safe than sorry.
_md_engine.zulip_message = None
_md_engine.zulip_realm = None
_md_engine.zulip_db_data = None
markdown_time_start = 0.0
markdown_total_time = 0.0
markdown_total_requests = 0
def get_markdown_time() -> float:
return markdown_total_time
def get_markdown_requests() -> int:
return markdown_total_requests
def markdown_stats_start() -> None:
global markdown_time_start
markdown_time_start = time.time()
def markdown_stats_finish() -> None:
global markdown_total_time
global markdown_total_requests
global markdown_time_start
markdown_total_requests += 1
markdown_total_time += time.time() - markdown_time_start
def markdown_convert(
content: str,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
message: Optional[Message] = None,
message_realm: Optional[Realm] = None,
sent_by_bot: bool = False,
translate_emoticons: bool = False,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
no_previews: bool = False,
) -> MessageRenderingResult:
markdown_stats_start()
ret = do_convert(
content,
realm_alert_words_automaton,
message,
message_realm,
sent_by_bot,
translate_emoticons,
mention_data,
email_gateway,
no_previews=no_previews,
)
markdown_stats_finish()
return ret
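# Illustrative use (not part of this module): rendering plain content with
# default options, as when no message or realm is available (e.g. for docs):
#   result = markdown_convert("**hello** and a link https://zulip.com")
#   html = result.rendered_content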
| {
"content_hash": "b5ee06e854957dac36eb8cf00b4ee00b",
"timestamp": "",
"source": "github",
"line_count": 2628,
"max_line_length": 160,
"avg_line_length": 39.092465753424655,
"alnum_prop": 0.5875212926461284,
"repo_name": "kou/zulip",
"id": "00df4937bd50ddc3284f25c5e838b4c52f1128e0",
"size": "102861",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/lib/markdown/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433376"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "635452"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3361648"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79932"
},
{
"name": "Python",
"bytes": "8142846"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "134587"
},
{
"name": "TypeScript",
"bytes": "20233"
}
],
"symlink_target": ""
} |
from unipath import Path
import os
PROJECT_ROOT = Path(__file__).ancestor(2)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Paul Hallett', '[email protected]'),
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MANAGERS = ADMINS
BASE_URL = 'http://pokeapi.co'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['.pokeapi.co', 'localhost']
TIME_ZONE = 'Europe/London'
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Explicitly define test runner to avoid warning messages on test execution
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
MEDIA_ROOT = PROJECT_ROOT.child('media')
MEDIA_URL = '/media/'
STATIC_ROOT = PROJECT_ROOT.child('assets')
STATIC_URL = '/assets/'
STATICFILES_DIRS = (
# '/pokemon/assets/',
# 'pokemon_v2/assets/',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
SECRET_KEY = '4nksdock439320df*(^x2_scm-o$*py3e@-awu-n^hipkm%2l$sw$&2l#'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
TEMPLATE_DIRS = (
PROJECT_ROOT.child('templates'),
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'pokeapi_co_db',
'USER': 'root',
'PASSWORD': 'pokeapi',
'HOST': 'localhost',
'PORT': '',
'CONN_MAX_AGE': 30
}
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
SECRET_KEY = os.environ.get(
'SECRET_KEY',
'ubx+22!jbo(^x2_scm-o$*py3e@-awu-n^hipkm%2l$sw$&2l#')
CUSTOM_APPS = (
'tastypie',
'pokemon',
'pokemon_v2',
'hits',
'alerts',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.humanize',
'corsheaders',
'rest_framework',
'markdown_deux',
'cachalot'
) + CUSTOM_APPS
API_LIMIT_PER_PAGE = 1
TASTYPIE_DEFAULT_FORMATS = ['json']
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = (
    'GET',
)
CORS_URLS_REGEX = r'^/api/.*$'
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'drf_ujson.renderers.UJSONRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'drf_ujson.renderers.UJSONRenderer',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 20,
'PAGINATE_BY': 20,
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'anon': '1000/hour'
}
}
MARKDOWN_DEUX_STYLES = {
"default": {
"extras": {
"code-friendly": None,
"tables": None,
"fenced-code-blocks": None,
"header-ids": None
},
"safe_mode": False,
},
}
# Stripe
STRIPE_TEST_SECRET_KEY = os.environ.get('STRIPE_TEST_SECRET_KEY', '')
STRIPE_SECRET_KEY = os.environ.get('STRIPE_SECRET_KEY', '')
STRIPE_PUBLISHABLE_KEY = os.environ.get('STRIPE_PUBLISHABLE_KEY', '')
STRIPE_TEST_PUBLISHABLE_KEY = os.environ.get('STRIPE_TEST_PUBLISHABLE_KEY', '')
if DEBUG:
STRIPE_KEYS = {
"secret": STRIPE_TEST_SECRET_KEY,
"publishable": STRIPE_TEST_PUBLISHABLE_KEY
}
else:
STRIPE_KEYS = {
"secret": STRIPE_SECRET_KEY,
"publishable": STRIPE_PUBLISHABLE_KEY
}
| {
"content_hash": "497b63b9af0313286c3cfa312eb8d03a",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 82,
"avg_line_length": 23.683417085427134,
"alnum_prop": 0.6541481009972416,
"repo_name": "phalt/pokeapi",
"id": "1b2731b4d91a8c16dc69788b7a2793f649095441",
"size": "4735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5766"
},
{
"name": "HTML",
"bytes": "61939"
},
{
"name": "JavaScript",
"bytes": "2447"
},
{
"name": "Makefile",
"bytes": "459"
},
{
"name": "Python",
"bytes": "784630"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2020 The Orbit Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""
import logging
import time
from pywinauto.base_wrapper import BaseWrapper
from core.orbit_e2e import E2ETestCase, wait_for_condition
class MoveTab(E2ETestCase):
"""
Move a tab from the right widget to the left, and back again. Verify the position after each move.
"""
def right_click_move_context(self, item):
item.click_input(button='right')
context_menu = self.suite.application.window(best_match="TabBarContextMenu")
self.find_control("MenuItem", name_contains="Move",
parent=context_menu).click_input(button='left')
@staticmethod
def _count_tabs(tab_control: BaseWrapper) -> int:
return len(tab_control.children(control_type='TabItem'))
def _execute(self, tab_title, tab_name):
# Find tab and left and right tab bar
tab_item = self.find_control("TabItem", tab_title)
right_tab_bar = self.find_control("Tab",
parent=self.find_control("Group", "RightTabWidget"),
recurse=False)
left_tab_bar = self.find_control("Tab",
parent=self.find_control("Group", "MainTabWidget"),
recurse=False)
# Init tests
left_tab_count = self._count_tabs(left_tab_bar)
right_tab_count = self._count_tabs(right_tab_bar)
tab_parent = tab_item.parent()
self.expect_eq(tab_parent, right_tab_bar,
"%s tab is initialized in the right pane" % tab_title)
# Move "Functions" tab to the left pane, check no. of tabs and if the tab is enabled
logging.info('Moving tab to the left pane (current tab count: %d)', right_tab_count)
self.right_click_move_context(tab_item)
self.expect_eq(self._count_tabs(right_tab_bar), right_tab_count - 1,
"1 tab removed from right pane")
self.expect_eq(self._count_tabs(left_tab_bar), left_tab_count + 1,
"1 tab added to the left pane")
tab_item = self.find_control("TabItem", name=tab_title)
self.expect_eq(tab_item.parent(), left_tab_bar, "Tab is parented under the left pane")
self.expect_true(self.find_control("Group", name=tab_name).is_visible(), "Tab is visible")
# Move back, check no. of tabs
logging.info('Moving "%s" tab back to the right pane', tab_title)
self.right_click_move_context(tab_item)
self.expect_eq(self._count_tabs(right_tab_bar), right_tab_count,
"1 tab removed from left pane")
self.expect_eq(self._count_tabs(left_tab_bar), left_tab_count,
"1 tab added to the right pane")
tab_item = self.find_control("TabItem", name=tab_title)
self.expect_eq(tab_item.parent(), right_tab_bar, "Tab is parented under the right pane")
self.expect_true(
self.find_control("Group", name=tab_name).is_visible(), "Functions tab is visible")
class EndSession(E2ETestCase):
"""
Click menu entry to end session.
"""
def _execute(self):
time.sleep(1)
app_menu = self.suite.top_window().descendants(control_type="MenuBar")[1]
app_menu.item_by_path("File->End Session").click_input()
wait_for_condition(
lambda: self.suite.application.top_window().class_name() ==
"orbit_session_setup::SessionSetupDialog", 30)
self.suite.top_window(force_update=True)
class DismissDialog(E2ETestCase):
"""
Find a dialog, compare the title and dismiss it by clicking 'OK'.
"""
def _execute(self, title_contains: str):
searchRegex = ".*" + title_contains + ".*"
dialog_box = self.suite.top_window().child_window(title_re=searchRegex,
control_type="Window")
self.expect_true(dialog_box is not None, 'Dialog found.')
ok_button = dialog_box.descendants(control_type='Button', title='OK')[0]
self.expect_true(ok_button is not None, 'OK Button found.')
ok_button.click_input()
class RenameMoveCaptureFile(E2ETestCase):
"""
Click menu entry "Rename/Move Capture File", enter new_capture_path and confirm it.
"""
def _execute(self, new_capture_path):
app_menu = self.suite.top_window().descendants(control_type="MenuBar")[1]
app_menu.item_by_path("File->Rename/Move Capture File").click_input()
wait_for_condition(lambda: self.find_control('Edit', 'File name:') is not None,
max_seconds=120)
file_name_edit = self.find_control('Edit', 'File name:')
file_name_edit.set_edit_text(new_capture_path)
save_button = self.find_control('Button', 'Save')
save_button.click_input()
# This waits until no more "Window"s can be found. That means only the OrbitMainWindow is
        # visible, with no warnings, errors, or progress dialogs
wait_for_condition(
lambda: self.find_control(control_type="Window", raise_on_failure=False) is None,
max_seconds=30)
| {
"content_hash": "49b72e4d058999825691e92a30547800",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 102,
"avg_line_length": 42.301587301587304,
"alnum_prop": 0.6112570356472795,
"repo_name": "google/orbit",
"id": "7f4a7aa21098b89b9b149a70ae007d00de5322f4",
"size": "5330",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "contrib/automation_tests/test_cases/main_window.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "220990"
},
{
"name": "C++",
"bytes": "7042919"
},
{
"name": "CMake",
"bytes": "185711"
},
{
"name": "Jupyter Notebook",
"bytes": "1930"
},
{
"name": "PowerShell",
"bytes": "6590"
},
{
"name": "Python",
"bytes": "241593"
},
{
"name": "Shell",
"bytes": "19524"
},
{
"name": "Starlark",
"bytes": "36414"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutStringManipulation(Koan):
def test_use_format_to_interpolate_variables(self):
value1 = 'one'
value2 = 2
string = "The values are {0} and {1}".format(value1, value2)
self.assertEqual("The values are one and 2", string)
def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
value1 = 'doh'
value2 = 'DOH'
string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
self.assertEqual("The values are DOH, doh, doh and DOH!", string)
def test_any_python_expression_may_be_interpolated(self):
import math # import a standard python module with math functions
decimal_places = 4
string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5), \
decimal_places)
self.assertEqual("The square root of 5 is 2.2361", string)
def test_you_can_get_a_substring_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("let", string[7:10])
def test_you_can_get_a_single_character_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual('a', string[1])
def test_single_characters_can_be_represented_by_integers(self):
self.assertEqual(97, ord('a'))
self.assertEqual(True, ord('b') == (ord('a') + 1))
def test_strings_can_be_split(self):
string = "Sausage Egg Cheese"
words = string.split()
self.assertEqual(['Sausage', 'Egg','Cheese'], words)
def test_strings_can_be_split_with_different_patterns(self):
import re # import python regular expression library
string = "the,rain;in,spain"
pattern = re.compile(',|;')
words = pattern.split(string)
self.assertEqual(['the','rain', 'in','spain'], words)
# `pattern` is a Python regular expression pattern which matches
# ',' or ';'
def test_raw_strings_do_not_interpret_escape_characters(self):
string = r'\n'
self.assertNotEqual('\n', string)
self.assertEqual('\\n', string)
self.assertEqual(2, len(string))
# Useful in regular expressions, file paths, URLs, etc.
def test_strings_can_be_joined(self):
words = ["Now", "is", "the", "time"]
self.assertEqual("Now is the time", ' '.join(words))
def test_strings_can_change_case(self):
self.assertEqual('Guido', 'guido'.capitalize())
self.assertEqual('GUIDO', 'guido'.upper())
self.assertEqual('timbot', 'TimBot'.lower())
self.assertEqual('Guido Van Rossum', 'guido van rossum'.title())
self.assertEqual('tOtAlLy AwEsOmE', 'ToTaLlY aWeSoMe'.swapcase())
| {
"content_hash": "0ec9d349593bb220af61c253132d1e54",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 37.41095890410959,
"alnum_prop": 0.6188209447088978,
"repo_name": "vatsala/python_koans",
"id": "be41a03718c9efb0626e1dcd7c5d2030a4bc499f",
"size": "2778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_string_manipulation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "Python",
"bytes": "316779"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
} |
import io
import logging
import research.coding.varbyte
from research.index.common import Metadata
from research.index.common import raise_property_not_found
from research.lexicon import ArrayLexicon
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class ForwardIndex:
def __init__(self, properties):
self.metadata = ForwardIndexMetadata(properties)
def reader(self):
return ForwardIndexReader(self.metadata)
def writer(self):
return ForwardIndexWriter(self.metadata)
def prune(self, term_pruner, output_index):
logger.info("Pruning index {0} with pruner {1}".format(self.metadata.name, type(term_pruner).__name__))
logger.info("Processing term file")
with open(self.metadata.terms_path) as input_term_file, \
open(output_index.metadata.terms_path, "w") as output_term_file:
def write_term(t):
output_term_file.write(t)
return 1
offsets = [write_term(term) if term_pruner.test(term[:-1]) else 0 for term in input_term_file]
for i in range(1, len(offsets)):
offsets[i] += offsets[i - 1]
logger.info("Pruning documents")
reader = self.reader()
writer = output_index.writer()
byte_offset = 0
document = reader.next_document()
doc_offset = 0
while document is not None:
term_count = 0
byte_count = 0
term_id = document.next_term_id()
while term_id is not None:
if (term_id > 0 and offsets[term_id] == offsets[term_id - 1]) \
or (term_id == 0 and offsets[term_id] == 0):
byte_count += writer.write_term_id(term_id - offsets[term_id])
term_count += 1
term_id = document.next_term_id()
if term_count > 0:
writer.write_document_info(document.title, document.doc_id - doc_offset, byte_offset, byte_count,
term_count)
else:
doc_offset += 1
byte_offset += byte_count
document = reader.next_document()
class ForwardIndexMetadata(Metadata):
f_doc_info = "doc_info"
f_collection = "collection"
f_terms = "terms"
def check_path(self, field):
if field not in self.paths:
raise_property_not_found(field)
else:
return self.paths[field]
def __init__(self, properties):
super(ForwardIndexMetadata, self).__init__(properties)
assert properties[Metadata.f_type] == "{0}.{1}".format(ForwardIndex.__module__, ForwardIndex.__name__)
if Metadata.f_coding not in properties:
self.coder_module = research.coding.varbyte
else:
self.coder_module = research.utils.get_class_of(properties[Metadata.f_coding])
if Metadata.f_path not in properties:
raise_property_not_found(Metadata.f_path)
else:
self.paths = properties[Metadata.f_path]
self.doc_info_path = self.check_path(ForwardIndexMetadata.f_doc_info)
self.collection_path = self.check_path(ForwardIndexMetadata.f_collection)
self.terms_path = self.check_path(ForwardIndexMetadata.f_terms)
class ForwardIndexReader:
def __init__(self, metadata):
self.doc_info_reader = io.open(metadata.doc_info_path, 'r')
self.term_stream = io.open(metadata.collection_path, 'br')
self.decoder = metadata.coder_module.Decoder(self.term_stream)
self.lexicon = ArrayLexicon(metadata.terms_path)
self.last_doc = None
def next_document(self):
if self.last_doc is not None:
self.last_doc.flush()
meta_line = self.doc_info_reader.readline()
if meta_line == "":
return None
else:
(title, doc_id, offset, size, count) = Document.parse_meta(meta_line)
self.last_doc = Document(title, doc_id, count, self.decoder, self.lexicon)
return self.last_doc
def find_by_title(self, title):
meta_line = self.doc_info_reader.readline()
while meta_line != "":
(doc_title, doc_id, offset, size, count) = Document.parse_meta(meta_line)
if title == doc_title:
self.term_stream.seek(offset)
return Document(title, doc_id, count, self.decoder, self.lexicon)
meta_line = self.doc_info_reader.readline()
return None
def find_by_id(self, id):
meta_line = self.doc_info_reader.readline()
while meta_line != "":
(title, doc_id, offset, size, count) = Document.parse_meta(meta_line)
if id == doc_id:
self.term_stream.seek(offset)
return Document(title, doc_id, count, self.decoder, self.lexicon)
meta_line = self.doc_info_reader.readline()
return None
def skip(self, n):
for i in range(n):
meta_line = self.doc_info_reader.readline()
if meta_line != "":
(title, doc_id, offset, size, count) = Document.parse_meta(meta_line)
self.term_stream.seek(offset)
def close(self):
self.doc_info_reader.close()
self.term_stream.close()
class ForwardIndexWriter:
def __init__(self, metadata):
self.doc_info_writer = io.open(metadata.doc_info_path, 'w')
self.term_stream = io.open(metadata.collection_path, 'bw')
self.encoder = metadata.coder_module.Encoder(self.term_stream)
def write_term_id(self, n):
return self.encoder.encode(n)
def write_document_info(self, title, doc_id, offset, byte_count, term_count):
self.doc_info_writer.write("{0} {1} {2} {3} {4}".format(title, doc_id, offset, byte_count, term_count))
def close(self):
self.doc_info_writer.close()
self.term_stream.close()
class Document:
def __init__(self, title, doc_id, count, decoder, lexicon):
self.title = title
self.doc_id = doc_id
self.count = count
self.remaining = count
self.decoder = decoder
self.lexicon = lexicon
def next_term_id(self):
if self.remaining == 0:
return None
else:
self.remaining -= 1
return self.decoder.decode()
def next_term(self):
term_id = self.next_term_id()
return self.lexicon[term_id] if term_id is not None else None
def flush(self):
while self.remaining > 0:
self.next_term_id()
@staticmethod
def parse_meta(meta_line):
fields = meta_line.split()
if len(fields) != 5:
raise ValueError("expected 5 fields in document meta file, but %d found" % len(fields))
return fields[0], int(fields[1]), int(fields[2]), int(fields[3]), int(fields[4])
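# Illustrative usage sketch (not part of the original module); assumes a
# properties dict carrying the "type", "path" and optional "coding" fields
# that ForwardIndexMetadata expects:
#
#   index = ForwardIndex(properties)
#   reader = index.reader()
#   document = reader.next_document()
#   while document is not None:
#       print(document.title, document.count)
#       document = reader.next_document()
#   reader.close()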
| {
"content_hash": "f2224371efdeb2f5dcea29bf48ed379d",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 113,
"avg_line_length": 35.33163265306123,
"alnum_prop": 0.592202166064982,
"repo_name": "west-tandon/ReSearch",
"id": "cee95bd9b635e0d94fbde952023f5e50f187be08",
"size": "6925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/index/forward.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2573"
},
{
"name": "Makefile",
"bytes": "1533"
},
{
"name": "Python",
"bytes": "54787"
}
],
"symlink_target": ""
} |
"""
A :term:`NIC` (Network Interface Card) is a logical entity that provides a
:term:`Partition` with access to external communication networks through a
:term:`Network Adapter`. More specifically, a NIC connects a Partition with a
:term:`Network Port`, or with a :term:`Virtual Switch` which then connects to
the Network Port.
NIC resources are contained in Partition resources.
NICs only exist in :term:`CPCs <CPC>` that are in DPM mode.
"""
from __future__ import absolute_import
import copy
from ._manager import BaseManager
from ._resource import BaseResource
from ._logging import logged_api_call
from ._utils import matches_filters, RC_NIC
__all__ = ['NicManager', 'Nic']
class NicManager(BaseManager):
"""
Manager providing access to the :term:`NICs <NIC>` in a particular
:term:`Partition`.
Derived from :class:`~zhmcclient.BaseManager`; see there for common methods
and attributes.
Objects of this class are not directly created by the user; they are
accessible via the following instance variable of a
:class:`~zhmcclient.Partition` object (in DPM mode):
* :attr:`~zhmcclient.Partition.nics`
"""
def __init__(self, partition):
# This function should not go into the docs.
# Parameters:
# partition (:class:`~zhmcclient.Partition`):
# Partition defining the scope for this manager.
super(NicManager, self).__init__(
resource_class=Nic,
class_name=RC_NIC,
session=partition.manager.session,
parent=partition,
base_uri='{}/nics'.format(partition.uri),
oid_prop='element-id',
uri_prop='element-uri',
name_prop='name',
query_props=[],
list_has_name=False)
@property
def partition(self):
"""
:class:`~zhmcclient.Partition`: :term:`Partition` defining the scope
for this manager.
"""
return self._parent
@logged_api_call
def list(self, full_properties=False, filter_args=None):
"""
List the NICs in this Partition.
Authorization requirements:
* Object-access permission to this Partition.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties should be
retrieved, vs. only the short set as returned by the list
operation.
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen, i.e. all resources are
returned.
Returns:
: A list of :class:`~zhmcclient.Nic` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
resource_obj_list = []
uris = self.partition.get_property('nic-uris')
if uris:
for uri in uris:
resource_obj = self.resource_class(
manager=self,
uri=uri,
name=None,
properties=None)
if matches_filters(resource_obj, filter_args):
resource_obj_list.append(resource_obj)
if full_properties:
resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
@logged_api_call
def create(self, properties):
"""
Create and configure a NIC in this Partition.
The NIC must be backed by an adapter port (on an OSA, ROCE, or
Hipersockets adapter).
The way the backing adapter port is specified in the "properties"
parameter of this method depends on the adapter type, as follows:
* For OSA and Hipersockets adapters, the "virtual-switch-uri"
property is used to specify the URI of the virtual switch that is
associated with the backing adapter port.
This virtual switch is a resource that automatically exists as soon
as the adapter resource exists. Note that these virtual switches do
not show up in the HMC GUI; but they do show up at the HMC REST API
and thus also at the zhmcclient API as the
:class:`~zhmcclient.VirtualSwitch` class.
The value for the "virtual-switch-uri" property can be determined
from a given adapter name and port index as shown in the following
example code (omitting any error handling):
.. code-block:: python
partition = ... # Partition object for the new NIC
adapter_name = 'OSA #1' # name of adapter with backing port
adapter_port_index = 0 # port index of backing port
adapter = partition.manager.cpc.adapters.find(name=adapter_name)
vswitches = partition.manager.cpc.virtual_switches.findall(
**{'backing-adapter-uri': adapter.uri})
vswitch = None
for vs in vswitches:
if vs.get_property('port') == adapter_port_index:
vswitch = vs
break
properties['virtual-switch-uri'] = vswitch.uri
* For RoCE adapters, the "network-adapter-port-uri" property is used to
specify the URI of the backing adapter port, directly.
The value for the "network-adapter-port-uri" property can be
determined from a given adapter name and port index as shown in the
following example code (omitting any error handling):
.. code-block:: python
partition = ... # Partition object for the new NIC
adapter_name = 'ROCE #1' # name of adapter with backing port
adapter_port_index = 0 # port index of backing port
adapter = partition.manager.cpc.adapters.find(name=adapter_name)
port = adapter.ports.find(index=adapter_port_index)
properties['network-adapter-port-uri'] = port.uri
Authorization requirements:
* Object-access permission to this Partition.
* Object-access permission to the backing Adapter for the new NIC.
* Task permission to the "Partition Details" task.
Parameters:
properties (dict): Initial property values.
Allowable properties are defined in section 'Request body contents'
in section 'Create NIC' in the :term:`HMC API` book.
Returns:
Nic:
The resource object for the new NIC.
The object will have its 'element-uri' property set as returned by
the HMC, and will also have the input properties set.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
result = self.session.post(self.partition.uri + '/nics',
body=properties)
# There should not be overlaps, but just in case there are, the
# returned props should overwrite the input props:
props = copy.deepcopy(properties)
props.update(result)
name = props.get(self._name_prop, None)
uri = props[self._uri_prop]
nic = Nic(self, uri, name, props)
self._name_uri_cache.update(name, uri)
return nic
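# Illustrative sketch (not part of the module): creating a NIC backed by a
# RoCE adapter port, assuming `partition` and `port` were looked up as shown
# in the docstring above:
#
#   nic = partition.nics.create({
#       'name': 'nic1',
#       'network-adapter-port-uri': port.uri,
#   })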
class Nic(BaseResource):
"""
Representation of a :term:`NIC`.
Derived from :class:`~zhmcclient.BaseResource`; see there for common
methods and attributes.
For the properties of a NIC resource, see section
'Data model - NIC Element Object' in section 'Partition object' in the
:term:`HMC API` book.
Objects of this class are not directly created by the user; they are
returned from creation or list functions on their manager object
(in this case, :class:`~zhmcclient.NicManager`).
"""
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.NicManager`):
# Manager object for this resource object.
# uri (string):
# Canonical URI path of the resource.
# name (string):
# Name of the resource.
# properties (dict):
# Properties to be set for this resource object. May be `None` or
# empty.
assert isinstance(manager, NicManager), \
"Nic init: Expected manager type %s, got %s" % \
(NicManager, type(manager))
super(Nic, self).__init__(manager, uri, name, properties)
@logged_api_call
def delete(self):
"""
Delete this NIC.
Authorization requirements:
        * Object-access permission to the Partition containing this NIC.
* Task permission to the "Partition Details" task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
self.manager.session.delete(self._uri)
self.manager._name_uri_cache.delete(
self.get_properties_local(self.manager._name_prop, None))
parent_nic_uris = self.manager.parent.get_properties_local(
'nic-uris')
if parent_nic_uris:
try:
parent_nic_uris.remove(self._uri)
except ValueError:
pass
@logged_api_call
def update_properties(self, properties):
"""
Update writeable properties of this NIC.
This method serializes with other methods that access or change
properties on the same Python object.
Authorization requirements:
* Object-access permission to the Partition containing this NIC.
* Object-access permission to the backing Adapter for this NIC.
* Task permission to the "Partition Details" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model - NIC Element Object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
self.manager.session.post(self.uri, body=properties)
is_rename = self.manager._name_prop in properties
if is_rename:
# Delete the old name from the cache
self.manager._name_uri_cache.delete(self.name)
self.update_properties_local(copy.deepcopy(properties))
if is_rename:
# Add the new name to the cache
self.manager._name_uri_cache.update(self.name, self.uri)
| {
"content_hash": "72acac1f93547c0657d36fd6599e5a0f",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 34.901234567901234,
"alnum_prop": 0.6090378493102229,
"repo_name": "zhmcclient/python-zhmcclient",
"id": "3a2b0e46c15584203c1078b76b2dc84fd58033ca",
"size": "11907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zhmcclient/_nic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "18123"
},
{
"name": "Python",
"bytes": "2361296"
},
{
"name": "Shell",
"bytes": "12292"
}
],
"symlink_target": ""
} |
"""
Created on Fri Jun 05 21:36:21 2015
@author: Paco
"""
from api import API
class NineGag(API):
_class_name = '9Gag'
_category = 'Social'
_help_url = 'https://github.com/k3min/infinigag'
_api_url = 'http://infinigag.eu01.aws.af.cm/'
def _parsing_data(self,data):
res = {'description':list(),'link':list()}
for d in data['data']:
res['description'].append(self._tools.key_test('caption',d))
res['link'].append(self._tools.key_test('link',d))
return res
def get_trend(self):
url = self._api_url+'trending/0'
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data(data)
def get_hot(self):
url = self._api_url+'hot/0'
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data(data) | {
"content_hash": "d87aecbdecc2c250e277a9f698f92116",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 28.181818181818183,
"alnum_prop": 0.556989247311828,
"repo_name": "franblas/pyAPI",
"id": "ea677555bc00e2afec7005d8fdc1a6d2d894d14d",
"size": "954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyapi/ninegag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76708"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from chatterbot.adapters.output import OutputAdapter
class OutputAdapterTestCase(TestCase):
"""
This test case is for the OutputAdapter base class.
Although this class is not intended for direct use,
this test case ensures that exceptions requiring
basic functionality are triggered when needed.
"""
def setUp(self):
super(OutputAdapterTestCase, self).setUp()
self.adapter = OutputAdapter()
def test_process_response(self):
with self.assertRaises(OutputAdapter.AdapterMethodNotImplementedError):
self.adapter.process_response('', 0)
| {
"content_hash": "458b46e01fb5ec0cc2b2f81aa6cb7079",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 35.22222222222222,
"alnum_prop": 0.7287066246056783,
"repo_name": "osDanielLee/SelfThinkingRobot",
"id": "d50adc2405c8121abce5acbcf91f5a420b44147e",
"size": "634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AnalyzeData/tests/output_adapter_tests/test_output_adapter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "277962"
}
],
"symlink_target": ""
} |
"""Train a Fast R-CNN network on a region of interest database."""
import argparse
import pprint
import numpy as np
import pdb
import sys
import os.path
import tensorflow as tf
# this_dir = os.path.dirname(__file__)
# sys.path.insert(0, this_dir + '/..')
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.insert(0, os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
# print(sys.path)
# for p in sys.path: print p
# print (sys.path)
from lib.fast_rcnn.train import get_training_roidb, train_net
from lib.fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, get_log_dir
from lib.datasets.factory import get_imdb
from lib.networks.factory import get_network
from lib.fast_rcnn.config import cfg
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
default=None, type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=70000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='kitti_train', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--network', dest='network_name',
help='name of the network',
default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--restore', dest='restore',
help='restore or not',
default=0, type=int)
if len(sys.argv) == 1:
parser.print_help()
# sys.exit(1)
args = parser.parse_args()
return args
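# Example invocation (illustrative; the network name, weights path and imdb
# name are assumptions and must match what lib.networks.factory and
# lib.datasets.factory actually register):
#   python faster_rcnn/train_net.py --gpu 0 --network VGGnet_train \
#       --weights data/pretrain_model/VGG_imagenet.npy \
#       --imdb voc_2007_trainval --iters 70000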
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
imdb = get_imdb(args.imdb_name)
print(('Loaded dataset `{:s}` for training'.format(imdb.name)))
roidb = get_training_roidb(imdb)
output_dir = get_output_dir(imdb, None)
log_dir = get_log_dir(imdb)
print(('Output will be saved to `{:s}`'.format(output_dir)))
print(('Logs will be saved to `{:s}`'.format(log_dir)))
device_name = '/gpu:{:d}'.format(args.gpu_id)
print(device_name)
with tf.device(device_name):
network = get_network(args.network_name)
print(('Use network `{:s}` in training'.format(args.network_name)))
# import os
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu_id)
train_net(network, imdb, roidb,
output_dir=output_dir,
log_dir=log_dir,
pretrained_model=args.pretrained_model,
max_iters=args.max_iters,
restore=bool(int(args.restore)))
| {
"content_hash": "f2922ff0662280093cf84106152dcdc6",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 103,
"avg_line_length": 37.850467289719624,
"alnum_prop": 0.5841975308641976,
"repo_name": "Zardinality/TF_Deformable_Net",
"id": "1138a045f1b28b032a59702a86c5ab83b7eef5fb",
"size": "4300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faster_rcnn/train_net.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11339"
},
{
"name": "C++",
"bytes": "169036"
},
{
"name": "Cuda",
"bytes": "5064"
},
{
"name": "Jupyter Notebook",
"bytes": "10553"
},
{
"name": "Makefile",
"bytes": "68"
},
{
"name": "Python",
"bytes": "771417"
},
{
"name": "Shell",
"bytes": "8887"
}
],
"symlink_target": ""
} |
from ftplib import FTP
import sys, os, os.path, operator
def upload(handle,filename):
f = open(filename,"rb")
(base,ext) = os.path.splitext(filename)
picext = ".bmp .jpg .jpeg .dib .tif .tiff .gif .png"
if(operator.contains(picext,ext)):
try:
handle.storbinary("STOR " + filename,f,1)
except Exception:
print "Successful upload."
else:
print "Successful upload."
f.close()
return
try:
handle.storbinary("STOR " + filename,f)
except Exception:
print "Successful upload."
else:
print "Successful upload."
f.close()
return
def download(handle,filename):
f2 = open(filename,"wb")
try:
handle.retrbinary("RETR " + filename,f2.write)
except Exception:
print "Error in downloading the remote file."
return
else:
print "Successful download!"
f2.close()
return
print "CLIFTP ~ NSP Corp.\n\n"
host_name = raw_input("Enter website name to connect to, exclude ftp notation: ")
if "http://" in host_name:
host_name = host_name.replace("http://","")
host_name = host_name.replace("\n","")
user = raw_input("Enter username: ")
pwd = raw_input("Enter password: ")
try: ftph = FTP(host_name)
except:
print "Host could not be resolved."
raw_input()
sys.exit()
else: pass
try:
ftph.login(user,pwd)
except Exception:
if user == "anonymous" or user == "Anonymous" and pwd == "anonymous" or pwd == "Anonymous":
print "The server does not accept anonymous requests."
raw_input()
sys.exit()
else:
print "Invalid login combination."
raw_input()
sys.exit()
else:
print "Successfully connected!\n"
print ftph.getwelcome()
flag = 1
count = 0
path = ftph.pwd()
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
print "Press help at any time to see proper usage.\n"
while flag:
command = raw_input("FTP ]> ")
if "get " in command:
rf = command.replace("get ","")
rf = rf.replace("\n","")
download(ftph,rf)
continue
elif "put " in command:
lf = command.replace("put ","")
lf = lf.replace("\n","")
upload(ftph,lf)
ftph.close()
ftph = FTP(host_name)
ftph.login(user,pwd)
continue
elif "makedir " in command:
mkdirname = command.replace("makedir ","")
mkdirname = mkdirname.replace("\n","")
try: ftph.mkd(mkdirname)
except:
print "Incorrect usage."
continue
else:
print "Directory created."
continue
elif "remdir " in command:
rmdirname = command.replace("remdir ","")
rmdirname = rmdirname.replace("\n","")
current = ftph.pwd()
ftph.cwd(rmdirname)
allfiles = ftph.nlst()
for file in allfiles:
try:
ftph.delete(file)
except Exception:
pass
else:
pass
ftph.cwd(current)
try:
ftph.rmd(rmdirname)
except Exception:
print "All files within the directory have been deleted, but there is still another directory inside. As deleting this directory automatically goes against true FTP protocol, you must manually delete it, before you can delete the entire directory."
else:
print "Directory deleted."
continue
elif command == "dir":
print ftph.dir()
continue
elif command == "currdir":
print ftph.pwd()
continue
elif "chdir " in command:
dirpath = command.replace("chdir ","")
dirpath = dirpath.replace("\n","")
ftph.cwd(dirpath)
print "Directory changed to " + dirpath
continue
elif command == "up":
dir = ftph.pwd()
temp = dir
index = len(dir) - 1
for i in range(index,0,-1):
if temp[i] == "/" and i != len(dir):
ftph.cwd(temp)
print "One directory back."
continue
if(operator.contains(charset,dir[i])):
temp = temp[:-1]
if temp=="/":
ftph.cwd(temp)
print "One directory back."
elif command == "rename":
fromname = raw_input("Current file name: ")
toname = raw_input("To be changed to: ")
ftph.rename(fromname,toname)
print "Successfully renamed."
continue
elif "delete " in command:
delfile = command.replace("delete ","")
delfile = delfile.replace("\n","")
ftph.delete(delfile)
print "File successfully deleted."
continue
elif command == "term":
ftph.close()
print "Session ended."
raw_input()
sys.exit()
elif "size " in command:
szfile = command.replace("size ","")
szfile = szfile.replace("\n","")
print "The file is " + str(ftph.size(szfile)) + " bytes."
continue
elif command == "debug -b":
ftph.set_debuglevel(1)
print "Debug mode set to base."
continue
elif command == "debug -v":
ftph.set_debuglevel(2)
print "Debug mode set to verbose."
continue
elif command == "debug -o":
ftph.set_debuglevel(0)
print "Debug mode turned off."
continue
elif command == "help":
print "debug -o - turns off debug output\n"
print "debug -v - turns the debug output to verbose mode\n"
print "debug -b - turns the debug output to base\n"
print "size [filename] - returns the size in bytes of the specified file"
print "term - terminate the ftp session\n"
print "delete [filename] - delete a file\n"
print "rename - rename a file\n"
print "up - navigate 1 directory up\n"
print "chdir [path] - change which directory you're in\n"
print "currdir - prints the path of the directory you are currently in\n"
print "dir - lists the contents of the directory\n"
print "remdir [directory path] - removes/deletes an entire directory\n"
print "makedir [directory path] - creates a new directory\n"
print "put [filename] - stores a local file onto the server (does not work with microsoft office document types)\n"
print "get [filename] - download a remote file onto your computer\n\n"
continue
else:
print "Sorry, invalid command. Check 'help' for proper usage."
continue
#EoF
| {
"content_hash": "d343f01e7bb98f59706b2a96f0c2b3a4",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 252,
"avg_line_length": 27.96,
"alnum_prop": 0.6763233190271817,
"repo_name": "ActiveState/code",
"id": "fc08717a6d31b61c61f0206174c48bbf5fb31c8e",
"size": "5592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/521925_Python_FTP_Client/recipe-521925.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from collections import defaultdict
from flask import redirect
from flask import render_template
from flask_login import current_user
from markupsafe import escape
from sqlalchemy import and_
from sqlalchemy_continuum import version_class
from sqlalchemy_continuum import versioning_manager
from config import TRACKER_ADVISORY_URL
from config import TRACKER_BUGTRACKER_URL
from config import TRACKER_GROUP_URL
from config import TRACKER_ISSUE_URL
from config import TRACKER_LOG_ENTRIES_PER_PAGE
from config import TRACKER_SUMMARY_LENGTH_MAX
from tracker import db
from tracker import tracker
from tracker.advisory import advisory_escape_html
from tracker.advisory import advisory_extend_html
from tracker.advisory import advisory_format_issue_listing
from tracker.advisory import generate_advisory
from tracker.advisory import render_html_advisory
from tracker.form.advisory import AdvisoryForm
from tracker.model import CVE
from tracker.model import Advisory
from tracker.model import CVEGroup
from tracker.model import CVEGroupEntry
from tracker.model import CVEGroupPackage
from tracker.model import Package
from tracker.model.advisory import advisory_regex
from tracker.model.cve import cve_id_regex
from tracker.model.cvegroup import pkgname_regex
from tracker.model.cvegroup import vulnerability_group_regex
from tracker.model.enum import Publication
from tracker.model.enum import Remote
from tracker.model.enum import Status
from tracker.model.package import filter_duplicate_packages
from tracker.model.package import sort_packages
from tracker.user import user_can_delete_group
from tracker.user import user_can_delete_issue
from tracker.user import user_can_edit_group
from tracker.user import user_can_edit_issue
from tracker.user import user_can_handle_advisory
from tracker.user import user_can_watch_log
from tracker.user import user_can_watch_user_log
from tracker.util import json_response
from tracker.util import multiline_to_list
from tracker.view.error import not_found
def get_bug_project(databases):
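    """Map the repository databases of the affected packages to a bug tracker
    project id, falling back to project 1 when the databases are mixed."""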
bug_project_mapping = {
1: ['core', 'extra', 'testing'],
5: ['community', 'community-testing', 'multilib', 'multilib-testing']
}
for category, repos in bug_project_mapping.items():
if all((database in repos for database in databases)):
return category
# Fallback
return 1
def get_bug_data(cves, pkgs, versions, group):
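    """Assemble the data used to prefill a bug report for a group: the rendered
    description, a summary (shortened when it exceeds TRACKER_SUMMARY_LENGTH_MAX),
    the mapped task severity and the bug project."""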
    # collect group and issue references in order, dropping duplicates
    references = []
    for ref in multiline_to_list(group.reference):
        if ref not in references:
            references.append(ref)
    for issue in cves:
        for ref in multiline_to_list(issue.reference):
            if ref not in references:
                references.append(ref)
severity_sorted_issues = sorted(cves, key=lambda issue: issue.issue_type)
severity_sorted_issues = sorted(severity_sorted_issues, key=lambda issue: issue.severity)
unique_issue_types = []
for issue in severity_sorted_issues:
if issue.issue_type not in unique_issue_types:
unique_issue_types.append(issue.issue_type)
bug_desc = render_template('bug.txt', cves=cves, group=group, references=references,
pkgs=pkgs, unique_issue_types=unique_issue_types,
TRACKER_ISSUE_URL=TRACKER_ISSUE_URL,
TRACKER_GROUP_URL=TRACKER_GROUP_URL)
pkg_str = ' '.join((pkg.pkgname for pkg in pkgs))
group_type = 'multiple issues' if len(unique_issue_types) > 1 else unique_issue_types[0]
summary = '[{}] [Security] {} ({})'.format(pkg_str, group_type, ' '.join([cve.id for cve in cves]))
if TRACKER_SUMMARY_LENGTH_MAX != 0 and len(summary) > TRACKER_SUMMARY_LENGTH_MAX:
summary = "[{}] [Security] {} (Multiple CVE's)".format(pkg_str, group_type)
# 5: critical, 4: high, 3: medium, 2: low, 1: very low.
    severity_mapping = {
'unknown': 3,
'critical': 5,
'high': 4,
'medium': 3,
'low': 2,
}
    task_severity = severity_mapping.get(group.severity.name)
project = get_bug_project((pkg.database for pkg in versions))
return {
'project': project,
'product_category': 13, # security
'item_summary': summary,
'task_severity': task_severity,
'detailed_desc': bug_desc
}
def get_cve_data(cve):
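    """Load a CVE together with its groups, the package names per group and any
    advisories; returns None when the CVE id is unknown."""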
cve_model = CVE.query.get(cve)
if not cve_model:
return None
entries = (db.session.query(CVEGroupEntry, CVEGroup, CVEGroupPackage, Advisory)
.filter_by(cve=cve_model)
.join(CVEGroup, CVEGroupEntry.group)
.join(CVEGroupPackage, CVEGroup.packages)
.outerjoin(Advisory, Advisory.group_package_id == CVEGroupPackage.id)
.order_by(CVEGroup.created.desc()).order_by(CVEGroupPackage.pkgname)).all()
group_packages = defaultdict(set)
advisories = set()
groups = set()
for cve, group, pkg, advisory in entries:
group_packages[group].add(pkg.pkgname)
groups.add(group)
if advisory:
advisories.add(advisory)
groups = sorted(groups, key=lambda item: item.created, reverse=True)
groups = sorted(groups, key=lambda item: item.status)
advisories = sorted(advisories, key=lambda item: item.id, reverse=True)
    group_packages = {group: sorted(pkgnames) for group, pkgnames in group_packages.items()}
return {'issue': cve_model,
'groups': groups,
'group_packages': group_packages,
'advisories': advisories}
@tracker.route('/<regex("{}"):cve>.json'.format(cve_id_regex[1:-1]), methods=['GET'])
@tracker.route('/<regex("{}"):cve>/json'.format(cve_id_regex[1:-1]), methods=['GET'])
@json_response
def show_cve_json(cve):
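    """Return the JSON representation of a CVE; advisories that are not yet
    published are hidden from users without the reporter role."""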
data = get_cve_data(cve)
if not data:
return not_found(json=True)
cve = data['issue']
references = cve.reference.replace('\r', '').split('\n') if cve.reference else []
    packages = sorted({item for sublist in data['group_packages'].values() for item in sublist})
advisories = data['advisories']
if not current_user.role.is_reporter:
advisories = list(filter(lambda advisory: advisory.publication == Publication.published, advisories))
json_data = OrderedDict()
json_data['name'] = cve.id
json_data['type'] = cve.issue_type
json_data['severity'] = cve.severity.label
json_data['vector'] = cve.remote.label
json_data['description'] = cve.description
json_data['groups'] = [str(group) for group in data['groups']]
json_data['packages'] = packages
json_data['advisories'] = [advisory.id for advisory in advisories]
json_data['references'] = references
json_data['notes'] = cve.notes if cve.notes else None
return json_data
@tracker.route('/<regex("{}"):cve>'.format(cve_id_regex[1:]), methods=['GET'])
def show_cve(cve):
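    """Render the detail page of a CVE with its groups, packages and the
    advisories visible to the current user."""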
data = get_cve_data(cve)
if not data:
return not_found()
    packages = sorted({item for sublist in data['group_packages'].values() for item in sublist})
title = '{} - {}'.format(data['issue'].id, ' '.join(packages)) \
if len(packages) else \
'{}'.format(data['issue'].id)
advisories = data['advisories']
if not current_user.role.is_reporter:
advisories = list(filter(lambda advisory: advisory.publication == Publication.published, advisories))
return render_template('cve.html',
title=title,
issue=data['issue'],
groups=data['groups'],
group_packages=data['group_packages'],
advisories=advisories,
can_watch_log=user_can_watch_log(),
can_edit=user_can_edit_issue(advisories),
can_delete=user_can_delete_issue(advisories))
@tracker.route('/<regex("{}"):cve>/log'.format(cve_id_regex[1:-1]), methods=['GET'])
def show_cve_log(cve):
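    """Render the change log page of a CVE."""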
data = get_cve_data(cve)
if not data:
return not_found()
title = '{} - log'.format(data['issue'].id)
return render_template('log/cve_log.html',
title=title,
issue=data['issue'],
can_watch_user_log=user_can_watch_user_log())
def get_group_data(avg):
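    """Load an AVG with its issues, packages, current repository versions and
    advisories; returns None when the group does not exist."""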
avg_id = int(avg.replace('AVG-', ''))
entries = (db.session.query(CVEGroup, CVE, CVEGroupPackage, Advisory, Package)
.filter(CVEGroup.id == avg_id)
.join(CVEGroupEntry, CVEGroup.issues)
.join(CVE, CVEGroupEntry.cve)
.join(CVEGroupPackage, CVEGroup.packages)
.outerjoin(Package, Package.name == CVEGroupPackage.pkgname)
.outerjoin(Advisory, Advisory.group_package_id == CVEGroupPackage.id)).all()
if not entries:
return None
group = None
issues = set()
packages = set()
advisories = set()
issue_types = set()
versions = set()
for group_entry, cve, pkg, advisory, package in entries:
group = group_entry
issues.add(cve)
issue_types.add(cve.issue_type)
packages.add(pkg)
if package:
versions.add(package)
if advisory:
advisories.add(advisory)
advisories = sorted(advisories, key=lambda item: item.id, reverse=True)
issue_types = list(issue_types)
    issues = sorted(issues, reverse=True)
packages = sorted(packages, key=lambda item: item.pkgname)
versions = filter_duplicate_packages(sort_packages(list(versions)), True)
advisories_pending = group.status == Status.fixed and group.advisory_qualified and len(advisories) <= 0
return {
'group': group,
'packages': packages,
'versions': versions,
'issues': issues,
'issue_types': issue_types,
'advisories': advisories,
'advisories_pending': advisories_pending
}
@tracker.route('/group/<regex("{}"):avg>.json'.format(vulnerability_group_regex[1:-1]), methods=['GET'])
@tracker.route('/group/<regex("{}"):avg>/json'.format(vulnerability_group_regex[1:-1]), methods=['GET'])
@tracker.route('/avg/<regex("{}"):avg>.json'.format(vulnerability_group_regex[1:-1]), methods=['GET'])
@tracker.route('/avg/<regex("{}"):avg>/json'.format(vulnerability_group_regex[1:-1]), methods=['GET'])
@tracker.route('/<regex("{}"):avg>.json'.format(vulnerability_group_regex[1:-1]), methods=['GET'])
@tracker.route('/<regex("{}"):avg>/json'.format(vulnerability_group_regex[1:-1]), methods=['GET'])
@json_response
def show_group_json(avg):
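    """Return the JSON representation of an AVG; unpublished advisories are
    hidden from users without the reporter role."""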
data = get_group_data(avg)
if not data:
return not_found(json=True)
group = data['group']
advisories = data['advisories']
if not current_user.role.is_reporter:
advisories = list(filter(lambda advisory: advisory.publication == Publication.published, advisories))
issues = data['issues']
packages = data['packages']
issue_types = data['issue_types']
references = group.reference.replace('\r', '').split('\n') if group.reference else []
json_data = OrderedDict()
json_data['name'] = group.name
json_data['packages'] = [package.pkgname for package in packages]
json_data['status'] = group.status.label
json_data['severity'] = group.severity.label
json_data['type'] = 'multiple issues' if len(issue_types) > 1 else issue_types[0]
json_data['affected'] = group.affected
json_data['fixed'] = group.fixed if group.fixed else None
json_data['ticket'] = group.bug_ticket if group.bug_ticket else None
json_data['issues'] = [str(cve) for cve in issues]
json_data['advisories'] = [advisory.id for advisory in advisories]
json_data['references'] = references
json_data['notes'] = group.notes if group.notes else None
return json_data
@tracker.route('/group/<regex("{}"):avg>'.format(vulnerability_group_regex[1:]), methods=['GET'])
@tracker.route('/avg/<regex("{}"):avg>'.format(vulnerability_group_regex[1:]), methods=['GET'])
@tracker.route('/<regex("{}"):avg>'.format(vulnerability_group_regex[1:]), methods=['GET'])
def show_group(avg):
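    """Render the detail page of an AVG, including the prefilled bug report
    data and the advisory scheduling form."""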
data = get_group_data(avg)
if not data:
return not_found()
group = data['group']
advisories = data['advisories']
if not current_user.role.is_reporter:
advisories = list(filter(lambda advisory: advisory.publication == Publication.published, advisories))
issues = data['issues']
packages = data['packages']
issue_types = data['issue_types']
versions = data['versions']
issue_type = 'multiple issues' if len(issue_types) > 1 else issue_types[0]
    pkgnames = sorted({pkg.pkgname for pkg in packages})
form = AdvisoryForm()
form.advisory_type.data = issue_type
return render_template('group.html',
title='{} - {}'.format(group, ' '.join(pkgnames)),
form=form,
group=group,
packages=packages,
issues=issues,
advisories=advisories,
versions=versions,
Status=Status,
issue_type=issue_type,
bug_data=get_bug_data(issues, packages, versions, group),
advisories_pending=data['advisories_pending'],
can_edit=user_can_edit_group(advisories),
can_delete=user_can_delete_group(advisories),
can_handle_advisory=user_can_handle_advisory(),
can_watch_log=user_can_watch_log())
def get_package_data(pkgname):
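    """Collect repository versions, groups, issues and published advisories for
    a package, falling back to group data for packages that were dropped from
    the repositories; returns None when nothing is found."""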
entries = (db.session.query(Package, CVEGroup, CVE, Advisory)
.filter(Package.name == pkgname)
.outerjoin(CVEGroupPackage, CVEGroupPackage.pkgname == Package.name)
.outerjoin(CVEGroup, CVEGroupPackage.group)
.outerjoin(CVEGroupEntry, CVEGroup.issues)
.outerjoin(CVE, CVEGroupEntry.cve)
.outerjoin(Advisory, and_(Advisory.group_package_id == CVEGroupPackage.id,
Advisory.publication == Publication.published))
).all()
# fallback for dropped packages
if not entries:
entries = (db.session.query(CVEGroupPackage, CVEGroup, CVE, Advisory)
.filter(CVEGroupPackage.pkgname == pkgname)
.join(CVEGroup, CVEGroupPackage.group)
.join(CVEGroupEntry, CVEGroup.issues)
.join(CVE, CVEGroupEntry.cve)
.outerjoin(Advisory, and_(Advisory.group_package_id == CVEGroupPackage.id,
Advisory.publication == Publication.published))
).all()
if not entries:
return None
groups = set()
issues = set()
advisories = set()
versions = set()
for package, group, cve, advisory in entries:
if isinstance(package, Package):
versions.add(package)
if group:
groups.add(group)
if cve:
issues.add((cve, group))
if advisory:
advisories.add(advisory)
issues = [{'issue': e[0], 'group': e[1]} for e in issues]
issues = sorted(issues, key=lambda item: item['issue'], reverse=True)
issues = sorted(issues, key=lambda item: item['group'].status)
groups = sorted(groups, key=lambda item: item.id, reverse=True)
groups = sorted(groups, key=lambda item: item.status)
advisories = sorted(advisories, key=lambda item: item.id, reverse=True)
versions = filter_duplicate_packages(sort_packages(list(versions)), True)
package = versions[0] if versions else None
return {
'package': package,
'pkgname': pkgname,
'versions': versions,
'groups': groups,
'issues': issues,
'advisories': advisories
}
@tracker.route('/group/<regex("{}"):avg>/log'.format(vulnerability_group_regex[1:-1]), methods=['GET'])
@tracker.route('/avg/<regex("{}"):avg>/log'.format(vulnerability_group_regex[1:-1]), methods=['GET'])
@tracker.route('/<regex("{}"):avg>/log'.format(vulnerability_group_regex[1:-1]), methods=['GET'])
def show_group_log(avg):
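    """Render the change log page of an AVG."""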
data = get_group_data(avg)
if not data:
return not_found(json=True)
group = data['group']
return render_template('log/group_log.html',
title='{} - log'.format(group),
group=group,
Status=Status,
advisories_pending=data['advisories_pending'],
can_watch_user_log=user_can_watch_user_log())
@tracker.route('/package/<regex("{}"):pkgname>.json'.format(pkgname_regex[1:-1]), methods=['GET'])
@tracker.route('/package/<regex("{}"):pkgname>/json'.format(pkgname_regex[1:-1]), methods=['GET'])
@json_response
def show_package_json(pkgname):
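    """Return the JSON representation of a package with its versions, groups,
    issues and published advisories."""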
data = get_package_data(pkgname)
if not data:
return not_found(json=True)
advisories = data['advisories']
versions = data['versions']
groups = data['groups']
issues = data['issues']
json_advisory = []
for advisory in advisories:
entry = OrderedDict()
entry['name'] = advisory.id
entry['date'] = advisory.created.strftime('%Y-%m-%d')
entry['severity'] = advisory.group_package.group.severity.label
entry['type'] = advisory.advisory_type
entry['reference'] = advisory.reference if advisory.reference else None
json_advisory.append(entry)
json_versions = []
for version in versions:
entry = OrderedDict()
entry['version'] = version.version
entry['database'] = version.database
json_versions.append(entry)
json_groups = []
for group in groups:
entry = OrderedDict()
entry['name'] = group.name
entry['status'] = group.status.label
entry['severity'] = group.severity.label
json_groups.append(entry)
json_issues = []
for issue in issues:
group = issue['group']
issue = issue['issue']
entry = OrderedDict()
entry['name'] = issue.id
entry['severity'] = issue.severity.label
entry['type'] = issue.issue_type
entry['status'] = group.status.label
json_issues.append(entry)
json_data = OrderedDict()
json_data['name'] = pkgname
json_data['versions'] = json_versions
json_data['advisories'] = json_advisory
json_data['groups'] = json_groups
json_data['issues'] = json_issues
return json_data
@tracker.route('/package/<regex("{}"):pkgname>'.format(pkgname_regex[1:]), methods=['GET'])
def show_package(pkgname):
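    """Render the detail page of a package with its groups and issues split
    into open and resolved."""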
data = get_package_data(pkgname)
if not data:
return not_found()
groups = data['groups']
data['groups'] = {'open': list(filter(lambda group: group.status.open(), groups)),
'resolved': list(filter(lambda group: group.status.resolved(), groups))}
issues = data['issues']
data['issues'] = {'open': list(filter(lambda issue: issue['group'].status.open(), issues)),
'resolved': list(filter(lambda issue: issue['group'].status.resolved(), issues))}
return render_template('package.html',
title='{}'.format(pkgname),
package=data)
@tracker.route('/advisory/<regex("{}"):advisory_id>/raw'.format(advisory_regex[1:-1]), methods=['GET'])
@tracker.route('/<regex("{}"):advisory_id>/raw'.format(advisory_regex[1:-1]), methods=['GET'])
def show_advisory_raw(advisory_id):
result = show_advisory(advisory_id, raw=True)
if isinstance(result, tuple):
return result
if not isinstance(result, str):
return result
return result, 200, {'Content-Type': 'text/plain; charset=utf-8'}
@tracker.route('/advisory/<regex("{}"):advisory_id>/generate/raw'.format(advisory_regex[1:-1]), methods=['GET'])
@tracker.route('/<regex("{}"):advisory_id>/generate/raw'.format(advisory_regex[1:-1]), methods=['GET'])
def show_generated_advisory_raw(advisory_id):
result = show_generated_advisory(advisory_id, raw=True)
if isinstance(result, tuple):
return result
if not isinstance(result, str):
return result
return result, 200, {'Content-Type': 'text/plain; charset=utf-8'}
@tracker.route('/advisory/<regex("{}"):advisory_id>'.format(advisory_regex[1:]), methods=['GET'])
@tracker.route('/<regex("{}"):advisory_id>'.format(advisory_regex[1:]), methods=['GET'])
def show_advisory(advisory_id, raw=False):
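    """Show a stored advisory as HTML or raw text; redirects to the generated
    version while no final content has been stored yet."""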
entries = (db.session.query(Advisory, CVEGroup, CVEGroupPackage, CVE)
.filter(Advisory.id == advisory_id)
.join(CVEGroupPackage, Advisory.group_package)
.join(CVEGroup, CVEGroupPackage.group)
.join(CVEGroupEntry, CVEGroup.issues)
.join(CVE, CVEGroupEntry.cve)
.order_by(CVE.id)
).all()
if not entries:
return not_found()
advisory = entries[0][0]
group = entries[0][1]
package = entries[0][2]
issues = [issue for (advisory, group, package, issue) in entries]
if not advisory.content:
if raw:
return redirect('/{}/generate/raw'.format(advisory_id))
return redirect('/{}/generate'.format(advisory_id))
if raw:
return advisory.content
asa = advisory_extend_html(advisory_escape_html(advisory.content), issues, package)
return render_html_advisory(advisory=advisory, package=package, group=group, raw_asa=asa, generated=False)
@tracker.route('/advisory/<regex("{}"):advisory_id>/generate'.format(advisory_regex[1:-1]), methods=['GET'])
@tracker.route('/<regex("{}"):advisory_id>/generate'.format(advisory_regex[1:-1]), methods=['GET'])
def show_generated_advisory(advisory_id, raw=False):
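    """Show an advisory generated on the fly from the current tracker data."""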
advisory = generate_advisory(advisory_id, with_subject=True, raw=raw)
if not advisory:
return not_found()
return advisory
@tracker.route('/advisory/<regex("{}"):advisory_id>/log'.format(advisory_regex[1:-1]), methods=['GET'])
@tracker.route('/<regex("{}"):advisory_id>/log'.format(advisory_regex[1:-1]), methods=['GET'])
def show_advisory_log(advisory_id):
advisory = (db.session.query(Advisory)
.filter(Advisory.id == advisory_id)
).first()
if not advisory:
return not_found()
return render_template('log/advisory_log.html',
title='{} - log'.format(advisory_id),
advisory=advisory,
can_watch_user_log=user_can_watch_user_log())
# TODO: define permission to view this
@tracker.route('/log', defaults={'page': 1}, methods=['GET'])
@tracker.route('/log/page/<int:page>', methods=['GET'])
def show_log(page=1):
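    """Render a paginated log of all CVE, group and advisory transactions."""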
Transaction = versioning_manager.transaction_cls
VersionClassCVE = version_class(CVE)
VersionClassGroup = version_class(CVEGroup)
VersionClassAdvisory = version_class(Advisory)
pagination = (db.session.query(Transaction, VersionClassCVE, VersionClassGroup, VersionClassAdvisory)
.outerjoin(VersionClassCVE, Transaction.id == VersionClassCVE.transaction_id)
.outerjoin(VersionClassGroup, Transaction.id == VersionClassGroup.transaction_id)
.outerjoin(VersionClassAdvisory, Transaction.id == VersionClassAdvisory.transaction_id)
.order_by(Transaction.issued_at.desc())
.filter((VersionClassCVE.transaction_id) |
(VersionClassGroup.transaction_id) |
(VersionClassAdvisory.transaction_id))
).paginate(page, TRACKER_LOG_ENTRIES_PER_PAGE, True)
return render_template('log/log.html',
                           title='Log',
can_watch_user_log=user_can_watch_user_log(),
pagination=pagination,
CVE=CVE,
CVEGroup=CVEGroup,
Advisory=Advisory)
| {
"content_hash": "f3cf8717c134b216364c705b187bc5a8",
"timestamp": "",
"source": "github",
"line_count": 594,
"max_line_length": 112,
"avg_line_length": 40.7979797979798,
"alnum_prop": 0.6249071552364447,
"repo_name": "archlinux/arch-security-tracker",
"id": "fd555df84771cb1e141030666e25e91803155366",
"size": "24234",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tracker/view/show.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9806"
},
{
"name": "HTML",
"bytes": "65601"
},
{
"name": "Makefile",
"bytes": "1441"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "345600"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import calendar
import datetime
from django.utils import http as http_utils
from daydreamer.tests.views.core import http
class TestCase(http.TestCase):
"""
Common utilities for testing HTTP view behaviors.
"""
# Utilities.
def format_etag(self, etag):
"""
Quote the given ETag for use in an HTTP header.
"""
return http_utils.quote_etag(etag)
def format_datetime(self, dt):
"""
Format a datetime for use in an HTTP header.
"""
return http_utils.http_date(calendar.timegm(dt.utctimetuple()))
| {
"content_hash": "a1f1f470bfc5dd5523bb033790d83f34",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 22.482758620689655,
"alnum_prop": 0.6196319018404908,
"repo_name": "skibblenybbles/django-daydreamer",
"id": "c1dbd99308c34797df473e8ba4d58ceb16e40f30",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daydreamer/tests/views/behaviors/http/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "241801"
}
],
"symlink_target": ""
} |
"""A manual controller for model search."""
from typing import Iterator, Sequence
from adanet.experimental.controllers.controller import Controller
from adanet.experimental.phases.phase import ModelProvider
from adanet.experimental.phases.phase import Phase
from adanet.experimental.work_units.work_unit import WorkUnit
import tensorflow.compat.v2 as tf
class SequentialController(Controller):
"""A controller where the user specifies the sequences of phase to execute."""
# TODO: Add checks to make sure phases are valid.
def __init__(self, phases: Sequence[Phase]):
"""Initializes a SequentialController.
Args:
phases: A list of `Phase` instances.
"""
self._phases = phases
def work_units(self) -> Iterator[WorkUnit]:
previous_phase = None
for phase in self._phases:
for work_unit in phase.work_units(previous_phase):
yield work_unit
previous_phase = phase
def get_best_models(self, num_models: int) -> Sequence[tf.keras.Model]:
final_phase = self._phases[-1]
if isinstance(final_phase, ModelProvider):
return self._phases[-1].get_best_models(num_models)
raise RuntimeError('Final phase does not provide models.')
| {
"content_hash": "3bc31c80bb03cbfbb599d4d50f2a029e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 34.34285714285714,
"alnum_prop": 0.7271214642262895,
"repo_name": "tensorflow/adanet",
"id": "5e186bccef04651949a2a2c229fda78010d67058",
"size": "1826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adanet/experimental/controllers/sequential_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1914501"
},
{
"name": "Python",
"bytes": "1047162"
},
{
"name": "Shell",
"bytes": "2927"
},
{
"name": "Starlark",
"bytes": "28690"
}
],
"symlink_target": ""
} |