text (string, 4 – 1.02M characters) | meta (dict)
---|---
from locust import HttpUser, task, tag
import yaml, json, os
def get_name(opts):
if opts.get('alias'): return opts['alias']
if opts.get('query'): return str(opts['query'])
return opts['path']
def response_printer(opts, response):
if response.status_code == 0:
print(response.error)
exit(1)
elif response.status_code != 200:
print(
"Method: " + opts['method'],
"Query: " + str(opts.get('query')),
"Response status: " + str(response.status_code),
"Response body: " + response.text,
end="\n-------------------------------------------\n", sep="\n")
def create_get_url(params, opts):
url = opts['path']
added_first_param = False
for param_name, param_val in params.items():
if param_val == 'None' or param_val == 'null':
continue
if added_first_param:
url += f'&{param_name}=' + param_val
else:
url += f'?{param_name}=' + param_val
added_first_param = True
return url
class PuppetDbLoadTest(HttpUser):
def get_request(self, opts):
params = {"limit": str(opts.get('limit')),
"offset": str(opts.get('offset')),
"order_by": json.dumps(opts.get('order_by')),
"query": json.dumps(opts.get('query'))}
url = create_get_url(params, opts)
with self.client.request(opts['method'], url, get_name(opts)) as response:
response_printer(opts, response)
def post_request(self, opts):
query = {}
if opts.get('query'):
query['query'] = opts['query']
limit = opts.get('limit')
offset = opts.get('offset')
if limit:
query['limit'] = limit
if offset:
query['offset'] = offset
headers = opts.get('headers')
with self.client.request(opts['method'], opts['path'], get_name(opts), data=json.dumps(query), json=True,
headers=headers) as response:
response_printer(opts, response)
def run_task(self, requests_file):
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + requests_file) as stream:
config = yaml.safe_load(stream)
for opts in config:
if opts['method'] == 'GET':
self.get_request(opts)
elif opts['method'] == 'POST':
self.post_request(opts)
@tag('example')
@task
def run_example_queries(self):
self.run_task('/example.yaml')
@tag('console')
@task
def run_console_queries(self):
self.run_task('/console.yaml')
@tag('cd4pe')
@task
def run_cd4pe_queries(self):
self.run_task('/cd4pe.yaml')
@tag('estate')
@task
def run_estate_queries(self):
self.run_task('/estate-reporting.yaml')
@tag('all')
@task
def run_all_queries(self):
self.run_task('/console.yaml')
self.run_task('/cd4pe.yaml')
self.run_task('/estate-reporting.yaml')
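# ---------------------------------------------------------------------------
# Illustrative sketch (not taken from the repository): run_task() above expects
# each *.yaml file to contain a YAML list of request option maps. The key names
# (method, path, query, limit, offset, order_by, alias, headers) come from the
# code above; the paths and query shown here are only an example of what such a
# file might hold.
#
#   - method: GET
#     path: /pdb/query/v4/nodes
#     limit: 10
#     alias: first-ten-nodes
#   - method: POST
#     path: /pdb/query/v4
#     query: ["from", "nodes", ["limit", 10]]
#     headers:
#       Content-Type: application/json
#
# A single tagged task set can then be exercised with standard Locust options,
# for example:
#   locust -f load-test.py --tags console --host http://localhost:8080
# ---------------------------------------------------------------------------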
| {
"content_hash": "d66aa741a8caddaa5778920a3bdcb63e",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 113,
"avg_line_length": 31.09,
"alnum_prop": 0.5342553875844323,
"repo_name": "puppetlabs/puppetdb",
"id": "d2471e1c2ed848fa6958a75ae04df27fc789273e",
"size": "3132",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "locust/load-test/load-test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "2493279"
},
{
"name": "Dockerfile",
"bytes": "6791"
},
{
"name": "HTML",
"bytes": "4074"
},
{
"name": "JavaScript",
"bytes": "5944"
},
{
"name": "Makefile",
"bytes": "9415"
},
{
"name": "Python",
"bytes": "32771"
},
{
"name": "Ruby",
"bytes": "272373"
},
{
"name": "Shell",
"bytes": "121424"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='chicagotransit',
version='0.5.0',
description='A package for interfacing with Chicago Transit APIs',
long_description=long_description,
url='https://github.com/conway/ChicagoTransit',
author='Jake Conway',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
],
keywords=['chicago', 'transit', 'wrapper', 'bus', 'train', 'cta', 'divvy', 'illinois', 'IL', 'transportation'],
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=['requests', 'xmltodict==0.10.2']
)
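# Hedged usage note (not part of setup.py): from a checkout of the repository
# the package can be installed in the usual setuptools/pip way, e.g.
#   pip install .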
| {
"content_hash": "82287fce393542880ea9d131d975cbd3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 115,
"avg_line_length": 37.78787878787879,
"alnum_prop": 0.627906976744186,
"repo_name": "Conway/chicagotransit",
"id": "b77c9a69ed5417023cb7d1ba79070ec66d0895cd",
"size": "1247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24603"
}
],
"symlink_target": ""
} |
from lib.web2py.dal import *
# DAL reference: http://www.web2py.com/book/default/chapter/06#The-database-abstraction-layer
db = DAL('sqlite://database/usdadb.sqlite')
#db = DAL('postgres://username:password@localhost/database')
#db = DAL('mysql://username:password@localhost/database')
#db=DAL('mssql://username:password@localhost/database')
#db=DAL('oracle://username/password@database')
# Avoid "no such table sqlite_sequence" error: sqlite_sequence table is not created,
# until you define at least one autoincrement and primary key column in your schema
db.define_table('dummy')
# Documentation reference: http://www.ars.usda.gov/SP2UserFiles/Place/12354500/Data/SR26/sr26_doc.pdf
db.define_table('food_group_description',
Field('FdGrp_Cd', 'string', length=4, label='Code'),
Field('FdGrp_Desc', 'string', length=60, label='Name'),
primarykey=['FdGrp_Cd']
)
db.define_table('food_description',
Field('NDB_No', 'string', length=5, label='ID'),
Field('FdGrp_Cd', 'reference food_group_description', label='Food group'),
Field('Long_Desc', 'string', length=200, label='Long description'),
Field('Shrt_Desc', 'string', length=60, label='Short description'),
Field('ComName', 'string', length=100, label='Other names'),
Field('ManufacName', 'string', length=65, label='Company'),
Field('Survey', 'string', length=1),
Field('Ref_desc', 'string', length=135, label='Inedible parts'),
Field('Refuse', 'integer', default=0, label='Inedible %'),
Field('SciName', 'string', length=65, label='Scientific name'),
Field('N_Factor', 'double'),
Field('Pro_Factor', 'double'),
Field('Fat_Factor', 'double'),
Field('CHO_Factor', 'double'),
primarykey=['NDB_No']
)
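# Hedged usage sketch (not part of the schema): with the two tables above
# defined, foods in a given USDA food group could be selected roughly like
# this; the group code '0100' is only an illustrative value.
#   rows = db(db.food_description.FdGrp_Cd == '0100').select(
#       db.food_description.Long_Desc, db.food_description.Shrt_Desc)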
db.define_table('nutrient_definition',
Field('Nutr_No', 'string', length=3, label='ID'),
Field('Units', 'string', length=7),
Field('Tagname', 'string', length=20, label='INFOODS name'),
Field('NutrDesc', 'string', length=60, label='Name'),
Field('SR_Order', 'integer', label='Order'),
primarykey=['Nutr_No']
)
db.define_table('nutrient_data',
Field('NDB_No', 'reference food_description', label='Food ID'),
Field('Nutr_No', 'reference nutrient_definition', label='Nutrient ID'),
Field('Nutr_Val', 'double', label='Edible amount in 100g'),
Field('Num_Data_Pts', 'double', label='No. of analyses'),
Field('Std_Error', 'double', label='Mean std. err.'),
Field('Src_Cd', 'string', length=2, label='Data type code'),
Field('Deriv_Cd', 'string', length=4, label='Data Derivation Code'),
Field('Ref_NDB_No', 'string', length=5, label='NDB number'),
Field('Add_Nutr_Mark', 'string', length=1, label='Fortified'),
Field('Num_Studies', 'integer', label='Studies'),
Field('Min_Val', 'double'),
Field('Max_Val', 'double'),
Field('DF', 'integer', label='Degrees of freedom'),
Field('Low_EB', 'double', label='Lower 95% error bound'),
Field('Up_EB', 'double', label='Upper 95% error bound'),
Field('Stat_cmt', 'string', length=10, label='Statistical comments'),
Field('AddMod_Date', 'string', length=10, label='Modified'),
Field('CC', 'string', length=1, label='Confidence code'),
primarykey=['NDB_No', 'Nutr_No']
)
db.define_table('weight',
Field('NDB_No', 'reference food_description', label='Food ID'),
Field('Seq', 'string', length=2, label='Sequence'),
Field('Amount', 'double'),
Field('Msre_Desc', 'string', length=100, label='Description'),
Field('Gm_Wgt', 'double', label='Gram weight'),
Field('Num_Data_Pts', 'integer', label='Data points'),
Field('Std_Dev', 'double', label='Std. deviation'),
primarykey=['NDB_No', 'Seq']
) | {
"content_hash": "13917a2a760347aea05969e700fcb9e4",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 101,
"avg_line_length": 45.30864197530864,
"alnum_prop": 0.6547683923705722,
"repo_name": "ionelanton/web2py-usdadb",
"id": "00a167c2730bdcab4c6dba906e707617ce60feee",
"size": "3670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "565596"
}
],
"symlink_target": ""
} |
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image06.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [87089152, 87093632]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
worksheet.insert_chart('E9', chart)
worksheet.insert_image('F2', self.image_dir + 'red.png')
workbook.close()
self.assertExcelEqual()
| {
"content_hash": "73cab31dffaa9b20ba91857a878c4b3a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 25.642857142857142,
"alnum_prop": 0.5821727019498607,
"repo_name": "jmcnamara/XlsxWriter",
"id": "2e43eb490bb31b15032efaaeb7b546187a9a35a8",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_image06.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
} |
import requests
import bs4
from colorama import Fore
def get_html(episode_number: int) -> str:
print(Fore.YELLOW + f"Getting HTML for episode {episode_number}", flush=True)
url = f'https://talkpython.fm/{episode_number}'
resp = requests.get(url)
resp.raise_for_status()
return resp.text
def get_title(html: str, episode_number: int) -> str:
print(Fore.CYAN + f"Getting TITLE for episode {episode_number}", flush=True)
soup = bs4.BeautifulSoup(html, 'html.parser')
header = soup.select_one('h1')
if not header:
return "MISSING"
return header.text.strip()
def main():
get_title_range()
print("Done.")
def get_title_range():
# Please keep this range pretty small to not DDoS my site. ;)
for n in range(180, 195):
html = get_html(n)
title = get_title(html, n)
print(Fore.WHITE + f"Title found: {title}", flush=True)
if __name__ == '__main__':
main()
| {
"content_hash": "f3294b127f0267b68285c41e82fc633b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 81,
"avg_line_length": 23.775,
"alnum_prop": 0.6330178759200841,
"repo_name": "Wintellect/WintellectWebinars",
"id": "656647c7ee35c634f1285e28a94c417bb50876c8",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2019-01-24-async-python-kennedy/code/web_scraping/sync_scrape/program.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "47583"
},
{
"name": "CSS",
"bytes": "39803"
},
{
"name": "HTML",
"bytes": "87870"
},
{
"name": "JavaScript",
"bytes": "4383753"
},
{
"name": "Jupyter Notebook",
"bytes": "234737"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "208421"
},
{
"name": "SCSS",
"bytes": "152"
},
{
"name": "Shell",
"bytes": "4251"
},
{
"name": "TypeScript",
"bytes": "142946"
}
],
"symlink_target": ""
} |
PANEL_GROUP = 'rating'
PANEL_DASHBOARD = 'admin'
PANEL = 'pyscripts'
ADD_PANEL = 'cloudkittydashboard.dashboards.admin.pyscripts.panel.PyScripts'
| {
"content_hash": "06429d282377b28012f1a29c3762788b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 76,
"avg_line_length": 29.4,
"alnum_prop": 0.7755102040816326,
"repo_name": "openstack/cloudkitty-dashboard",
"id": "cd4c2d21ace08745ca15d4db97c789af1506bebe",
"size": "147",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cloudkittydashboard/enabled/_13_admin_pyscripts_panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15434"
},
{
"name": "JavaScript",
"bytes": "7687"
},
{
"name": "Python",
"bytes": "114540"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from getpass import getpass
try:
# These are for python3 support
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from urllib.parse import urlencode
unistr = str
except ImportError:
# Fallback to python2
from urllib2 import urlopen, Request
from urllib2 import HTTPError
from urllib import urlencode
unistr = unicode
class ClientLogin(object):
"""
A Google ClientLogin session management class.
It does not support captcha verification, but it does work correctly with
two-step authentication when it is provided with an application password
rather than the Google account password.
"""
# This is the URL used for ClientLogin authentication
AUTH_URL = 'https://www.google.com/accounts/ClientLogin'
def __init__(self, user, passwd, service, acct_type='GOOGLE', source=None):
"""
Create a new instance of the management class with the provided
credentials.
:param user:
User's full email address.
:param passwd:
User's password. If the user is using two-factor authentication, this
should be a password created specifically for this application.
:param service:
Name of the Google service you're requesting authorization for.
:param acct_type:
Type of account to request authorization for.
Possible values are GOOGLE (default), HOSTED, or HOSTED_OR_GOOGLE.
:param source: (optional)
Short string identifying your application, for logging purposes.
"""
self.user = user
self.passwd = passwd
self.service = service
self.acct_type = acct_type
self.source = source
self.auth_token = None
self.sid_token = None
self.lsid_token = None
def _process_response(self, resp):
ret = {}
for line in resp.split('\n'):
if '=' in line:
var, val = line.split('=', 1)
ret[var] = val
return ret
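    # Illustrative (hedged) example of the body parsed above; real token
    # values are long opaque strings issued by Google:
    #   SID=DQAAAGgA...
    #   LSID=DQAAAGsA...
    #   Auth=DQAAAGgA...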
def _make_request(self, url, data, headers):
if not data:
data = None
else:
data = urlencode(data)
data = data.encode('utf8')
req = Request(url, data, headers)
err = None
try:
resp_obj = urlopen(req)
except HTTPError as e:
err = e.code
return err, e.read()
resp = resp_obj.read()
resp_obj.close()
return None, unistr(resp, encoding='utf8')
def _request_tokens(self):
data = {
'Email': self.user,
'Passwd': self.passwd,
'accountType': self.acct_type,
'service': self.service
}
if self.source:
data['source'] = self.source
headers = {
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8'
}
err, resp = self._make_request(self.AUTH_URL, data, headers)
if err is not None:
#raise Exception("HTTP Error %d" % err)
return
ret = self._process_response(resp)
if 'Error' in ret:
#raise Exception(ret['Error'])
return
if 'Auth' in ret:
self.auth_token = ret['Auth']
if 'SID' in ret:
self.sid_token = ret['SID']
if 'LSID' in ret:
self.lsid_token = ret['LSID']
def get_auth_token(self, request=False):
"""
Returns an authentication token, creating one if needed.
The first time this method is called, it will make a request for an
authentication token. Subsequent calls to this method will return that
same token, unless the request parameter is true.
:param request: Force the request of a new authentication token.
"""
if self.auth_token is None or request is True:
self._request_tokens()
return self.auth_token
def get_sid_token(self, request=False):
"""
Returns an SID cookie token, creating one if needed.
The first time this method is called, it will make a request for an
authentication token. Subsequent calls to this method will return that
same token, unless the request parameter is true.
:param request: Force the request of a new SID token.
"""
if self.sid_token is None or request is True:
self._request_tokens()
return self.sid_token
def get_lsid_token(self, request=False):
"""
Returns an LSID cookie token, creating one if needed.
The first time this method is called, it will make a request for an
authentication token. Subsequent calls to this method will return that
same token, unless the request parameter is true.
:param request: Force the request of a new LSID token.
"""
if self.lsid_token is None or request is True:
self._request_tokens()
return self.lsid_token
def is_authenticated(self):
"""
Returns whether this client login instance is authenticated.
Returns True if there are valid tokens, False otherwise.
"""
return (
self.auth_token is not None
and self.sid_token is not None
and self.lsid_token is not None
)
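# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The service name 'sj'
# and the source string are placeholder values; with two-step verification an
# application-specific password must be supplied instead of the account
# password.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    user = 'user@example.com'  # placeholder purely for illustration
    passwd = getpass('Application password: ')
    login = ClientLogin(user, passwd, 'sj', source='clientlogin-example')
    if login.get_auth_token() is not None:
        print('Authenticated; Auth token: %s' % login.get_auth_token())
    else:
        print('Authentication failed (no tokens returned).')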
| {
"content_hash": "776fe2dedf3297ee2b092ac988a3c102",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 79,
"avg_line_length": 32.37869822485207,
"alnum_prop": 0.5942982456140351,
"repo_name": "vially/xbmc-gmusicapi",
"id": "c5693b2b840e82e646a40999e4850051c07dbe4e",
"size": "5519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/gmusicapi/utils/clientlogin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "432971"
}
],
"symlink_target": ""
} |
import binascii
import copy
from castellan.common.objects import symmetric_key as key
import mock
from oslo_concurrency import processutils
import six
import uuid
from nova import exception
from nova.tests.unit.volume.encryptors import test_base
from nova.volume.encryptors import cryptsetup
def fake__get_key(context, passphrase):
raw = bytes(binascii.unhexlify(passphrase))
symmetric_key = key.SymmetricKey('AES', len(raw) * 8, raw)
return symmetric_key
class CryptsetupEncryptorTestCase(test_base.VolumeEncryptorTestCase):
@mock.patch('os.path.exists', return_value=False)
def _create(self, connection_info, mock_exists):
return cryptsetup.CryptsetupEncryptor(connection_info)
def setUp(self):
super(CryptsetupEncryptorTestCase, self).setUp()
self.dev_path = self.connection_info['data']['device_path']
self.dev_name = 'crypt-%s' % self.dev_path.split('/')[-1]
self.symlink_path = self.dev_path
@mock.patch('nova.utils.execute')
def test__open_volume(self, mock_execute):
self.encryptor._open_volume("passphrase")
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name,
self.dev_path, process_input='passphrase',
run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_attach_volume(self, mock_execute):
fake_key = uuid.uuid4().hex
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = fake__get_key(None, fake_key)
self.encryptor.attach_volume(None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name,
self.dev_path, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('ln', '--symbolic', '--force',
'/dev/mapper/%s' % self.dev_name, self.symlink_path,
run_as_root=True, check_exit_code=True),
])
self.assertEqual(2, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test__close_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'remove', self.dev_name,
run_as_root=True, check_exit_code=[0, 4]),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_detach_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'remove', self.dev_name,
run_as_root=True, check_exit_code=[0, 4]),
])
self.assertEqual(1, mock_execute.call_count)
def test_init_volume_encryption_not_supported(self):
# Tests that creating a CryptsetupEncryptor fails if there is no
# device_path key.
type = 'unencryptable'
data = dict(volume_id='a194699b-aa07-4433-a945-a5d23802043e')
connection_info = dict(driver_volume_type=type, data=data)
exc = self.assertRaises(exception.VolumeEncryptionNotSupported,
cryptsetup.CryptsetupEncryptor,
connection_info)
self.assertIn(type, six.text_type(exc))
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_init_volume_encryption_with_old_name(self, mock_execute,
mock_exists):
# If an old name crypt device exists, dev_path should be the old name.
old_dev_name = self.dev_path.split('/')[-1]
encryptor = cryptsetup.CryptsetupEncryptor(self.connection_info)
self.assertFalse(encryptor.dev_name.startswith('crypt-'))
self.assertEqual(old_dev_name, encryptor.dev_name)
self.assertEqual(self.dev_path, encryptor.dev_path)
self.assertEqual(self.symlink_path, encryptor.symlink_path)
mock_exists.assert_called_once_with('/dev/mapper/%s' % old_dev_name)
mock_execute.assert_called_once_with(
'cryptsetup', 'status', old_dev_name, run_as_root=True)
@mock.patch('os.path.exists', side_effect=[False, True])
@mock.patch('nova.utils.execute')
def test_init_volume_encryption_with_wwn(self, mock_execute, mock_exists):
# If an wwn name crypt device exists, dev_path should be based on wwn.
old_dev_name = self.dev_path.split('/')[-1]
wwn = 'fake_wwn'
connection_info = copy.deepcopy(self.connection_info)
connection_info['data']['multipath_id'] = wwn
encryptor = cryptsetup.CryptsetupEncryptor(connection_info)
self.assertFalse(encryptor.dev_name.startswith('crypt-'))
self.assertEqual(wwn, encryptor.dev_name)
self.assertEqual(self.dev_path, encryptor.dev_path)
self.assertEqual(self.symlink_path, encryptor.symlink_path)
mock_exists.assert_has_calls([
mock.call('/dev/mapper/%s' % old_dev_name),
mock.call('/dev/mapper/%s' % wwn)])
mock_execute.assert_called_once_with(
'cryptsetup', 'status', wwn, run_as_root=True)
@mock.patch('nova.utils.execute')
def test_attach_volume_unmangle_passphrase(self, mock_execute):
fake_key = '0725230b'
fake_key_mangled = '72523b'
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = fake__get_key(None, fake_key)
mock_execute.side_effect = [
processutils.ProcessExecutionError(exit_code=2), # luksOpen
mock.DEFAULT,
mock.DEFAULT,
]
self.encryptor.attach_volume(None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name,
self.dev_path, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name,
self.dev_path, process_input=fake_key_mangled,
run_as_root=True, check_exit_code=True),
mock.call('ln', '--symbolic', '--force',
'/dev/mapper/%s' % self.dev_name, self.symlink_path,
run_as_root=True, check_exit_code=True),
])
self.assertEqual(3, mock_execute.call_count)
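# Hedged note (not part of the test module): within a Nova source tree these
# unit tests would normally be run through tox, e.g.
#   tox -e py27 -- nova.tests.unit.volume.encryptors.test_cryptsetup
# (the exact environment name depends on the interpreters available).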
| {
"content_hash": "0d95960e35ea1ee4d08e5dc29f5e3ac7",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 78,
"avg_line_length": 42.798701298701296,
"alnum_prop": 0.6164466697011076,
"repo_name": "hanlind/nova",
"id": "53eb1098c2120e9106e463198313b9b83ecb5b90",
"size": "7267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/volume/encryptors/test_cryptsetup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "18681206"
},
{
"name": "Shell",
"bytes": "32127"
},
{
"name": "Smarty",
"bytes": "306159"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1beta1
async def sample_delete_hyperparameter_tuning_job():
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest(
name="name_value",
)
# Make the request
operation = await client.delete_hyperparameter_tuning_job(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_async]
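# Hedged usage note (not part of the generated sample): the coroutine above has
# to be driven by an event loop, for example
#   import asyncio
#   asyncio.run(sample_delete_hyperparameter_tuning_job())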
| {
"content_hash": "e8f932ceda31a4d282ea100a89ef0f3f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 83,
"avg_line_length": 27.82608695652174,
"alnum_prop": 0.740625,
"repo_name": "googleapis/python-aiplatform",
"id": "b714aacd5be5378f4a0fc4d0d1c4a74e32ac9315",
"size": "2066",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
"""
Script for building and saving the model for the ``SkLearnBallTreeHashIndex``
implementation of ``HashIndex``.
"""
import logging, cPickle, os
from smqtk.algorithms.nn_index.hash_index.sklearn_balltree import SkLearnBallTreeHashIndex
from smqtk.utils.bin_utils import (
initialize_logging,
report_progress,
basic_cli_parser,
)
from smqtk.utils.bit_utils import int_to_bit_vector_large
def cli_parser():
parser = basic_cli_parser(__doc__)
parser.add_argument("hash2uuids_fp", type=str)
parser.add_argument("bit_len", type=int)
parser.add_argument("leaf_size", type=int)
parser.add_argument("rand_seed", type=int)
parser.add_argument("balltree_model_fp", type=str)
return parser
def main():
args = cli_parser().parse_args()
initialize_logging(logging.getLogger('smqtk'), logging.DEBUG)
initialize_logging(logging.getLogger('__main__'), logging.DEBUG)
log = logging.getLogger(__name__)
hash2uuids_fp = os.path.abspath(args.hash2uuids_fp)
bit_len = args.bit_len
leaf_size = args.leaf_size
rand_seed = args.rand_seed
balltree_model_fp = os.path.abspath(args.balltree_model_fp)
assert os.path.isfile(hash2uuids_fp), "Bad path: '%s'" % hash2uuids_fp
assert os.path.isdir(os.path.dirname(balltree_model_fp)), \
"Bad path: %s" % balltree_model_fp
log.debug("hash2uuids_fp : %s", hash2uuids_fp)
log.debug("bit_len : %d", bit_len)
log.debug("leaf_size : %d", leaf_size)
log.debug("rand_seed : %d", rand_seed)
log.debug("balltree_model_fp: %s", balltree_model_fp)
log.info("Loading hash2uuids table")
with open(hash2uuids_fp) as f:
hash2uuids = cPickle.load(f)
log.info("Computing hash-code vectors")
hash_vectors = [] #[int_to_bit_vector_large(h, bit_len) for h in hash2uuids]
rs = [0] * 7
for h in hash2uuids:
hash_vectors.append( int_to_bit_vector_large(h, bit_len) )
report_progress(log.debug, rs, 1.)
log.info("Initializing ball tree")
btree = SkLearnBallTreeHashIndex(balltree_model_fp, leaf_size, rand_seed)
log.info("Building ball tree")
btree.build_index(hash_vectors)
if __name__ == '__main__':
main()
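# Hedged usage sketch (not part of the original script): the file names below
# are placeholders; the positional arguments follow cli_parser() above, i.e.
# <hash2uuids_fp> <bit_len> <leaf_size> <rand_seed> <balltree_model_fp>.
#   python make_balltree.py hash2uuids.pickle 256 40 0 balltree.npz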
| {
"content_hash": "73b28ab2eea77d07cdbe327dca99af60",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 90,
"avg_line_length": 32.27536231884058,
"alnum_prop": 0.6641221374045801,
"repo_name": "Purg/SMQTK",
"id": "0af102994d74a43963db2253012f60317b03f496",
"size": "2227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/smqtk/bin/make_balltree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "93558"
},
{
"name": "C++",
"bytes": "812600"
},
{
"name": "CMake",
"bytes": "68672"
},
{
"name": "CSS",
"bytes": "2297"
},
{
"name": "Cuda",
"bytes": "69131"
},
{
"name": "HTML",
"bytes": "79601"
},
{
"name": "Java",
"bytes": "97253"
},
{
"name": "JavaScript",
"bytes": "123457"
},
{
"name": "Jupyter Notebook",
"bytes": "85336"
},
{
"name": "M4",
"bytes": "61280"
},
{
"name": "Makefile",
"bytes": "4344"
},
{
"name": "Matlab",
"bytes": "23266"
},
{
"name": "Perl",
"bytes": "3762394"
},
{
"name": "Python",
"bytes": "1281460"
},
{
"name": "Shell",
"bytes": "26340"
},
{
"name": "TeX",
"bytes": "74581"
}
],
"symlink_target": ""
} |
from __future__ import division
import MySQLdb
import scipy.sparse as ss
import numpy as np, numpy.linalg as nlg
from nltk.stem.snowball import SnowballStemmer
import gaussianRandomFeatures as grf
import time
import re
import string
import sys
import os
from multiprocessing import Process, Queue
import datetime
from dateutil.parser import parse
import pytz
import json
class ProcessArgs():
def __init__(self, messages_data, q, i, skip_stemmer):
self.messages_data = messages_data
self.q = q
self.i = i
self.skip_stemmer = skip_stemmer
class generalDataConnect():
def __init__(self):
self.randomFeatures = None
self.ts_magic_number = 13168189440000.0
self.rn = 100
self.sine=True
self.rfc = None
# generalDataConnect
def calculateTFIDF(self, params):
emailwords = [dict() for x in range(len(params.messages_data))]
skip_words = dict.fromkeys([
    'the', 'be', 'to', 'of', 'and', 'a', 'in', 'that', 'have', 'i',
    'it', 'for', 'not', 'on', 'with', 'he', 'as', 'you', 'do', 'at',
    'this', 'but', 'his', 'by', 'from', 'they', 'we', 'say', 'her',
    'she', 'or', 'an', 'will', 'my', 'one', 'all', 'would', 'there',
    'their', 'what'], 1)
# the total number of times a word was seen
# a word being in this dict will determine if it is used
# if a word is removed from this dict it will not be used
wordcount = {}
# the number of messages that this word was in
wordcount_message = {}
message_count = len(params.messages_data)
local_message_id = 0
stemmer = SnowballStemmer("english")
for messageid in params.messages_data:
body = params.messages_data[messageid]
message_arr = re.split('\s+', string.lower(body))
if (local_message_id % 1000 == 0):
print "Thread ",params.i, " ", local_message_id, " / ", message_count
for word in message_arr:
# remove nonword characters
word = re.sub('[\W_]+', '', word)
if (word == ""):
continue
if (len(word) > 255):
continue
if (word in skip_words):
continue
if (params.skip_stemmer == False):
try:
word = stemmer.stem(word)
except:
print "Stemming error in word ", word, " message ", messageid
# save the count of this word in this message
# if (local_message_id not in emailwords):
# emailwords[local_message_id] = {}
if (word in emailwords[local_message_id]):
emailwords[local_message_id][word] += 1
else:
emailwords[local_message_id][word] = 1
local_message_id += 1
print "Thread ", params.i, ": Counting words"
# count the total number of times each word was seen
for messageid in range(local_message_id):
for word in emailwords[messageid].keys():
if (word in wordcount):
wordcount[word] += emailwords[messageid][word]
else:
wordcount[word] = emailwords[messageid][word]
if (emailwords[messageid][word] > 0):
if (word in wordcount_message):
wordcount_message[word] += 1
else:
wordcount_message[word] = 1
if (len(emailwords) != message_count):
print "Error: Thread ", params.i, ":emailwords array size (", len(emailwords), ") does not match expected number of words: ", message_count
sys.exit()
if (local_message_id != message_count):
print "Error: Thread ", params.i, ":local_message_id (", local_message_id, ") does not match expected number of words: ", message_count
sys.exit()
params.q.put(message_count)
for i in range(message_count):
params.q.put(emailwords[i])
params.q.put(wordcount)
params.q.put(wordcount_message)
# generalDataConnect
# getTFIDFFromDatabase retrieves the pre-processed TF-IDF data from the database, but building
# that database is slow. This function builds the TF-IDF matrix in memory instead, which is about
# 5x faster but has to be redone every time the daemon is started
def getTFIDFSimilarityFromMessage(self, tfidf_wordlimit, number_of_threads, skip_stemmer, message_count, out_to_database):
if (number_of_threads > message_count):
print "ERROR: number of messages, ",message_count,", is fewer than number of threads, ",number_of_threads,". Not supported"
raise NotImplementedError()
# This code can remove words seen in more than some % of the messages
# It turns out this is not very useful in the datasets that we have so
# the functionality hasn't been implemented in the Perl import code yet
stopword_threshold = 0.10
total_words = 0
# the total number of times a word was seen
# a word being in this dict will determine if it is used
# if a word is removed from this dict it will not be used
wordcount = {}
# the number of messages that this word was in
wordcount_message = {}
# the number of times a word was seen in a given message
emailwords = []
thread_range_messages = int(message_count / number_of_threads)
# round up so the last thread doesn't drop messages
if ((message_count % number_of_threads) != 0):
thread_range_messages += 1
print "range is ", thread_range_messages
mythreads = []
myqueues = []
# we collect the message text into a dict and pass it into the process
# this can waste memory because we're storing all of the text in memory
# rather than iteratively reading it in. However we're looking at about
# 500MB for 250k emails with 55 million words so I'm hoping the drawback
# is minimal given the flexibility we get allowing for different methods
# of text ingest
for i in range(number_of_threads):
start_message = thread_range_messages * i
end_message = min(thread_range_messages * (i+1), message_count)
# range is [start_message, end_message)
print "Thread ", start_message, " to ", end_message
q = Queue()
messages_data = self.getMessages(start_message, end_message)
# messages_data = self.getMessages()
process_args = ProcessArgs(messages_data, q, i, skip_stemmer)
t = Process(target=self.calculateTFIDF, args=(process_args,))
t.start()
mythreads.append(t)
myqueues.append(q)
for i in range(number_of_threads):
mycount = myqueues[i].get()
print "array size " , mycount
for j in range(mycount):
mydict = myqueues[i].get()
emailwords.append(mydict)
wordcount_local = myqueues[i].get()
for word in wordcount_local:
total_words += wordcount_local[word]
if (word in wordcount):
wordcount[word] += wordcount_local[word]
else:
wordcount[word] = wordcount_local[word]
wordcount_message_local = myqueues[i].get()
for word in wordcount_message_local:
if (word in wordcount_message):
wordcount_message[word] += wordcount_message_local[word]
else:
wordcount_message[word] = wordcount_message_local[word]
mythreads[i].join()
print "Threads done"
# We used to count the words all at once here in the main thread but it is
# 3% to 6% faster to do it in the thread and send it over a pipe because
# most threads can do their counting while waiting for the slowest thread to
# finish doing its main processing.
print "Total words seen ", total_words
stopword_threshold_count = int(stopword_threshold * float(message_count))
print "\nRemoving words seen in more than " + str(stopword_threshold_count) + " messages"
stopword_removed_count = 0
for word in wordcount_message:
if (wordcount_message[word] > stopword_threshold_count):
print "Removing high frequency word ", word
del wordcount[word]
stopword_removed_count += 1
print "Removed ", stopword_removed_count
if (tfidf_wordlimit > 0):
print "\nCalculating wordcount threshold"
wordcount_threshold = 1
while (len(wordcount) > tfidf_wordlimit):
if (wordcount_threshold % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
for word in wordcount.keys():
if (wordcount[word] < wordcount_threshold):
del wordcount[word]
wordcount_threshold += 1
print "\nWordcount threshold was " + str(wordcount_threshold) + ". " + (str(len(wordcount))) + " words remain"
word_id_next = 0
word_id_list = {}
ret_row = []
ret_col = []
ret_data = []
if (out_to_database):
print "\nWriting tfidf information to database\n"
self.clearTFIDFToDatabase()
database_inserts = 0
for messageid in range(message_count):
for word in emailwords[messageid].keys():
if (word not in wordcount):
continue
word_id = -1
if (word in word_id_list):
word_id = word_id_list[word]
else:
word_id = word_id_next
word_id_list[word] = word_id
word_id_next += 1
if (out_to_database):
self.setTFIDFWordIDToDatabase(word, word_id)
ret_row.append(word_id)
ret_col.append(messageid)
ret_data.append(emailwords[messageid][word])
if (out_to_database):
if (database_inserts % 10000 == 0):
sys.stdout.write('.')
sys.stdout.flush()
self.setTFIDFToDatabase(word_id, messageid, emailwords[messageid][word])
database_inserts += 1
print "\n"
ret_matrix = ss.csr_matrix((ret_data, (ret_row, ret_col)), shape=(len(wordcount), message_count))
return ret_matrix
# generalDataConnect
def getTimeSimMatrix (self):
if self.randomFeatures is None:
self.randomFeatures = grf.GaussianRandomFeatures(dim=2,gammak=1/self.ts_magic_number,rn=self.rn,sine=True)
tdata = self.getMessageTimes()
ret_row = []
ret_col = []
ret_data = []
for t in tdata:
t = t.split()
row = int(t[0])
tm = float(t[1])
rf = self.randomFeatures.computeRandomFeatures (tm)
for idx,v in enumerate(rf):
ret_col.append(row)
ret_row.append(idx)
ret_data.append(v)
num_f = self.rn*2 if self.sine else self.rn
ret_matrix = ss.csr_matrix((ret_data, (ret_row, ret_col)), shape=(num_f, self.getTotalMessageCount()))
return ret_matrix
# generalDataConnect
def getSenderSimMatrix (self):
# sdata = self.getMessageTimes()
emailCount = self.getTotalMessageCount()
ret_row = []
ret_col = []
for i in xrange(emailCount):
user_list = self.getUsersByMessage(i)
ret_row.extend(user_list)
ret_col.extend(len(user_list) * [i])
ret_data = len(ret_row) * [1]
ret_matrix = ss.csr_matrix((ret_data, (ret_row, ret_col)), shape=(self.getTotalUserCount(), emailCount))
return ret_matrix
# generalDataConnect
def getWordMatrix(self, tfidf_wordlimit, skip_stemmer, num_threads, message_count, out_to_database, in_from_database):
similarity_data = None
t1 = time.time()
if (in_from_database) :
similarity_data = self.getTFIDFFromDatabase(message_count)
else :
similarity_data = self.getTFIDFSimilarityFromMessage(tfidf_wordlimit, num_threads, skip_stemmer, message_count, out_to_database)
print("Time for importing data ", time.time() - t1)
s = 1./(np.sqrt((similarity_data.multiply(similarity_data)).sum(1)))
# print s.shape
# print similarity_data.shape
# print type(similarity_data)
# print type(s)
# import IPython
# IPython.embed()
s[np.isinf(s)] = 0
s = ss.csr_matrix(s)
similarity_data = similarity_data.multiply(s)
return similarity_data
# generalDataConnect
def getFinalFeatureMatrix (self, tfidf_wordlimit, skip_stemmer, num_threads, message_count, out_to_database, in_from_database, tc=1.0, sc=1.0):
# if not using any of these matrices, remove them from the calculation to save computation of zeros
wMat = self.getWordMatrix(tfidf_wordlimit, skip_stemmer, num_threads, message_count, out_to_database, in_from_database)
if (tc > 0):
tMat = self.getTimeSimMatrix ()
wMat = ss.bmat([[wMat],[tc*tMat]])
if (sc > 0):
sMat = self.getSenderSimMatrix ()
wMat = ss.bmat([[wMat],[sc*sMat]])
# import IPython
# IPython.embed()
# The next two lines of code remove rows/columns of wMat which
# are entirely only 0.
wMat = wMat[np.squeeze(np.array(np.nonzero(wMat.sum(axis=1))[0])),:]
wMat = wMat[:,np.squeeze(np.array(np.nonzero(wMat.sum(axis=0))))[1]]
return wMat
# generalDataConnect
def getAffinityMatrix (self, tfidf_wordlimit, skip_stemmer, num_threads, message_count, out_to_database, in_from_database, tc=1.0, sc=1.0):
# if not using any of these matrices, remove them from the calculation to save computation of zeros
wMat = self.getFinalFeatureMatrix(tfidf_wordlimit, skip_stemmer, num_threads, message_count, out_to_database, in_from_database, tc, sc)
return wMat.T.dot(wMat)
# generalDataConnect
def getFieldByMessage(self, message_id, field_name):
raise NotImplementedError()
# generalDataConnect
# return an array listing all people involved in a message: the sender and the recipients
# If the sender is negative (culled by the frequency threshold) don't return it
def getUsersByMessage(self, message_id):
ret_array = []
ret_array.extend(self.getRecipientsByMessage(message_id))
sender = self.getSenderByMessage(message_id)
if (sender >= 0):
ret_array.append(sender)
return ret_array
# generalDataConnect
# This was implemented in the old Java code but I think it is actually unused
def getMessageTimes(self):
raise NotImplementedError()
# generalDataConnect
# This was implemented in the old Java code but I think it is actually unused
def getMessageTimesAndSenders(self):
raise NotImplementedError()
# generalDataConnect
# return messages [start_message, end_message)
def getMessages(self, start_message, end_message):
raise NotImplementedError()
class flatfileDataConnect (generalDataConnect):
# flatfileDataConnect
def __init__ (self):
self.messages_data = None
self.users_to_id = None
self.id_to_users = None
generalDataConnect.__init__ (self)
# flatfileDataConnect
def connect(self, path):
self.messages_data = {}
self.users_to_id = {}
self.id_to_users = []
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
files_seen = 0;
message_index = 0;
if (os.path.isfile(path)):
message_index = self.processFlatfile(path, message_index)
elif (os.path.isdir(path)):
for root, dirs, files in os.walk(path):
for name in files:
message_index = self.processFlatfile(os.path.join(root, name), message_index)
if (files_seen % 100 == 0):
print "files: ",files_seen,"/",len(files)," num messages:", message_index
if (files_seen % 10 == 0):
sys.stdout.write(".")
files_seen += 1
#if (files_seen == 1000):
# break
return len(self.messages_data)
# returns the new message_index (a file may have any number of records in it)
def processFlatfile(self, file, message_index):
f = open(file)
data = f.read().split('\n')
# we can't just return len(data) because a line might be blank so we count actual lines before we return
f.close()
epoch = datetime.datetime(1970,1,1,0,0,0,0,pytz.UTC)
for one_data in data:
if (one_data == ""):
continue
try:
one_data_parsed = json.loads(one_data)
except:
# this is ok because the text file might not necessarily have json data in it
continue
self.messages_data[message_index] = {}
self.messages_data[message_index]["text"] = one_data_parsed["text"].encode('ascii', 'replace')
# Fri Jun 01 08:44:12 +0000 2013
parsed_date = parse(one_data_parsed["created_at"])
self.messages_data[message_index]["timestamp"] = (parsed_date-epoch).total_seconds()
username = one_data_parsed["user"]["screen_name"]
if (self.users_to_id.get(username) != None):
self.messages_data[message_index]["userid"] = self.users_to_id.get(username)
else:
id = len(self.id_to_users)
self.users_to_id[username] = id
self.id_to_users.append(username)
self.messages_data[message_index]["userid"] = id
message_index += 1
return message_index
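# Illustrative (hedged) input record: each non-empty line of a flat file is
# expected to be a JSON object shaped roughly like a tweet, e.g.
#   {"text": "hello world", "created_at": "Fri Jun 01 08:44:12 +0000 2013",
#    "user": {"screen_name": "someuser"}}
# Only the fields read above (text, created_at, user.screen_name) are used.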
# flatfileDataConnect
def getTotalMessageCount(self):
return len(self.messages_data)
# flatfileDataConnect
def getTotalUserCount(self):
return 1
# flatfileDataConnect
def getRecipientsByMessage(self, message_id):
ret_arr = []
return ret_arr
# flatfileDataConnect
def getMessagesByKeywordSubject(self, word):
return self.getMessagesByKeyword(word)
# flatfileDataConnect
def getMessagesByKeyword(self, word):
data = []
for messageid in self.messages_data:
if (re.search(r"" + re.escape(word), self.messages_data[messageid]["text"])):
data.append(str(messageid) + " : " + str(self.messages_data[messageid]["timestamp"]) + " : " + self.messages_data[messageid]["text"])
return data
# flatfileDataConnect
def getTimeByMessage(self, message_id):
return self.messages_data[message_id]["timestamp"]
# flatfileDataConnect
def getSubjectByMessage(self, message_id):
return self.messages_data[message_id]["text"]
# flatfileDataConnect
def getSenderByMessage(self, message_id):
return self.messages_data[message_id]["userid"]
# flatfileDataConnect
def getMessageBodyFromMessageID(self, message_id):
data = []
data.append(self.messages_data[message_id]["timestamp"]);
data.append("");
data.append(self.messages_data[message_id]["text"]);
return data
# flatfileDataConnect
def getMessageSubjectFromMessageID(self, message_id):
return self.messages_data[message_id]["text"]
# flatfileDataConnect
def getMessagesFromUserToUser(self, user_from, user_to):
return None
# flatfileDataConnect
def getUserNameFromID(self, user_id):
if (user_id < 0 or user_id >= len(self.id_to_users)):
return ""
return self.id_to_users[user_id]
# returns an array where each value is a string of the form "<message_id> <seconds from epoch timestamp>"
def getMessageTimes(self):
data = []
for messageid in self.messages_data:
data.append(str(messageid) + " " + str(self.messages_data[messageid]["timestamp"]))
return data
# flatfileDataConnect
# return messages [start_message, end_message)
def getMessages(self, start_message, end_message):
messages_data_process = {}
for messageid in range(start_message,end_message):
messages_data_process[messageid] = self.messages_data[messageid]["text"]
return messages_data_process
class mysqlDataConnect (generalDataConnect):
#mysqlDataConnect
def __init__ (self):
self.db = None
self.word_id_cache = None
generalDataConnect.__init__ (self)
#mysqlDataConnect
def connect(self, database, db_host="localhost", db_user="root", db_password=""):
self.db = MySQLdb.connect(host=db_host,
user=db_user,
passwd=db_password,
db=database)
return self.getTotalMessageCount()
#mysqlDataConnect
# return the number of messages
def getTotalMessageCount(self):
cur = self.db.cursor()
cur.execute("SELECT COUNT(messageID) FROM messages")
row=cur.fetchone()
return int(row[0])
#mysqlDataConnect
# return the number of users
def getTotalUserCount(self):
cur = self.db.cursor()
cur.execute("SELECT COUNT(psersonid) FROM people")
row=cur.fetchone()
return int(row[0])
#mysqlDataConnect
# for a given message id, return an array of recipients of that message
# each value is a long, not a string
def getRecipientsByMessage(self, message_id):
cur = self.db.cursor()
cur.execute("SELECT DISTINCT personid FROM recipients WHERE messageid=" + str(message_id))
data = []
for row in cur.fetchall():
data.append(int(row[0]))
return data
#mysqlDataConnect
# for a given a keyword, return an array of "<message_id> : <timestamp> : <subject>"
# for any message whose subject matches
# we do not scrub the inputs here and assume the user won't do anything malicious
def getMessagesByKeywordSubject(self, word):
cur = self.db.cursor()
cur.execute("SELECT messages.messageid, messages.messagedt, messages.subject"
+ " FROM messages WHERE subject LIKE '%" + word + "%' ORDER BY messageid")
data = []
for row in cur.fetchall():
data.append(str(row[0]) + " : " + str(row[1]) + " : " + row[2])
return data
#mysqlDataConnect
# for a given a keyword, return an array of "<message_id> : <timestamp> : <subject>"
# for any message whose body matches
# we do not scrub the inputs here and assume the user won't do anything malicious
def getMessagesByKeyword(self, word):
cur = self.db.cursor()
cur.execute("SELECT messages.messageid, messages.messagedt, messages.subject"
+ " FROM messages INNER JOIN bodies ON messages.messageid=bodies.messageid "
+ " WHERE body LIKE '%" + word + "%' ORDER BY messages.messagedt")
data = []
for row in cur.fetchall():
data.append(str(row[0]) + " : " + str(row[1]) + " : " + row[2])
return data
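    #mysqlDataConnect
    # Hedged alternative (not part of the original class): MySQLdb also supports
    # parameterized queries, which avoid the injection risk accepted in the
    # comments above. getMessagesByKeywordSafe is a hypothetical illustration.
    def getMessagesByKeywordSafe(self, word):
        cur = self.db.cursor()
        cur.execute("SELECT messages.messageid, messages.messagedt, messages.subject"
                    " FROM messages INNER JOIN bodies ON messages.messageid=bodies.messageid"
                    " WHERE body LIKE %s ORDER BY messages.messagedt",
                    ("%" + word + "%",))
        return [str(row[0]) + " : " + str(row[1]) + " : " + row[2]
                for row in cur.fetchall()]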
#mysqlDataConnect
# for a message_id, return the timestamp
def getTimeByMessage(self, message_id):
return str(self.getFieldByMessage(message_id, "messagedt")) + ""
#mysqlDataConnect
# for a message_id, return the timestamp
def getSubjectByMessage(self, message_id):
return str(self.getFieldByMessage(message_id, "subject")) + ""
#mysqlDataConnect
# for a message_id, return the sender_id
def getSenderByMessage(self, message_id):
return self.getFieldByMessage(message_id, "senderid")
#mysqlDataConnect
# for a message_id, return a specific field
def getFieldByMessage(self, message_id, field_name):
cur = self.db.cursor()
cur.execute("SELECT " + field_name + " FROM messages WHERE messageid = " + str(message_id) + " LIMIT 1")
row=cur.fetchone()
return row[0]
# returns an array where each value is a string of the form "<message_id> <seconds from epoch timestamp>"
def getMessageTimes(self):
cur = self.db.cursor()
cur.execute("SELECT messageid, UNIX_TIMESTAMP(messagedt) FROM messages")
data = []
for row in cur.fetchall():
data.append(str(row[0]) + " " + str(row[1]))
return data
# returns an array where each value is a string of the form "<message_id> <seconds from epoch timestamp> <sender_id>"
# def getMessageTimesAndSenders(self):
# cur = self.db.cursor()
# cur.execute("SELECT messageid, UNIX_TIMESTAMP(messagedt), senderid FROM messages")
# data = []
# for row in cur.fetchall():
# data.append(str(row[0]) + " " + str(row[1]) + " " + row[2])
# return data
#mysqlDataConnect
# returns an array where the first element is the message timestamp, the second is the subject, and the third is the body
def getMessageBodyFromMessageID(self, message_id):
cur = self.db.cursor()
cur.execute("SELECT messages.messagedt, messages.subject, bodies.body FROM messages, bodies"
+ " WHERE messages.messageid=bodies.messageid AND messages.messageid=" + str(message_id))
row=cur.fetchone()
data = []
data.append(row[0])
data.append(row[1])
data.append(row[2])
return data
#mysqlDataConnect
# returns the subject string for a message_id
def getMessageSubjectFromMessageID(self, message_id):
cur = self.db.cursor()
cur.execute("SELECT messages.subject FROM messages WHERE messages.messageid=" + str(message_id))
row=cur.fetchone()
return row[0]
#mysqlDataConnect
# for user_ids user_from and user_to, return an array where each value is a message that user_from sent and user_to received
# Each value is of the form "<message_id> : <timestamp> : <subject>"
def getMessagesFromUserToUser(self, user_from, user_to):
cur = self.db.cursor()
cur.execute("SELECT messages.messageid, messages.messagedt, messages.subject"
+ " FROM messages, recipients WHERE messages.messageid=recipients.messageid AND recipients.personid="
+ str(user_to) + " AND messages.senderid=" + str(user_from)
+ " GROUP BY messages.messageid ORDER BY messages.messagedt")
data = []
for row in cur.fetchall():
data.append(str(row[0]) + " : " + str(row[1]) + " : " + row[2] + "\n")
return data
#mysqlDataConnect
#returns the username string from a user_id
#If no name is available, return the email address instead
def getUserNameFromID(self, user_id):
cur = self.db.cursor()
cur.execute("SELECT * FROM people WHERE personid=" + str(user_id))
row=cur.fetchone()
if (row is None):
return str(user_id) + " "
email_address = row[1]
name = row[2]
if (name is None or len(name) == 0):
return email_address
return name
# mysqlDataConnect
# return messages [start_message, end_message)
def getMessages(self, start_message, end_message):
messages_data = {}
cur = self.db.cursor()
cur.execute("SELECT * FROM bodies WHERE messageid >=" + str(start_message) + " AND messageid <" + str(end_message) + " ORDER BY messageid")
for message in cur.fetchall():
messageid = message[0]
body = message[1]
messages_data[messageid] = body
return messages_data
# mysqlDataConnect
# return sparse matrix
def getTFIDFFromDatabase(self, message_count):
ret_row = []
ret_col = []
ret_data = []
cur = self.db.cursor()
cur.execute("SELECT * FROM tf_idf_dictionary")
word_ids = {}
for tfidf_row in cur.fetchall():
ret_row.append(tfidf_row[0]) #word_id
ret_col.append(tfidf_row[1]) #message_id
ret_data.append(tfidf_row[2]) #count
word_ids[tfidf_row[0]] = 1
ret_matrix = ss.csr_matrix((ret_data, (ret_row, ret_col)), shape=(len(word_ids), message_count))
return ret_matrix
# mysqlDataConnect
# drop and recreate the tfidf tables in the database
def clearTFIDFToDatabase(self):
cur = self.db.cursor()
cur.execute("DROP TABLE IF EXISTS tf_idf_dictionary")
cur.execute("DROP TABLE IF EXISTS tf_idf_wordmap")
cur.execute("CREATE TABLE tf_idf_dictionary (word int(10), messageid int(10), count int(10), INDEX(word), INDEX(messageid), INDEX(count)) ENGINE=MyISAM")
cur.execute("CREATE TABLE tf_idf_wordmap (word_id int(10), word varchar(255), PRIMARY KEY(word_id), UNIQUE(word)) ENGINE=MyISAM ;")
# mysqlDataConnect
# save one row to the database
def setTFIDFToDatabase(self, word_id, messageid, count):
cur = self.db.cursor()
cur.execute("INSERT INTO tf_idf_dictionary VALUES(" + str(word_id) + ", " + str(messageid) + ", " + str(count) + ")")
# mysqlDataConnect
# save one word_id to the database
def setTFIDFWordIDToDatabase(self, word, word_id):
cur = self.db.cursor()
cur.execute("INSERT INTO tf_idf_wordmap VALUES(" + str(word_id) + ", \"" + word + "\")")
| {
"content_hash": "829d1d1164c19031aca54aada173c235",
"timestamp": "",
"source": "github",
"line_count": 811,
"max_line_length": 169,
"avg_line_length": 33.61898890258939,
"alnum_prop": 0.6689895470383276,
"repo_name": "AutonlabCMU/ActiveSearch",
"id": "e2d452ac7c10a44463f9acf1bad6595b40c039b4",
"size": "27286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mysql_connect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "157571"
},
{
"name": "Matlab",
"bytes": "19893"
},
{
"name": "Perl",
"bytes": "13963"
},
{
"name": "Python",
"bytes": "117153"
}
],
"symlink_target": ""
} |
"""
Unit tests for OpenStack Cinder volume driver
"""
import base64
import urllib2
import mox as mox_lib
from cinder import context
from cinder import db
from cinder import test
from cinder import units
from cinder.volume import configuration as conf
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import iscsi
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import nfs
class TestNexentaISCSIDriver(test.TestCase):
TEST_VOLUME_NAME = 'volume1'
TEST_VOLUME_NAME2 = 'volume2'
TEST_SNAPSHOT_NAME = 'snapshot1'
TEST_VOLUME_REF = {
'name': TEST_VOLUME_NAME,
'size': 1,
'id': '1'
}
TEST_VOLUME_REF2 = {
'name': TEST_VOLUME_NAME2,
'size': 1,
'id': '2'
}
TEST_SNAPSHOT_REF = {
'name': TEST_SNAPSHOT_NAME,
'volume_name': TEST_VOLUME_NAME,
}
def __init__(self, method):
super(TestNexentaISCSIDriver, self).__init__(method)
def setUp(self):
super(TestNexentaISCSIDriver, self).setUp()
self.configuration = mox_lib.MockObject(conf.Configuration)
self.configuration.nexenta_host = '1.1.1.1'
self.configuration.nexenta_user = 'admin'
self.configuration.nexenta_password = 'nexenta'
self.configuration.nexenta_volume = 'cinder'
self.configuration.nexenta_rest_port = 2000
self.configuration.nexenta_rest_protocol = 'http'
self.configuration.nexenta_iscsi_target_portal_port = 3260
self.configuration.nexenta_target_prefix = 'iqn:'
self.configuration.nexenta_target_group_prefix = 'cinder/'
self.configuration.nexenta_blocksize = '8K'
self.configuration.nexenta_sparse = True
self.nms_mock = self.mox.CreateMockAnything()
for mod in ['volume', 'zvol', 'iscsitarget', 'appliance',
'stmf', 'scsidisk', 'snapshot']:
setattr(self.nms_mock, mod, self.mox.CreateMockAnything())
self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
lambda *_, **__: self.nms_mock)
self.drv = iscsi.NexentaISCSIDriver(configuration=self.configuration)
self.drv.do_setup({})
def test_setup_error(self):
self.nms_mock.volume.object_exists('cinder').AndReturn(True)
self.mox.ReplayAll()
self.drv.check_for_setup_error()
def test_setup_error_fail(self):
self.nms_mock.volume.object_exists('cinder').AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(LookupError, self.drv.check_for_setup_error)
def test_local_path(self):
self.assertRaises(NotImplementedError, self.drv.local_path, '')
def test_create_volume(self):
self.nms_mock.zvol.create('cinder/volume1', '1G', '8K', True)
self.nms_mock.stmf.list_targets()
self.nms_mock.iscsitarget.create_target({'target_name': 'iqn:volume1'})
self.nms_mock.stmf.list_targetgroups()
self.nms_mock.stmf.create_targetgroup('cinder/volume1')
self.nms_mock.stmf.list_targetgroup_members('cinder/volume1')
self.nms_mock.stmf.add_targetgroup_member('cinder/volume1',
'iqn:volume1')
self.nms_mock.scsidisk.lu_exists('cinder/volume1')
self.nms_mock.scsidisk.create_lu('cinder/volume1', {})
self.nms_mock.scsidisk.lu_shared('cinder/volume1')
self.nms_mock.scsidisk.add_lun_mapping_entry(
'cinder/volume1', {'target_group': 'cinder/volume1', 'lun': '0'})
self.mox.ReplayAll()
self.drv.create_volume(self.TEST_VOLUME_REF)
def test_delete_volume(self):
self.nms_mock.zvol.destroy('cinder/volume1', '')
self.mox.ReplayAll()
self.drv.delete_volume(self.TEST_VOLUME_REF)
def test_create_cloned_volume(self):
vol = self.TEST_VOLUME_REF2
src_vref = self.TEST_VOLUME_REF
snapshot = {
'volume_name': src_vref['name'],
'name': 'cinder-clone-snap-%s' % vol['id'],
}
self.nms_mock.zvol.create_snapshot('cinder/%s' % src_vref['name'],
snapshot['name'], '')
cmd = 'zfs send %(src_vol)s@%(src_snap)s | zfs recv %(volume)s' % {
'src_vol': 'cinder/%s' % src_vref['name'],
'src_snap': snapshot['name'],
'volume': 'cinder/%s' % vol['name']
}
self.nms_mock.appliance.execute(cmd)
self.nms_mock.snapshot.destroy('cinder/%s@%s' % (src_vref['name'],
snapshot['name']), '')
self.nms_mock.snapshot.destroy('cinder/%s@%s' % (vol['name'],
snapshot['name']), '')
self.mox.ReplayAll()
self.drv.create_cloned_volume(vol, src_vref)
def test_create_snapshot(self):
self.nms_mock.zvol.create_snapshot('cinder/volume1', 'snapshot1', '')
self.mox.ReplayAll()
self.drv.create_snapshot(self.TEST_SNAPSHOT_REF)
def test_create_volume_from_snapshot(self):
self.nms_mock.zvol.clone('cinder/volume1@snapshot1', 'cinder/volume2')
self.mox.ReplayAll()
self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2,
self.TEST_SNAPSHOT_REF)
def test_delete_snapshot(self):
self.nms_mock.snapshot.destroy('cinder/volume1@snapshot1', '')
self.mox.ReplayAll()
self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF)
self.mox.ResetAll()
        # Check that an exception is not raised if the snapshot does not exist
mock = self.nms_mock.snapshot.destroy('cinder/volume1@snapshot1', '')
mock.AndRaise(nexenta.NexentaException(
'Snapshot cinder/volume1@snapshot1 does not exist'))
self.mox.ReplayAll()
self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF)
_CREATE_EXPORT_METHODS = [
('stmf', 'list_targets', tuple(), [], False, ),
('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},),
u'Unable to create iscsi target\n'
u' iSCSI target iqn.1986-03.com.sun:02:cinder-volume1 already'
u' configured\n'
u' itadm create-target failed with error 17\n', True, ),
('stmf', 'list_targetgroups', tuple(), [], False, ),
('stmf', 'create_targetgroup', ('cinder/volume1',),
u'Unable to create targetgroup: stmfadm: cinder/volume1:'
u' already exists\n', True, ),
('stmf', 'list_targetgroup_members', ('cinder/volume1', ), [],
False, ),
('stmf', 'add_targetgroup_member', ('cinder/volume1', 'iqn:volume1'),
u'Unable to add member to targetgroup: stmfadm:'
u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n',
True, ),
('scsidisk', 'lu_exists', ('cinder/volume1', ), 0, False, ),
('scsidisk', 'create_lu', ('cinder/volume1', {}),
u"Unable to create lu with zvol 'cinder/volume1':\n"
u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n",
True, ),
('scsidisk', 'lu_shared', ('cinder/volume1', ), 0, False, ),
('scsidisk', 'add_lun_mapping_entry', ('cinder/volume1', {
'target_group': 'cinder/volume1', 'lun': '0'}),
u"Unable to add view to zvol 'cinder/volume1' (LUNs in use: ):\n"
u" stmfadm: view entry exists\n", True, ),
]
def _stub_export_method(self, module, method, args, error, raise_exception,
fail=False):
m = getattr(self.nms_mock, module)
m = getattr(m, method)
mock = m(*args)
if raise_exception and fail:
mock.AndRaise(nexenta.NexentaException(error))
else:
mock.AndReturn(error)
def _stub_all_export_methods(self, fail=False):
for params in self._CREATE_EXPORT_METHODS:
self._stub_export_method(*params, fail=fail)
def test_create_export(self):
self._stub_all_export_methods()
self.mox.ReplayAll()
retval = self.drv.create_export({}, self.TEST_VOLUME_REF)
location = '%(host)s:%(port)s,1 %(prefix)s%(volume)s 0' % {
'host': self.configuration.nexenta_host,
'port': self.configuration.nexenta_iscsi_target_portal_port,
'prefix': self.configuration.nexenta_target_prefix,
'volume': self.TEST_VOLUME_NAME
}
self.assertEqual(retval, {'provider_location': location})
def __get_test(i):
def _test_create_export_fail(self):
for params in self._CREATE_EXPORT_METHODS[:i]:
self._stub_export_method(*params)
self._stub_export_method(*self._CREATE_EXPORT_METHODS[i],
fail=True)
self.mox.ReplayAll()
self.assertRaises(nexenta.NexentaException,
self.drv.create_export,
{},
self.TEST_VOLUME_REF)
return _test_create_export_fail
for i in range(len(_CREATE_EXPORT_METHODS)):
if i % 2:
locals()['test_create_export_fail_%d' % i] = __get_test(i)
def test_ensure_export(self):
self._stub_all_export_methods(fail=True)
self.mox.ReplayAll()
self.drv.ensure_export({}, self.TEST_VOLUME_REF)
def test_remove_export(self):
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
self.nms_mock.stmf.destroy_targetgroup('cinder/volume1')
self.nms_mock.iscsitarget.delete_target('iqn:volume1')
self.mox.ReplayAll()
self.drv.remove_export({}, self.TEST_VOLUME_REF)
def test_remove_export_fail_0(self):
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
self.nms_mock.stmf.destroy_targetgroup(
'cinder/volume1').AndRaise(nexenta.NexentaException())
self.nms_mock.iscsitarget.delete_target('iqn:volume1')
self.mox.ReplayAll()
self.drv.remove_export({}, self.TEST_VOLUME_REF)
def test_remove_export_fail_1(self):
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
self.nms_mock.stmf.destroy_targetgroup('cinder/volume1')
self.nms_mock.iscsitarget.delete_target(
'iqn:volume1').AndRaise(nexenta.NexentaException())
self.mox.ReplayAll()
self.drv.remove_export({}, self.TEST_VOLUME_REF)
def test_get_volume_stats(self):
stats = {'size': '5368709120G',
'used': '5368709120G',
'available': '5368709120G',
'health': 'ONLINE'}
self.nms_mock.volume.get_child_props(
self.configuration.nexenta_volume,
'health|size|used|available').AndReturn(stats)
self.mox.ReplayAll()
stats = self.drv.get_volume_stats(True)
self.assertEqual(stats['storage_protocol'], 'iSCSI')
self.assertEqual(stats['total_capacity_gb'], 5368709120.0)
self.assertEqual(stats['free_capacity_gb'], 5368709120.0)
self.assertEqual(stats['reserved_percentage'], 0)
self.assertEqual(stats['QoS_support'], False)
class TestNexentaJSONRPC(test.TestCase):
HOST = 'example.com'
URL = 'http://%s/' % HOST
URL_S = 'https://%s/' % HOST
USER = 'user'
PASSWORD = 'password'
HEADERS = {
'Authorization':
'Basic %s' % base64.b64encode('%s:%s' % (USER, PASSWORD)),
'Content-Type': 'application/json'
}
REQUEST = 'the request'
def setUp(self):
super(TestNexentaJSONRPC, self).setUp()
self.proxy = jsonrpc.NexentaJSONProxy(
'http', self.HOST, 2000, '/', self.USER, self.PASSWORD, auto=True)
self.mox.StubOutWithMock(urllib2, 'Request', True)
self.mox.StubOutWithMock(urllib2, 'urlopen')
self.resp_mock = self.mox.CreateMockAnything()
self.resp_info_mock = self.mox.CreateMockAnything()
self.resp_mock.info().AndReturn(self.resp_info_mock)
urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
def test_call(self):
urllib2.Request(
'http://%s:2000/' % self.HOST,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = ''
self.resp_mock.read().AndReturn(
'{"error": null, "result": "the result"}')
self.mox.ReplayAll()
result = self.proxy('arg1', 'arg2')
self.assertEqual("the result", result)
def test_call_deep(self):
urllib2.Request(
'http://%s:2000/' % self.HOST,
'{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
' "method": "meth"}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = ''
self.resp_mock.read().AndReturn(
'{"error": null, "result": "the result"}')
self.mox.ReplayAll()
result = self.proxy.obj1.subobj.meth('arg1', 'arg2')
self.assertEqual("the result", result)
def test_call_auto(self):
urllib2.Request(
'http://%s:2000/' % self.HOST,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
urllib2.Request(
'https://%s:2000/' % self.HOST,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = 'EOF in headers'
self.resp_mock.read().AndReturn(
'{"error": null, "result": "the result"}')
urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
self.mox.ReplayAll()
result = self.proxy('arg1', 'arg2')
self.assertEqual("the result", result)
def test_call_error(self):
urllib2.Request(
'http://%s:2000/' % self.HOST,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = ''
self.resp_mock.read().AndReturn(
'{"error": {"message": "the error"}, "result": "the result"}')
self.mox.ReplayAll()
self.assertRaises(jsonrpc.NexentaJSONException,
self.proxy, 'arg1', 'arg2')
def test_call_fail(self):
urllib2.Request(
'http://%s:2000/' % self.HOST,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = 'EOF in headers'
self.proxy.auto = False
self.mox.ReplayAll()
self.assertRaises(jsonrpc.NexentaJSONException,
self.proxy, 'arg1', 'arg2')
class TestNexentaNfsDriver(test.TestCase):
TEST_EXPORT1 = 'host1:/volumes/stack/share'
TEST_NMS1 = 'http://admin:nexenta@host1:2000'
TEST_EXPORT2 = 'host2:/volumes/stack/share'
TEST_NMS2 = 'http://admin:nexenta@host2:2000'
TEST_EXPORT2_OPTIONS = '-o intr'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf'
TEST_SHARE_SVC = 'svc:/network/nfs/server:default'
TEST_SHARE_OPTS = {
'read_only': '',
'read_write': '*',
'recursive': 'true',
'anonymous_rw': 'true',
'extra_options': 'anon=0',
'root': 'nobody'
}
def _create_volume_db_entry(self):
vol = {
'id': '1',
'size': 1,
'status': 'available',
'provider_location': self.TEST_EXPORT1
}
return db.volume_create(self.ctxt, vol)['id']
def setUp(self):
super(TestNexentaNfsDriver, self).setUp()
self.ctxt = context.get_admin_context()
self.configuration = mox_lib.MockObject(conf.Configuration)
self.configuration.nexenta_shares_config = None
self.configuration.nexenta_mount_point_base = '$state_path/mnt'
self.configuration.nexenta_sparsed_volumes = True
self.configuration.nexenta_volume_compression = 'on'
self.configuration.nfs_mount_point_base = '/mnt/test'
self.configuration.nfs_mount_options = None
self.configuration.nexenta_nms_cache_volroot = False
self.nms_mock = self.mox.CreateMockAnything()
for mod in ('appliance', 'folder', 'server', 'volume', 'netstorsvc',
'snapshot'):
setattr(self.nms_mock, mod, self.mox.CreateMockAnything())
self.nms_mock.__hash__ = lambda *_, **__: 1
self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
lambda *_, **__: self.nms_mock)
self.drv = nfs.NexentaNfsDriver(configuration=self.configuration)
self.drv.shares = {}
self.drv.share2nms = {}
def test_check_for_setup_error(self):
self.drv.share2nms = {
'host1:/volumes/stack/share': self.nms_mock
}
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
self.nms_mock.volume.object_exists('stack').AndReturn(True)
self.nms_mock.folder.object_exists('stack/share').AndReturn(True)
share_opts = {
'read_write': '*',
'read_only': '',
'root': 'nobody',
'extra_options': 'anon=0',
'recursive': 'true',
'anonymous_rw': 'true',
}
self.nms_mock.netstorsvc.share_folder(
'svc:/network/nfs/server:default', 'stack/share', share_opts)
self.mox.ReplayAll()
self.drv.check_for_setup_error()
self.mox.ResetAll()
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
self.nms_mock.volume.object_exists('stack').AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(LookupError, self.drv.check_for_setup_error)
self.mox.ResetAll()
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
self.nms_mock.volume.object_exists('stack').AndReturn(True)
self.nms_mock.folder.object_exists('stack/share').AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(LookupError, self.drv.check_for_setup_error)
def test_initialize_connection(self):
self.drv.shares = {
self.TEST_EXPORT1: None
}
volume = {
'provider_location': self.TEST_EXPORT1,
'name': 'volume'
}
result = self.drv.initialize_connection(volume, None)
self.assertEqual(result['data']['export'],
'%s/volume' % self.TEST_EXPORT1)
def test_do_create_volume(self):
volume = {
'provider_location': self.TEST_EXPORT1,
'size': 1,
'name': 'volume-1'
}
self.drv.shares = {self.TEST_EXPORT1: None}
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
compression = self.configuration.nexenta_volume_compression
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
self.nms_mock.folder.create_with_props(
'stack', 'share/volume-1', {'compression': compression})
self.nms_mock.netstorsvc.share_folder(self.TEST_SHARE_SVC,
'stack/share/volume-1',
self.TEST_SHARE_OPTS)
self.nms_mock.appliance.execute(
'dd if=/dev/zero of=/volumes/stack/share/volume-1/volume bs=1M '
'count=0 seek=1024'
)
self.nms_mock.appliance.execute('chmod ugo+rw '
'/volumes/stack/share/volume-1/volume')
self.mox.ReplayAll()
self.drv._do_create_volume(volume)
self.mox.ResetAll()
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
self.nms_mock.folder.create_with_props(
'stack', 'share/volume-1', {'compression': compression})
self.nms_mock.netstorsvc.share_folder(
self.TEST_SHARE_SVC, 'stack/share/volume-1',
self.TEST_SHARE_OPTS).AndRaise(nexenta.NexentaException('-'))
self.nms_mock.folder.destroy('stack/share/volume-1')
self.mox.ReplayAll()
self.assertRaises(nexenta.NexentaException, self.drv._do_create_volume,
volume)
def test_create_sparsed_file(self):
self.nms_mock.appliance.execute('dd if=/dev/zero of=/tmp/path bs=1M '
'count=0 seek=1024')
self.mox.ReplayAll()
self.drv._create_sparsed_file(self.nms_mock, '/tmp/path', 1)
def test_create_regular_file(self):
self.nms_mock.appliance.execute('dd if=/dev/zero of=/tmp/path bs=1M '
'count=1024')
self.mox.ReplayAll()
self.drv._create_regular_file(self.nms_mock, '/tmp/path', 1)
def test_set_rw_permissions_for_all(self):
path = '/tmp/path'
self.nms_mock.appliance.execute('chmod ugo+rw %s' % path)
self.mox.ReplayAll()
self.drv._set_rw_permissions_for_all(self.nms_mock, path)
def test_local_path(self):
volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'}
path = self.drv.local_path(volume)
self.assertEqual(
path,
'$state_path/mnt/b3f660847a52b29ac330d8555e4ad669/volume-1/volume'
)
def test_remote_path(self):
volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'}
path = self.drv.remote_path(volume)
self.assertEqual(path, '/volumes/stack/share/volume-1/volume')
def test_share_folder(self):
path = 'stack/share/folder'
self.nms_mock.netstorsvc.share_folder(self.TEST_SHARE_SVC, path,
self.TEST_SHARE_OPTS)
self.mox.ReplayAll()
self.drv._share_folder(self.nms_mock, 'stack', 'share/folder')
def test_load_shares_config(self):
self.drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
self.mox.StubOutWithMock(self.drv, '_read_config_file')
config_data = [
'%s %s' % (self.TEST_EXPORT1, self.TEST_NMS1),
'# %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2),
'',
'%s %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2,
self.TEST_EXPORT2_OPTIONS)
]
self.drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\
AndReturn(config_data)
self.mox.ReplayAll()
self.drv._load_shares_config(self.drv.configuration.nfs_shares_config)
self.assertIn(self.TEST_EXPORT1, self.drv.shares)
self.assertIn(self.TEST_EXPORT2, self.drv.shares)
self.assertEqual(len(self.drv.shares), 2)
self.assertIn(self.TEST_EXPORT1, self.drv.share2nms)
self.assertIn(self.TEST_EXPORT2, self.drv.share2nms)
self.assertEqual(len(self.drv.share2nms.keys()), 2)
self.assertEqual(self.drv.shares[self.TEST_EXPORT2],
self.TEST_EXPORT2_OPTIONS)
self.mox.VerifyAll()
def test_get_capacity_info(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
self.nms_mock.folder.get_child_props('stack/share', '').AndReturn({
'available': '1G',
'used': '2G'
})
self.mox.ReplayAll()
total, free, allocated = self.drv._get_capacity_info(self.TEST_EXPORT1)
self.assertEqual(total, 3 * units.GiB)
self.assertEqual(free, units.GiB)
self.assertEqual(allocated, 2 * units.GiB)
def test_get_share_datasets(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
self.mox.ReplayAll()
volume_name, folder_name = \
self.drv._get_share_datasets(self.TEST_EXPORT1)
self.assertEqual(volume_name, 'stack')
self.assertEqual(folder_name, 'share')
def test_delete_snapshot(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
self._create_volume_db_entry()
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
self.nms_mock.snapshot.destroy('stack/share/volume-1@snapshot1', '')
self.mox.ReplayAll()
self.drv.delete_snapshot({'volume_id': '1', 'name': 'snapshot1'})
self.mox.ResetAll()
        # Check that an exception is not raised if the snapshot does not exist
        # on the NexentaStor appliance.
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
mock = self.nms_mock.snapshot.destroy('stack/share/volume-1@snapshot1',
'')
mock.AndRaise(nexenta.NexentaException("Snapshot does not exist"))
self.mox.ReplayAll()
self.drv.delete_snapshot({'volume_id': '1', 'name': 'snapshot1'})
self.mox.ResetAll()
def test_delete_volume(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
self._create_volume_db_entry()
self.drv._ensure_share_mounted = lambda *_, **__: 0
self.drv._execute = lambda *_, **__: 0
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
self.nms_mock.folder.destroy('stack/share/volume-1', '')
self.mox.ReplayAll()
self.drv.delete_volume({
'id': '1',
'name': 'volume-1',
'provider_location': self.TEST_EXPORT1
})
self.mox.ResetAll()
        # Check that an exception is not raised if the folder does not exist
        # on the NexentaStor appliance.
self.nms_mock.server.get_prop('volroot').AndReturn('/volumes')
mock = self.nms_mock.folder.destroy('stack/share/volume-1', '')
mock.AndRaise(nexenta.NexentaException("Folder does not exist"))
self.mox.ReplayAll()
self.drv.delete_volume({
'id': '1',
'name': 'volume-1',
'provider_location': self.TEST_EXPORT1
})
self.mox.ResetAll()
| {
"content_hash": "1cf4c2731208feaaab36a8998f2cf6a1",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 79,
"avg_line_length": 40.284615384615385,
"alnum_prop": 0.5860225319839603,
"repo_name": "cloudbau/cinder",
"id": "b5318320c3bce258e8d3bd9443b746838072bfdb",
"size": "26889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/test_nexenta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5235714"
},
{
"name": "Shell",
"bytes": "8994"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.dispatch.dispatcher import receiver
from model_utils.models import TimeStampedModel
from registration.signals import user_registered
_all = ["Asset", "Profile", "user_registered_callback"]
__author__ = "pmeier82"
# PROFILE
class Profile(models.Model):
"""user profile model"""
class Meta:
app_label = "base"
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
unique=True)
title = models.CharField(
max_length=255,
default="",
)
affiliation = models.CharField(
max_length=255,
default="",
)
research_area = models.TextField(
default=""
)
# TODO: build a meaningful profile
# special
def __str__(self):
return self.user.username
def __unicode__(self):
return unicode(self.__str__())
def user_registered_callback(user, request, **kwargs):
profile, is_new = Profile.objects.get_or_create(user=user)
profile.title = request.POST.get("title", "test")
profile.save()
user_registered.connect(user_registered_callback)
# ASSET
def UPLOAD_TO_HANDLER(obj, fname):
folder = getattr(obj, "kind", "default")
return "{}/{}".format(folder, fname)
class Asset(TimeStampedModel):
"""generic file asset model"""
class Meta:
app_label = "base"
UPLOAD_TO = "default"
# fields
name = models.CharField(max_length=255, unique=False)
data_orig_name = models.CharField(max_length=255, unique=False)
data = models.FileField(upload_to=UPLOAD_TO_HANDLER)
kind = models.CharField(max_length=255, unique=False, null=False, default=UPLOAD_TO)
# generic foreign key
content_type = models.ForeignKey(ContentType, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = generic.GenericForeignKey()
# special methods
def __unicode__(self):
return unicode("{}: {}".format(self.__class__.__name__, self.name))
# django special methods
@models.permalink
def get_absolute_url(self):
return "asset:serve", (self.pk,), {}
@models.permalink
def get_delete_url(self):
return "asset:delete", (self.pk,), {}
# interface
def save(self, *args, **kwargs):
super(Asset, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
super(Asset, self).delete(*args, **kwargs)
@receiver(models.signals.pre_delete, sender=Asset)
def asset_file_delete(sender, instance, **kwargs):
instance.data.delete()
if __name__ == "__main__":
pass
| {
"content_hash": "85f7dee85ba6de3d5a267ea68a913604",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 88,
"avg_line_length": 25.757009345794394,
"alnum_prop": 0.6556603773584906,
"repo_name": "pmeier82/spike_gnode",
"id": "2411abbf4b4c2c62fd144aaa9261d587c405219b",
"size": "2781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11360"
},
{
"name": "HTML",
"bytes": "56070"
},
{
"name": "JavaScript",
"bytes": "734"
},
{
"name": "Nginx",
"bytes": "4758"
},
{
"name": "Python",
"bytes": "37001"
},
{
"name": "Shell",
"bytes": "2475"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos import draw
import pyglet
import random
ri = random.randint
class TestFigure(draw.Canvas):
def render(self):
x,y = director.get_window_size()
for i in range(100):
start = ri(0,640), ri(0,480)
end = ri(0,640), ri(0,480)
color = ri(00,255),ri(00,255),ri(00,255),ri(00,255)
width = ri(1,20)
if (random.random() < 0.3) :
self.set_color( color )
self.set_stroke_width( width )
self.move_to( start )
self.line_to( end )
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
self.add( TestFigure() )
self.schedule( lambda x: 0 )
if __name__ == "__main__":
director.init()
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
| {
"content_hash": "f374a6374bc239bd0d6d0c125e96455f",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 65,
"avg_line_length": 25.431818181818183,
"alnum_prop": 0.5460232350312779,
"repo_name": "adamwiggins/cocos2d",
"id": "8783e8ccfea9c33d2f5482634f8c88e8c155c202",
"size": "1192",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_draw.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "825818"
},
{
"name": "Shell",
"bytes": "3018"
}
],
"symlink_target": ""
} |
import contextlib
import socket
import mock
import testtools
from tempest.common import ssh
from tempest import exceptions
from tempest.tests import base
class TestSshClient(base.TestCase):
def test_pkey_calls_paramiko_RSAKey(self):
with contextlib.nested(
mock.patch('paramiko.RSAKey.from_private_key'),
mock.patch('cStringIO.StringIO')) as (rsa_mock, cs_mock):
cs_mock.return_value = mock.sentinel.csio
pkey = 'mykey'
ssh.Client('localhost', 'root', pkey=pkey)
rsa_mock.assert_called_once_with(mock.sentinel.csio)
cs_mock.assert_called_once_with('mykey')
rsa_mock.reset_mock()
            cs_mock.reset_mock()
pkey = mock.sentinel.pkey
# Shouldn't call out to load a file from RSAKey, since
# a sentinel isn't a basestring...
ssh.Client('localhost', 'root', pkey=pkey)
rsa_mock.assert_not_called()
cs_mock.assert_not_called()
def test_get_ssh_connection(self):
c_mock = self.patch('paramiko.SSHClient')
aa_mock = self.patch('paramiko.AutoAddPolicy')
s_mock = self.patch('time.sleep')
t_mock = self.patch('time.time')
aa_mock.return_value = mock.sentinel.aa
def reset_mocks():
aa_mock.reset_mock()
c_mock.reset_mock()
s_mock.reset_mock()
t_mock.reset_mock()
# Test normal case for successful connection on first try
client_mock = mock.MagicMock()
c_mock.return_value = client_mock
client_mock.connect.return_value = True
client = ssh.Client('localhost', 'root', timeout=2)
client._get_ssh_connection(sleep=1)
aa_mock.assert_called_once_with()
client_mock.set_missing_host_key_policy.assert_called_once_with(
mock.sentinel.aa)
expected_connect = [mock.call(
'localhost',
username='root',
pkey=None,
key_filename=None,
look_for_keys=False,
timeout=10.0,
password=None
)]
self.assertEqual(expected_connect, client_mock.connect.mock_calls)
s_mock.assert_not_called()
t_mock.assert_called_once_with()
reset_mocks()
# Test case when connection fails on first two tries and
# succeeds on third try (this validates retry logic)
client_mock.connect.side_effect = [socket.error, socket.error, True]
t_mock.side_effect = [
1000, # Start time
1000, # LOG.warning() calls time.time() loop 1
1001, # Sleep loop 1
1001, # LOG.warning() calls time.time() loop 2
1002 # Sleep loop 2
]
client._get_ssh_connection(sleep=1)
expected_sleeps = [
mock.call(2),
mock.call(3)
]
self.assertEqual(expected_sleeps, s_mock.mock_calls)
reset_mocks()
# Test case when connection fails on first three tries and
# exceeds the timeout, so expect to raise a Timeout exception
client_mock.connect.side_effect = [
socket.error,
socket.error,
socket.error
]
t_mock.side_effect = [
1000, # Start time
1000, # LOG.warning() calls time.time() loop 1
1001, # Sleep loop 1
1001, # LOG.warning() calls time.time() loop 2
1002, # Sleep loop 2
1003, # Sleep loop 3
1004 # LOG.error() calls time.time()
]
with testtools.ExpectedException(exceptions.SSHTimeout):
client._get_ssh_connection()
def test_exec_command(self):
gsc_mock = self.patch('tempest.common.ssh.Client._get_ssh_connection')
ito_mock = self.patch('tempest.common.ssh.Client._is_timed_out')
select_mock = self.patch('select.poll')
client_mock = mock.MagicMock()
tran_mock = mock.MagicMock()
chan_mock = mock.MagicMock()
poll_mock = mock.MagicMock()
def reset_mocks():
gsc_mock.reset_mock()
ito_mock.reset_mock()
select_mock.reset_mock()
poll_mock.reset_mock()
client_mock.reset_mock()
tran_mock.reset_mock()
chan_mock.reset_mock()
select_mock.return_value = poll_mock
gsc_mock.return_value = client_mock
ito_mock.return_value = True
client_mock.get_transport.return_value = tran_mock
tran_mock.open_session.return_value = chan_mock
poll_mock.poll.side_effect = [
[0, 0, 0]
]
# Test for a timeout condition immediately raised
client = ssh.Client('localhost', 'root', timeout=2)
with testtools.ExpectedException(exceptions.TimeoutException):
client.exec_command("test")
chan_mock.fileno.assert_called_once_with()
chan_mock.exec_command.assert_called_once_with("test")
chan_mock.shutdown_write.assert_called_once_with()
SELECT_POLLIN = 1
poll_mock.register.assert_called_once_with(chan_mock, SELECT_POLLIN)
poll_mock.poll.assert_called_once_with(10)
# Test for proper reading of STDOUT and STDERROR and closing
# of all file descriptors.
reset_mocks()
select_mock.return_value = poll_mock
gsc_mock.return_value = client_mock
ito_mock.return_value = False
client_mock.get_transport.return_value = tran_mock
tran_mock.open_session.return_value = chan_mock
poll_mock.poll.side_effect = [
[1, 0, 0]
]
closed_prop = mock.PropertyMock(return_value=True)
type(chan_mock).closed = closed_prop
chan_mock.recv_exit_status.return_value = 0
chan_mock.recv.return_value = ''
chan_mock.recv_stderr.return_value = ''
client = ssh.Client('localhost', 'root', timeout=2)
client.exec_command("test")
chan_mock.fileno.assert_called_once_with()
chan_mock.exec_command.assert_called_once_with("test")
chan_mock.shutdown_write.assert_called_once_with()
SELECT_POLLIN = 1
poll_mock.register.assert_called_once_with(chan_mock, SELECT_POLLIN)
poll_mock.poll.assert_called_once_with(10)
chan_mock.recv_ready.assert_called_once_with()
chan_mock.recv.assert_called_once_with(1024)
chan_mock.recv_stderr_ready.assert_called_once_with()
chan_mock.recv_stderr.assert_called_once_with(1024)
chan_mock.recv_exit_status.assert_called_once_with()
closed_prop.assert_called_once_with()
| {
"content_hash": "ced1143603fcdcf907419a5ae9cfd79c",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 78,
"avg_line_length": 35.62234042553192,
"alnum_prop": 0.5950425563685232,
"repo_name": "ntymtsiv/tempest",
"id": "a6eedc46681bd0b38132ba21b30a7046fb40c0d0",
"size": "7310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/tests/test_ssh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2312198"
},
{
"name": "Shell",
"bytes": "9160"
}
],
"symlink_target": ""
} |
__author__ = 'Alex'
def boop(my_value): # Has spaces around both.
print my_value
def beep():
value = "Hello"
boop(value) # Space only on the end.
if __name__ == "__main__":
beep() # Spaces between empty braces.
| {
"content_hash": "fdba73c61fb47cff9dbb9c402a099d1b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 46,
"avg_line_length": 18.076923076923077,
"alnum_prop": 0.5787234042553191,
"repo_name": "Ahuge/Pepperoni",
"id": "33b8467bb3aa36d3f183f4ec590935b06cdf6080",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/_resources/brace_error_fixed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7153"
}
],
"symlink_target": ""
} |
""" The default perspective. """
# Standard library imports.
import logging
# Enthought library imports.
from traits.api import Bool, HasTraits, List, provides, Str, Tuple
# Local imports.
from i_perspective import IPerspective
from perspective_item import PerspectiveItem
# Logging.
logger = logging.getLogger(__name__)
@provides(IPerspective)
class Perspective(HasTraits):
""" The default perspective. """
# The ID of the default perspective.
DEFAULT_ID = 'pyface.workbench.default'
# The name of the default perspective.
DEFAULT_NAME = 'Default'
#### 'IPerspective' interface #############################################
# The perspective's unique identifier (unique within a workbench window).
id = Str(DEFAULT_ID)
# The perspective's name.
name = Str(DEFAULT_NAME)
# The contents of the perspective.
contents = List(PerspectiveItem)
# The size of the editor area in this perspective. A value of (-1, -1)
# indicates that the workbench window should choose an appropriate size
# based on the sizes of the views in the perspective.
editor_area_size = Tuple((-1, -1))
# Is the perspective enabled?
enabled = Bool(True)
# Should the editor area be shown in this perspective?
show_editor_area = Bool(True)
###########################################################################
# 'object' interface.
###########################################################################
def __str__(self):
""" Return an informal string representation of the object. """
return 'Perspective(%s)' % self.id
###########################################################################
# 'Perspective' interface.
###########################################################################
#### Initializers #########################################################
def _id_default(self):
""" Trait initializer. """
# If no Id is specified then use the name.
return self.name
#### Methods ##############################################################
def create(self, window):
""" Create the perspective in a workbench window.
For most cases you should just be able to set the 'contents' trait to
lay out views as required. However, you can override this method if
you want to have complete control over how the perspective is created.
"""
# Set the size of the editor area.
if self.editor_area_size != (-1, -1):
window.editor_area_size = self.editor_area_size
# If the perspective has specific contents then add just those.
if len(self.contents) > 0:
self._add_contents(window, self.contents)
# Otherwise, add all of the views defined in the window at their
        # default positions relative to the editor area.
else:
self._add_all(window)
# Activate the first view in every region.
window.reset_views()
return
def show(self, window):
""" Called when the perspective is shown in a workbench window.
The default implementation does nothing, but you can override this
method if you want to do something whenever the perspective is
activated.
"""
return
###########################################################################
# Private interface.
###########################################################################
def _add_contents(self, window, contents):
""" Adds the specified contents. """
# If we are adding specific contents then we ignore any default view
# visibility.
#
# fixme: This is a bit ugly! Why don't we pass the visibility in to
# 'window.add_view'?
for view in window.views:
view.visible = False
for item in contents:
self._add_perspective_item(window, item)
return
def _add_perspective_item(self, window, item):
""" Adds a perspective item to a window. """
# If no 'relative_to' is specified then the view is positioned
# relative to the editor area.
if len(item.relative_to) > 0:
relative_to = window.get_view_by_id(item.relative_to)
else:
relative_to = None
# fixme: This seems a bit ugly, having to reach back up to the
# window to get the view. Maybe its not that bad?
view = window.get_view_by_id(item.id)
if view is not None:
# fixme: This is probably not the ideal way to sync view traits
# and perspective_item traits.
view.style_hint = item.style_hint
# Add the view to the window.
window.add_view(
view, item.position, relative_to, (item.width, item.height)
)
else:
# The reason that we don't just barf here is that a perspective
# might use views from multiple plugins, and we probably want to
# continue even if one or two of them aren't present.
#
# fixme: This is worth keeping an eye on though. If we end up with
# a strict mode that throws exceptions early and often for
# developers, then this might be a good place to throw one ;^)
logger.error('missing view for perspective item <%s>' % item.id)
return
def _add_all(self, window):
""" Adds *all* of the window's views defined in the window. """
for view in window.views:
if view.visible:
self._add_view(window, view)
return
def _add_view(self, window, view):
""" Adds a view to a window. """
# If no 'relative_to' is specified then the view is positioned
# relative to the editor area.
if len(view.relative_to) > 0:
relative_to = window.get_view_by_id(view.relative_to)
else:
relative_to = None
# Add the view to the window.
window.add_view(
view, view.position, relative_to, (view.width, view.height)
)
return
#### EOF ######################################################################
| {
"content_hash": "b6892729185dad15b2f6241c5b7fe435",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 79,
"avg_line_length": 32.55440414507772,
"alnum_prop": 0.5414610854687252,
"repo_name": "pankajp/pyface",
"id": "5fc2496b2a6cfa29a6caa3aabe8627592e93e6d0",
"size": "6283",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyface/workbench/perspective.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13515"
},
{
"name": "Python",
"bytes": "2279955"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="colorsrc", parent_name="bar.marker", **kwargs):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "3186b59d30f22c82ef7947de376b3c18",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 83,
"avg_line_length": 37,
"alnum_prop": 0.6081081081081081,
"repo_name": "plotly/python-api",
"id": "8e3d2ef1768d7d1b7de1b4647085ceef87ef28eb",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/bar/marker/_colorsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import os
import sys
import time
from distutils.version import LooseVersion
from wlauto.core.extension import Parameter, ExtensionMeta, ListCollection
from wlauto.core.workload import Workload
from wlauto.common.android.resources import ApkFile
from wlauto.common.resources import ExtensionAsset, File
from wlauto.exceptions import WorkloadError, ResourceError, DeviceError
from wlauto.utils.android import (ApkInfo, ANDROID_NORMAL_PERMISSIONS,
ANDROID_UNCHANGEABLE_PERMISSIONS, UNSUPPORTED_PACKAGES)
from wlauto.utils.types import boolean, ParameterDict
import wlauto.utils.statedetect as state_detector
from wlauto.common.linux.workload import ReventWorkload
DELAY = 5
# Due to the way `super` works, you have to call it at every level, but WA executes some
# methods conditionally and so has to call them directly via the class; this breaks super
# and causes it to run things multiple times, etc. As a workaround, until workloads are
# reworked, everything that subclasses Workload calls parent methods explicitly.
class UiAutomatorWorkload(Workload):
"""
Base class for all workloads that rely on a UI Automator APK file.
This class should be subclassed by workloads that rely on android UiAutomator
to work. This class handles installing the UI Automator APK to the device
and invoking it to run the workload. By default, it will look for the ``*.apk`` file
in the same directory as the .py file for the workload (this can be changed by overriding
the ``uiauto_file`` property in the subclassing workload).
    To initiate UI Automation, the fully-qualified name of the Java class and the
    corresponding method name are needed. By default, the package part of the class name
    is derived from the class file, and the class and method names are ``UiAutomation``
    and ``runUiAutomation`` respectively. If you have generated the boilerplate for the
    UiAutomator code using the ``create_workloads`` utility, then everything should be named
    correctly. If you're creating the Java project manually, you need to make sure the names
    match what is expected, or you can override the ``uiauto_package``, ``uiauto_class`` and
    ``uiauto_method`` class attributes with values that match your Java code.
You can also pass parameters to the APK file. To do this add the parameters to
``self.uiauto_params`` dict inside your class's ``__init__`` or ``setup`` methods.
"""
supported_platforms = ['android']
uiauto_package = ''
uiauto_class = 'UiAutomation'
uiauto_method = 'android.support.test.runner.AndroidJUnitRunner'
# Can be overidden by subclasses to adjust to run time of specific
# benchmarks.
run_timeout = 10 * 60 # seconds
def __init__(self, device, _call_super=True, **kwargs): # pylint: disable=W0613
if _call_super:
Workload.__init__(self, device, **kwargs)
self.uiauto_file = None
self.command = None
self.uiauto_params = ParameterDict()
def init_resources(self, context):
self.uiauto_file = context.resolver.get(ApkFile(self, uiauto=True))
if not self.uiauto_file:
raise ResourceError('No UI automation APK file found for workload {}.'.format(self.name))
if not self.uiauto_package:
self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
def setup(self, context):
Workload.setup(self, context)
params_dict = self.uiauto_params
params_dict['workdir'] = self.device.working_directory
params = ''
for k, v in self.uiauto_params.iter_encoded_items():
params += ' -e {} "{}"'.format(k, v)
if self.device.package_is_installed(self.uiauto_package):
self.device.uninstall(self.uiauto_package)
self.device.install_apk(self.uiauto_file)
        instrumentation_string = 'am instrument -w -r {} -e class {}.{} {}/{}'
        self.command = instrumentation_string.format(params, self.uiauto_package,
                                                     self.uiauto_class, self.uiauto_package,
                                                     self.uiauto_method)
self.device.killall('uiautomator')
def run(self, context):
result = self.device.execute(self.command, self.run_timeout)
if 'FAILURE' in result:
raise WorkloadError(result)
else:
self.logger.debug(result)
time.sleep(DELAY)
def update_result(self, context):
pass
def teardown(self, context):
self.device.uninstall(self.uiauto_package)
def validate(self):
if not self.uiauto_file:
raise WorkloadError('No UI automation APK file found for workload {}.'.format(self.name))
if not self.uiauto_package:
raise WorkloadError('No UI automation package specified for workload {}.'.format(self.name))
class ApkWorkload(Workload):
"""
A workload based on an APK file.
Defines the following attributes:
:package: The package name of the app. This is usually a Java-style name of the form
``com.companyname.appname``.
:activity: This is the initial activity of the app. This will be used to launch the
app during the setup. Many applications do not specify a launch activity so
this may be left blank if necessary.
:view: The class of the main view pane of the app. This needs to be defined in order
to collect SurfaceFlinger-derived statistics (such as FPS) for the app, but
may otherwise be left as ``None``.
:launch_main: If ``False``, the default activity will not be launched (during setup),
allowing workloads to start the app with an intent of their choice in
the run step. This is useful for apps without a launchable default/main
activity or those where it cannot be launched without intent data (which
is provided at the run phase).
:install_timeout: Timeout for the installation of the APK. This may vary wildly based on
the size and nature of a specific APK, and so should be defined on
per-workload basis.
                          .. note:: To a lesser extent, this will also vary based on the
                                    device and the nature of the adb connection (USB vs Ethernet),
                                    so, as with all timeouts, some leeway must be included in
                                    the specified value.
:min_apk_version: The minimum supported apk version for this workload. May be ``None``.
:max_apk_version: The maximum supported apk version for this workload. May be ``None``.
.. note:: Both package and activity for a workload may be obtained from the APK using
              the ``aapt`` tool that comes with the ADT (Android Development Tools) bundle.
"""
package = None
activity = None
view = None
min_apk_version = None
max_apk_version = None
supported_platforms = ['android']
launch_main = True
parameters = [
Parameter('install_timeout', kind=int, default=300,
description='Timeout for the installation of the apk.'),
Parameter('check_apk', kind=boolean, default=True,
description='''
                  When set to True, the APK file on the host will be preferred if
                  it is a valid version and ABI; if not, it will fall back to the
                  version on the target. When set to False, the target version is
                  preferred.
'''),
Parameter('force_install', kind=boolean, default=False,
description='''
                      Always re-install the APK, even if a matching version is already installed
on the device. Runs ``adb install -r`` to ensure existing APK is replaced. When
this is set, check_apk is ignored.
'''),
Parameter('uninstall_apk', kind=boolean, default=False,
description='If ``True``, will uninstall workload\'s APK as part of teardown.'),
Parameter('exact_abi', kind=bool, default=False,
description='''
If ``True``, workload will check that the APK matches the target
device ABI, otherwise any APK found will be used.
'''),
]
def __init__(self, device, _call_super=True, **kwargs):
if _call_super:
Workload.__init__(self, device, **kwargs)
self.apk_file = None
self.apk_version = None
self.logcat_log = None
self.exact_apk_version = None
def setup(self, context): # pylint: disable=too-many-branches
Workload.setup(self, context)
self.setup_workload_apk(context)
self.launch_application()
self.kill_background()
self.device.clear_logcat()
def setup_workload_apk(self, context):
# Get target version
target_version = self.device.get_installed_package_version(self.package)
if target_version:
target_version = LooseVersion(target_version)
self.logger.debug("Found version '{}' on target device".format(target_version))
# Get host version
self.apk_file = context.resolver.get(ApkFile(self, self.device.abi),
version=getattr(self, 'version', None),
variant_name=getattr(self, 'variant_name', None),
strict=False)
# Get target abi
target_abi = self.device.get_installed_package_abi(self.package)
if target_abi:
self.logger.debug("Found apk with primary abi '{}' on target device".format(target_abi))
        # Get the host APK: try the primary abi first, then any other supported abi.
for abi in self.device.supported_abi:
self.apk_file = context.resolver.get(ApkFile(self, abi),
version=getattr(self, 'version', None),
variant_name=getattr(self, 'variant_name', None),
strict=False)
            # Stop if an apk is found; if exact_abi is set, only look for the primary abi.
if self.apk_file or self.exact_abi:
break
host_version = self.check_host_version()
self.verify_apk_version(target_version, target_abi, host_version)
if self.force_install:
self.force_install_apk(context, host_version)
elif self.check_apk:
self.prefer_host_apk(context, host_version, target_version)
else:
self.prefer_target_apk(context, host_version, target_version)
self.reset(context)
self.apk_version = self.device.get_installed_package_version(self.package)
context.add_classifiers(apk_version=self.apk_version)
def check_host_version(self):
host_version = None
if self.apk_file is not None:
host_version = ApkInfo(self.apk_file).version_name
if host_version:
host_version = LooseVersion(host_version)
self.logger.debug("Found version '{}' on host".format(host_version))
return host_version
def verify_apk_version(self, target_version, target_abi, host_version):
# Error if apk was not found anywhere
if target_version is None and host_version is None:
msg = "Could not find APK for '{}' on the host or target device"
raise ResourceError(msg.format(self.name))
if self.exact_apk_version is not None:
if self.exact_apk_version != target_version and self.exact_apk_version != host_version:
msg = "APK version '{}' not found on the host '{}' or target '{}'"
raise ResourceError(msg.format(self.exact_apk_version, host_version, target_version))
        # Error if exact_abi is set, no suitable apk was found on the host,
        # and the abi of the package on the target does not match the device
if self.exact_abi and host_version is None:
if target_abi != self.device.abi:
msg = "APK abi '{}' not found on the host and target is '{}'"
raise ResourceError(msg.format(self.device.abi, target_abi))
def launch_application(self):
if self.launch_main:
self.launch_package() # launch default activity without intent data
def kill_background(self):
self.device.execute('am kill-all') # kill all *background* activities
def force_install_apk(self, context, host_version):
if host_version is None:
raise ResourceError("force_install is 'True' but could not find APK on the host")
try:
self.validate_version(host_version)
except ResourceError as e:
msg = "force_install is 'True' but the host version is invalid:\n\t{}"
raise ResourceError(msg.format(str(e)))
self.install_apk(context, replace=True)
def prefer_host_apk(self, context, host_version, target_version):
msg = "check_apk is 'True' "
if host_version is None:
try:
self.validate_version(target_version)
except ResourceError as e:
msg += "but the APK was not found on the host and the target version is invalid:\n\t{}"
raise ResourceError(msg.format(str(e)))
else:
msg += "but the APK was not found on the host, using target version"
self.logger.debug(msg)
return
try:
self.validate_version(host_version)
except ResourceError as e1:
msg += "but the host APK version is invalid:\n\t{}\n"
if target_version is None:
msg += "The target does not have the app either"
raise ResourceError(msg.format(str(e1)))
try:
self.validate_version(target_version)
except ResourceError as e2:
msg += "The target version is also invalid:\n\t{}"
raise ResourceError(msg.format(str(e1), str(e2)))
else:
msg += "using the target version instead"
self.logger.debug(msg.format(str(e1)))
else: # Host version is valid
if target_version is not None and target_version == host_version:
msg += " and a matching version is alread on the device, doing nothing"
self.logger.debug(msg)
return
msg += " and the host version is not on the target, installing APK"
self.logger.debug(msg)
self.install_apk(context, replace=True)
def prefer_target_apk(self, context, host_version, target_version):
msg = "check_apk is 'False' "
if target_version is None:
try:
self.validate_version(host_version)
except ResourceError as e:
msg += "but the app was not found on the target and the host version is invalid:\n\t{}"
raise ResourceError(msg.format(str(e)))
else:
msg += "and the app was not found on the target, using host version"
self.logger.debug(msg)
self.install_apk(context)
return
try:
self.validate_version(target_version)
except ResourceError as e1:
msg += "but the target app version is invalid:\n\t{}\n"
if host_version is None:
msg += "The host does not have the APK either"
raise ResourceError(msg.format(str(e1)))
try:
self.validate_version(host_version)
except ResourceError as e2:
msg += "The host version is also invalid:\n\t{}"
raise ResourceError(msg.format(str(e1), str(e2)))
else:
msg += "Using the host APK instead"
self.logger.debug(msg.format(str(e1)))
self.install_apk(context, replace=True)
else:
msg += "and a valid version of the app is already on the target, using target app"
self.logger.debug(msg)
def validate_version(self, version):
min_apk_version = getattr(self, 'min_apk_version', None)
max_apk_version = getattr(self, 'max_apk_version', None)
if min_apk_version is not None and max_apk_version is not None:
if version < LooseVersion(min_apk_version) or \
version > LooseVersion(max_apk_version):
msg = "version '{}' not supported. " \
"Minimum version required: '{}', Maximum version known to work: '{}'"
raise ResourceError(msg.format(version, min_apk_version, max_apk_version))
elif min_apk_version is not None:
if version < LooseVersion(min_apk_version):
msg = "version '{}' not supported. " \
"Minimum version required: '{}'"
raise ResourceError(msg.format(version, min_apk_version))
elif max_apk_version is not None:
if version > LooseVersion(max_apk_version):
msg = "version '{}' not supported. " \
"Maximum version known to work: '{}'"
raise ResourceError(msg.format(version, max_apk_version))
def launch_package(self):
if not self.activity:
output = self.device.execute('am start -W {}'.format(self.package))
else:
output = self.device.execute('am start -W -n {}/{}'.format(self.package, self.activity))
if 'Error:' in output:
            self.device.execute('am force-stop {}'.format(self.package))  # this will dismiss any error dialogs
raise WorkloadError(output)
self.logger.debug(output)
def reset(self, context): # pylint: disable=W0613
self.device.execute('am force-stop {}'.format(self.package))
self.device.execute('pm clear {}'.format(self.package))
# As of android API level 23, apps can request permissions at runtime,
# this will grant all of them so requests do not pop up when running the app
# This can also be done less "manually" during adb install using the -g flag
if self.device.get_sdk_version() >= 23:
self._grant_requested_permissions()
def install_apk(self, context, replace=False):
success = False
if replace and self.device.package_is_installed(self.package):
self.device.uninstall(self.package)
output = self.device.install_apk(self.apk_file, timeout=self.install_timeout,
replace=replace, allow_downgrade=True)
if 'Failure' in output:
if 'ALREADY_EXISTS' in output:
                self.logger.warn('Using already installed APK (did not uninstall properly?)')
self.reset(context)
else:
raise WorkloadError(output)
else:
self.logger.debug(output)
success = True
self.do_post_install(context)
return success
def _grant_requested_permissions(self):
dumpsys_output = self.device.execute(command="dumpsys package {}".format(self.package))
permissions = []
lines = iter(dumpsys_output.splitlines())
for line in lines:
if "requested permissions:" in line:
break
for line in lines:
if "android.permission." in line:
permissions.append(line.split(":")[0].strip())
# Matching either of these means the end of requested permissions section
elif "install permissions:" in line or "runtime permissions:" in line:
break
for permission in set(permissions):
# "Normal" Permisions are automatically granted and cannot be changed
permission_name = permission.rsplit('.', 1)[1]
if permission_name not in ANDROID_NORMAL_PERMISSIONS:
# Some permissions are not allowed to be "changed"
if permission_name not in ANDROID_UNCHANGEABLE_PERMISSIONS:
# On some API 23+ devices, this may fail with a SecurityException
# on previously granted permissions. In that case, just skip as it
# is not fatal to the workload execution
try:
self.device.execute("pm grant {} {}".format(self.package, permission))
except DeviceError as e:
if "changeable permission" in e.message or "Unknown permission" in e.message:
self.logger.debug(e)
else:
raise e
def do_post_install(self, context):
""" May be overwritten by derived classes."""
pass
def run(self, context):
pass
def update_result(self, context):
self.logcat_log = os.path.join(context.output_directory, 'logcat.log')
self.device.dump_logcat(self.logcat_log)
context.add_iteration_artifact(name='logcat',
path='logcat.log',
kind='log',
description='Logact dump for the run.')
def teardown(self, context):
self.device.execute('am force-stop {}'.format(self.package))
if self.uninstall_apk:
self.device.uninstall(self.package)
AndroidBenchmark = ApkWorkload # backward compatibility
class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
supported_platforms = ['android']
def __init__(self, device, **kwargs):
UiAutomatorWorkload.__init__(self, device, **kwargs)
AndroidBenchmark.__init__(self, device, _call_super=False, **kwargs)
def initialize(self, context):
UiAutomatorWorkload.initialize(self, context)
AndroidBenchmark.initialize(self, context)
self._check_unsupported_packages()
def init_resources(self, context):
UiAutomatorWorkload.init_resources(self, context)
AndroidBenchmark.init_resources(self, context)
def setup(self, context):
UiAutomatorWorkload.setup(self, context)
AndroidBenchmark.setup(self, context)
def update_result(self, context):
UiAutomatorWorkload.update_result(self, context)
AndroidBenchmark.update_result(self, context)
def teardown(self, context):
UiAutomatorWorkload.teardown(self, context)
AndroidBenchmark.teardown(self, context)
def _check_unsupported_packages(self):
"""
Check for any unsupported package versions and raise an
exception if detected.
"""
for package in UNSUPPORTED_PACKAGES:
version = self.device.get_installed_package_version(package)
if version is None:
continue
if '-' in version:
version = version.split('-')[0] # ignore abi version
if version in UNSUPPORTED_PACKAGES[package]:
message = 'This workload does not support version "{}" of package "{}"'
raise WorkloadError(message.format(version, package))
class AndroidUxPerfWorkloadMeta(ExtensionMeta):
to_propagate = ExtensionMeta.to_propagate + [('deployable_assets', str, ListCollection)]
class AndroidUxPerfWorkload(AndroidUiAutoBenchmark):
__metaclass__ = AndroidUxPerfWorkloadMeta
deployable_assets = []
parameters = [
Parameter('markers_enabled', kind=bool, default=False,
description="""
If ``True``, UX_PERF action markers will be emitted to logcat during
the test run.
"""),
Parameter('clean_assets', kind=bool, default=False,
description="""
If ``True`` pushed assets will be deleted at the end of each iteration
"""),
Parameter('force_push_assets', kind=bool, default=False,
description="""
                  If ``True``, always push assets on each iteration, even if the
                  assets already exist in the device path
"""),
]
def _path_on_device(self, fpath, dirname=None):
if dirname is None:
dirname = self.device.working_directory
fname = os.path.basename(fpath)
return self.device.path.join(dirname, fname)
def push_assets(self, context):
pushed = False
file_list = []
for f in self.deployable_assets:
fpath = context.resolver.get(File(self, f))
device_path = self._path_on_device(fpath)
if self.force_push_assets or not self.device.file_exists(device_path):
self.device.push_file(fpath, device_path, timeout=300)
file_list.append(device_path)
pushed = True
if pushed:
self.device.refresh_device_files(file_list)
def delete_assets(self):
if self.deployable_assets:
file_list = []
for f in self.deployable_assets:
f = self._path_on_device(f)
self.device.delete_file(f)
file_list.append(f)
self.device.refresh_device_files(file_list)
def __init__(self, device, **kwargs):
super(AndroidUxPerfWorkload, self).__init__(device, **kwargs)
# Turn class attribute into instance attribute
self.deployable_assets = list(self.deployable_assets)
def validate(self):
super(AndroidUxPerfWorkload, self).validate()
self.uiauto_params['package_name'] = self.package
self.uiauto_params['markers_enabled'] = self.markers_enabled
def setup(self, context):
super(AndroidUxPerfWorkload, self).setup(context)
self.push_assets(context)
def teardown(self, context):
super(AndroidUxPerfWorkload, self).teardown(context)
if self.clean_assets:
self.delete_assets()
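# Illustrative sketch only: a minimal example of how a concrete workload might
# subclass AndroidUxPerfWorkload above. The package/activity values and the
# asset file name are hypothetical, and `name`, `package`, `activity` and
# `description` are assumed to be the usual ApkWorkload attributes.
#
# class ExampleUxPerfWorkload(AndroidUxPerfWorkload):
#     name = 'example_uxperf'
#     description = 'Illustrative UX-perf workload.'
#     package = 'com.example.app'       # hypothetical APK package
#     activity = '.MainActivity'        # hypothetical launch activity
#     # Files listed here are resolved through the resource resolver and pushed
#     # to the device by push_assets(); delete_assets() removes them again when
#     # clean_assets is enabled.
#     deployable_assets = ['example_media.mp4']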
class GameWorkload(ApkWorkload, ReventWorkload):
"""
    GameWorkload is the base class for all workloads that use revent files to
    run.
    For more in-depth details on how to record revent files, please see
    :ref:`revent_files_creation`. To subclass this class, please refer to
    :ref:`GameWorkload`; an illustrative subclass sketch also follows this
    class definition.
Additionally, this class defines the following attributes:
:asset_file: A tarball containing additional assets for the workload. These are the assets
that are not part of the APK but would need to be downloaded by the workload
(usually, on first run of the app). Since the presence of a network connection
cannot be assumed on some devices, this provides an alternative means of obtaining
the assets.
:saved_state_file: A tarball containing the saved state for a workload. This tarball gets
deployed in the same way as the asset file. The only difference being that
it is usually much slower and re-deploying the tarball should alone be
enough to reset the workload to a known state (without having to reinstall
the app or re-deploy the other assets).
:loading_time: Time it takes for the workload to load after the initial activity has been
started.
"""
    # May be optionally overridden by subclasses
asset_file = None
saved_state_file = None
view = 'SurfaceView'
loading_time = 10
supported_platforms = ['android']
setup_required = True
parameters = [
Parameter('install_timeout', default=500, override=True),
Parameter('check_states', kind=bool, default=False, global_alias='check_game_states',
description="""Use visual state detection to verify the state of the workload
after setup and run"""),
Parameter('assets_push_timeout', kind=int, default=500,
description='Timeout used during deployment of the assets package (if there is one).'),
Parameter('clear_data_on_reset', kind=bool, default=True,
description="""
If set to ``False``, this will prevent WA from clearing package
data for this workload prior to running it.
"""),
]
def __init__(self, device, **kwargs): # pylint: disable=W0613
ApkWorkload.__init__(self, device, **kwargs)
ReventWorkload.__init__(self, device, _call_super=False, **kwargs)
if self.check_states:
state_detector.check_match_state_dependencies()
self.logcat_process = None
self.module_dir = os.path.dirname(sys.modules[self.__module__].__file__)
self.revent_dir = os.path.join(self.module_dir, 'revent_files')
def init_resources(self, context):
ApkWorkload.init_resources(self, context)
ReventWorkload.init_resources(self, context)
if self.check_states:
self._check_statedetection_files(context)
def setup(self, context):
ApkWorkload.setup(self, context)
self.logger.debug('Waiting for the game to load...')
time.sleep(self.loading_time)
ReventWorkload.setup(self, context)
        # Run the state detection check if it's enabled in the config
if self.check_states:
self.check_state(context, "setup_complete")
def do_post_install(self, context):
ApkWorkload.do_post_install(self, context)
self._deploy_assets(context, self.assets_push_timeout)
def reset(self, context):
# If saved state exists, restore it; if not, do full
# uninstall/install cycle.
self.device.execute('am force-stop {}'.format(self.package))
if self.saved_state_file:
self._deploy_resource_tarball(context, self.saved_state_file)
else:
if self.clear_data_on_reset:
self.device.execute('pm clear {}'.format(self.package))
self._deploy_assets(context)
def run(self, context):
ReventWorkload.run(self, context)
def teardown(self, context):
        # Run the state detection check if it's enabled in the config
if self.check_states:
self.check_state(context, "run_complete")
if not self.saved_state_file:
ApkWorkload.teardown(self, context)
else:
self.device.execute('am force-stop {}'.format(self.package))
ReventWorkload.teardown(self, context)
def _deploy_assets(self, context, timeout=300):
if self.asset_file:
self._deploy_resource_tarball(context, self.asset_file, timeout)
if self.saved_state_file: # must be deployed *after* asset tarball!
self._deploy_resource_tarball(context, self.saved_state_file, timeout)
def _deploy_resource_tarball(self, context, resource_file, timeout=300):
kind = 'data'
if ':' in resource_file:
kind, resource_file = resource_file.split(':', 1)
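        # A "<kind>:" prefix selects the destination under Android/ on external
        # storage; e.g. a hypothetical 'obb:assets.tar.gz' would be extracted
        # under Android/obb instead of the default Android/data (see
        # device_asset_directory below).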
ondevice_cache = self.device.path.join(self.device.resource_cache, self.name, resource_file)
if not self.device.file_exists(ondevice_cache):
asset_tarball = context.resolver.get(ExtensionAsset(self, resource_file))
if not asset_tarball:
message = 'Could not find resource {} for workload {}.'
raise WorkloadError(message.format(resource_file, self.name))
# adb push will create intermediate directories if they don't
# exist.
self.device.push_file(asset_tarball, ondevice_cache, timeout=timeout)
device_asset_directory = self.device.path.join(self.device.external_storage_directory, 'Android', kind)
deploy_command = 'cd {} && {} tar -xzf {}'.format(device_asset_directory,
self.device.busybox,
ondevice_cache)
self.device.execute(deploy_command, timeout=timeout, as_root=True)
def _check_statedetection_files(self, context):
try:
self.statedefs_dir = context.resolver.get(File(self, 'state_definitions'))
except ResourceError:
self.logger.warning("State definitions directory not found. Disabling state detection.")
self.check_states = False # pylint: disable=W0201
def check_state(self, context, phase):
try:
self.logger.info("\tChecking workload state...")
screenshotPath = os.path.join(context.output_directory, "screen.png")
self.device.capture_screen(screenshotPath)
stateCheck = state_detector.verify_state(screenshotPath, self.statedefs_dir, phase)
if not stateCheck:
raise WorkloadError("Unexpected state after setup")
except state_detector.StateDefinitionError as e:
msg = "State definitions or template files missing or invalid ({}). Skipping state detection."
self.logger.warning(msg.format(e.message))
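# Illustrative sketch only: a minimal example of how a concrete game workload
# might subclass GameWorkload. All names below are hypothetical; the recorded
# revent files would be picked up from the workload's own revent_files
# directory (see revent_dir in __init__ above).
#
# class ExampleGame(GameWorkload):
#     name = 'example_game'
#     description = 'Replays recorded touch input against an example game.'
#     package = 'com.example.game'                 # hypothetical package
#     activity = 'com.example.game.MainActivity'   # hypothetical activity
#     asset_file = 'com.example.game.tar.gz'       # deployed by do_post_install()
#     saved_state_file = 'example_game_save.tar.gz'  # re-deployed by reset()
#     loading_time = 30  # seconds to wait after launch before revent playback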
| {
"content_hash": "dfc9b040ca17c3b4d9d83089ccb58d3f",
"timestamp": "",
"source": "github",
"line_count": 735,
"max_line_length": 111,
"avg_line_length": 45.42857142857143,
"alnum_prop": 0.6109014675052411,
"repo_name": "bjackman/workload-automation",
"id": "541b8b242c08218674227ff70bb82a96f6f174fe",
"size": "33976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wlauto/common/android/workload.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40003"
},
{
"name": "HTML",
"bytes": "243720"
},
{
"name": "Java",
"bytes": "226912"
},
{
"name": "JavaScript",
"bytes": "6578"
},
{
"name": "Jupyter Notebook",
"bytes": "1322"
},
{
"name": "Makefile",
"bytes": "430"
},
{
"name": "Python",
"bytes": "1555462"
},
{
"name": "Shell",
"bytes": "39222"
},
{
"name": "Vim script",
"bytes": "901"
}
],
"symlink_target": ""
} |
import urllib2
import json
import time
class GoogleFinanceAPI:
def __init__(self):
self.prefix = "http://finance.google.com/finance/info?client=ig&q="
def get(self,symbol,exchange):
url = self.prefix+"%s:%s"%(exchange,symbol)
u = urllib2.urlopen(url)
content = u.read()
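        # The endpoint prepended a comment-style prefix (e.g. "// ") before the
        # JSON array, so the slice below skips those leading characters before
        # parsing; [3:] assumes that exact prefix length.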
obj = json.loads(content[3:])
return obj[0]
if __name__ == "__main__":
c = GoogleFinanceAPI()
while 1:
quote = c.get("MSFT","NASDAQ")
print quote
time.sleep(1)
| {
"content_hash": "19165b7e0117b2fee4461b544bf3fe91",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 17.64516129032258,
"alnum_prop": 0.5484460694698354,
"repo_name": "iamaris/pystock",
"id": "925070194a6b9e5310cae0cb76dd96fa7782b8ac",
"size": "547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "realtime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "1084945"
},
{
"name": "Python",
"bytes": "84674"
},
{
"name": "Shell",
"bytes": "220"
}
],
"symlink_target": ""
} |
import json
import os
from datetime import datetime, timedelta
from urllib.parse import urlencode
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.test import RequestFactory
from django.utils.encoding import force_text
from django.utils.translation import trim_whitespace
from unittest import mock
import pytest
import responses
from pyquery import PyQuery as pq
from waffle.testutils import override_switch
from olympia import amo, core
from olympia.accounts.views import API_TOKEN_COOKIE
from olympia.activity.models import ActivityLog
from olympia.addons.models import (
Addon, AddonCategory, AddonUser)
from olympia.amo.storage_utils import copy_stored_file
from olympia.amo.templatetags.jinja_helpers import (
format_date, url as url_reverse, urlparams)
from olympia.amo.tests import (
TestCase, addon_factory, user_factory, version_factory)
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.urlresolvers import reverse
from olympia.api.models import SYMMETRIC_JWT_TYPE, APIKey, APIKeyConfirmation
from olympia.applications.models import AppVersion
from olympia.constants.promoted import RECOMMENDED
from olympia.devhub.decorators import dev_required
from olympia.devhub.models import BlogPost
from olympia.devhub.views import get_next_version_number
from olympia.files.models import FileUpload
from olympia.files.tests.test_models import UploadTest as BaseUploadTest
from olympia.ratings.models import Rating
from olympia.translations.models import Translation, delete_translation
from olympia.users.models import IPNetworkUserRestriction, UserProfile
from olympia.users.tests.test_views import UserViewBase
from olympia.versions.models import (
ApplicationsVersions, Version, VersionPreview)
from olympia.zadmin.models import set_config
class HubTest(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(HubTest, self).setUp()
self.url = reverse('devhub.index')
assert self.client.login(email='[email protected]')
assert self.client.get(self.url).status_code == 200
self.user_profile = UserProfile.objects.get(id=999)
not_their_addon = addon_factory(users=[user_factory()])
AddonUser.unfiltered.create(
addon=not_their_addon,
user=self.user_profile,
role=amo.AUTHOR_ROLE_DELETED)
def clone_addon(self, num, addon_id=3615):
addons = []
source = Addon.objects.get(id=addon_id)
for i in range(num):
data = {
'type': source.type,
'status': source.status,
'name': 'cloned-addon-%s-%s' % (addon_id, i),
'users': [self.user_profile],
}
addons.append(addon_factory(**data))
return addons
class TestDashboard(HubTest):
def setUp(self):
super(TestDashboard, self).setUp()
self.url = reverse('devhub.addons')
self.themes_url = reverse('devhub.themes')
assert self.client.get(self.url).status_code == 200
self.addon = Addon.objects.get(pk=3615)
self.addon.addonuser_set.create(user=self.user_profile)
def test_addons_layout(self):
doc = pq(self.client.get(self.url).content)
assert doc('title').text() == (
'Manage My Submissions :: Developer Hub :: Add-ons for Firefox')
assert doc('.links-footer').length == 1
assert doc('#copyright').length == 1
assert doc('#footer-links .mobile-link').length == 0
def get_action_links(self, addon_id):
response = self.client.get(self.url)
doc = pq(response.content)
selector = '.item[data-addonid="%s"] .item-actions li > a' % addon_id
links = [a.text.strip() for a in doc(selector)]
return links
def test_no_addons(self):
"""Check that no add-ons are displayed for this user."""
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.item item').length == 0
def test_addon_pagination(self):
"""Check that the correct info. is displayed for each add-on:
namely, that add-ons are paginated at 10 items per page, and that
when there is more than one page, the 'Sort by' header and pagination
footer appear.
"""
        # Create 10 add-ons. We're going to turn the existing one from setUp
        # into a static theme, which shouldn't show up as an add-on in this list.
addons = self.clone_addon(10)
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url)
doc = pq(response.content)
assert len(doc('.item .item-info')) == 10
assert len(doc('.item .info.extension')) == 10
assert doc('nav.paginator').length == 0
for addon in addons:
assert addon.get_icon_url(64) in doc('.item .info h3 a').html()
        # Create 5 add-ons - have to change self.addon back so we clone extensions.
self.addon.update(type=amo.ADDON_EXTENSION)
self.clone_addon(5)
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url, {'page': 2})
doc = pq(response.content)
assert len(doc('.item .item-info')) == 5
assert doc('nav.paginator').length == 1
def test_themes(self):
"""Check themes show on dashboard."""
# Create 2 themes.
staticthemes = []
for x in range(2):
addon = addon_factory(
type=amo.ADDON_STATICTHEME, users=[self.user_profile])
VersionPreview.objects.create(version=addon.current_version)
staticthemes.append(addon)
response = self.client.get(self.themes_url)
doc = pq(response.content)
assert len(doc('.item .item-info')) == 2
assert len(doc('.item .info.statictheme')) == 2
for addon in staticthemes:
assert addon.current_previews[0].thumbnail_url in [
img.attrib['src'] for img in doc('.info.statictheme h3 img')]
def test_show_hide_statistics_and_new_version_for_disabled(self):
# Not disabled: show statistics and new version links.
self.addon.update(disabled_by_user=False)
links = self.get_action_links(self.addon.pk)
assert 'Statistics' in links, ('Unexpected: %r' % links)
assert 'New Version' in links, ('Unexpected: %r' % links)
# Disabled (user): hide new version link.
self.addon.update(disabled_by_user=True)
links = self.get_action_links(self.addon.pk)
assert 'New Version' not in links, ('Unexpected: %r' % links)
# Disabled (admin): hide statistics and new version links.
self.addon.update(disabled_by_user=False, status=amo.STATUS_DISABLED)
links = self.get_action_links(self.addon.pk)
assert 'Statistics' not in links, ('Unexpected: %r' % links)
assert 'New Version' not in links, ('Unexpected: %r' % links)
def test_public_addon(self):
assert self.addon.status == amo.STATUS_APPROVED
doc = pq(self.client.get(self.url).content)
item = doc('.item[data-addonid="%s"]' % self.addon.id)
assert item.find('h3 a').attr('href') == self.addon.get_dev_url()
assert item.find('p.downloads'), 'Expected weekly downloads'
assert item.find('p.users'), 'Expected ADU'
assert item.find('.item-details'), 'Expected item details'
assert not item.find('p.incomplete'), (
'Unexpected message about incomplete add-on')
appver = self.addon.current_version.apps.all()[0]
appver.delete()
def test_dev_news(self):
for i in range(7):
bp = BlogPost(title='hi %s' % i,
date_posted=datetime.now() - timedelta(days=i))
bp.save()
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.blog-posts').length == 1
assert doc('.blog-posts li').length == 5
assert doc('.blog-posts li a').eq(0).text() == "hi 0"
assert doc('.blog-posts li a').eq(4).text() == "hi 4"
def test_sort_created_filter(self):
response = self.client.get(self.url + '?sort=created')
doc = pq(response.content)
assert doc('.item-details').length == 1
elm = doc('.item-details .date-created')
assert elm.length == 1
assert elm.remove('strong').text() == (
format_date(self.addon.created))
def test_sort_updated_filter(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.item-details').length == 1
elm = doc('.item-details .date-updated')
assert elm.length == 1
assert elm.remove('strong').text() == (
trim_whitespace(
format_date(self.addon.last_updated)))
def test_purely_unlisted_addon_are_not_shown_as_incomplete(self):
self.make_addon_unlisted(self.addon)
assert self.addon.has_complete_metadata()
response = self.client.get(self.url)
doc = pq(response.content)
# It should not be considered incomplete despite having STATUS_NULL,
# since it's purely unlisted.
assert not doc('.incomplete')
# Rest of the details should be shown, but not the AMO-specific stuff.
assert not doc('.item-info')
assert doc('.item-details')
def test_mixed_versions_addon_with_incomplete_metadata(self):
self.make_addon_unlisted(self.addon)
version = version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED)
version.update(license=None)
self.addon.reload()
assert not self.addon.has_complete_metadata()
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.incomplete').text() == (
'This add-on is missing some required information before it can be'
' submitted for publication.')
assert doc('form.resume').attr('action') == (
url_reverse('devhub.request-review', self.addon.slug))
assert doc('button.link').text() == 'Resume'
def test_no_versions_addon(self):
self.addon.current_version.delete()
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.incomplete').text() == (
"This add-on doesn't have any versions.")
class TestUpdateCompatibility(TestCase):
fixtures = ['base/users', 'base/addon_4594_a9', 'base/addon_3615']
def setUp(self):
super(TestUpdateCompatibility, self).setUp()
assert self.client.login(email='[email protected]')
self.url = reverse('devhub.addons')
# These aren't realistic but work with existing tests and the 3615
# addon
self.create_appversion('android', '3.7a1pre')
self.create_appversion('android', '4.0')
def create_appversion(self, name, version):
return AppVersion.objects.create(
application=amo.APPS[name].id, version=version)
def test_no_compat(self):
self.client.logout()
assert self.client.login(email='[email protected]')
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('.item[data-addonid="4594"] li.compat')
addon = Addon.objects.get(pk=4594)
response = self.client.get(
reverse('devhub.ajax.compat.update',
args=[addon.slug, addon.current_version.id]))
assert response.status_code == 404
response = self.client.get(
reverse('devhub.ajax.compat.status', args=[addon.slug]))
assert response.status_code == 404
def test_compat(self):
addon = Addon.objects.get(pk=3615)
response = self.client.get(self.url)
doc = pq(response.content)
cu = doc('.item[data-addonid="3615"] .tooltip.compat-update')
assert not cu
addon.current_version.files.update(strict_compatibility=True)
response = self.client.get(self.url)
doc = pq(response.content)
cu = doc('.item[data-addonid="3615"] .tooltip.compat-update')
assert cu
update_url = reverse('devhub.ajax.compat.update',
args=[addon.slug, addon.current_version.id])
assert cu.attr('data-updateurl') == update_url
status_url = reverse('devhub.ajax.compat.status', args=[addon.slug])
selector = '.item[data-addonid="3615"] li.compat'
assert doc(selector).attr('data-src') == status_url
assert doc('.item[data-addonid="3615"] .compat-update-modal')
def test_incompat_firefox(self):
addon = Addon.objects.get(pk=3615)
addon.current_version.files.update(strict_compatibility=True)
versions = ApplicationsVersions.objects.all()[0]
versions.max = AppVersion.objects.get(version='2.0')
versions.save()
doc = pq(self.client.get(self.url).content)
assert doc('.item[data-addonid="3615"] .tooltip.compat-error')
def test_incompat_android(self):
addon = Addon.objects.get(pk=3615)
addon.current_version.files.update(strict_compatibility=True)
appver = AppVersion.objects.get(version='2.0')
appver.update(application=amo.ANDROID.id)
av = ApplicationsVersions.objects.all()[0]
av.application = amo.ANDROID.id
av.max = appver
av.save()
doc = pq(self.client.get(self.url).content)
assert doc('.item[data-addonid="3615"] .tooltip.compat-error')
class TestDevRequired(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestDevRequired, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.edit_page_url = self.addon.get_dev_url('edit')
self.get_url = self.addon.get_dev_url('versions')
self.post_url = self.addon.get_dev_url('delete')
assert self.client.login(email='[email protected]')
self.au = self.addon.addonuser_set.get(user__email='[email protected]')
assert self.au.role == amo.AUTHOR_ROLE_OWNER
def test_anon(self):
self.client.logout()
self.assertLoginRedirects(self.client.get(self.get_url), self.get_url)
self.assertLoginRedirects(self.client.get(
self.edit_page_url), self.edit_page_url)
def test_dev_get(self):
assert self.client.get(self.get_url).status_code == 200
assert self.client.get(self.edit_page_url).status_code == 200
def test_dev_post(self):
self.assert3xx(self.client.post(self.post_url), self.get_url)
def test_disabled_post_dev(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert self.client.post(self.get_url).status_code == 403
def test_disabled_post_admin(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert self.client.login(email='[email protected]')
self.assert3xx(self.client.post(self.post_url), self.get_url)
class TestVersionStats(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestVersionStats, self).setUp()
assert self.client.login(email='[email protected]')
def test_counts(self):
addon = Addon.objects.get(id=3615)
version = addon.current_version
user = UserProfile.objects.get(email='[email protected]')
for _ in range(10):
Rating.objects.create(addon=addon, user=user,
version=addon.current_version)
url = reverse('devhub.versions.stats', args=[addon.slug])
data = json.loads(force_text(self.client.get(url).content))
exp = {str(version.id):
{'reviews': 10, 'files': 1, 'version': version.version,
'id': version.id}}
self.assertDictEqual(data, exp)
class TestDelete(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestDelete, self).setUp()
self.get_addon = lambda: Addon.objects.filter(id=3615)
assert self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.get_url = lambda: self.get_addon()[0].get_dev_url('delete')
def test_post_not(self):
response = self.client.post(self.get_url(), follow=True)
assert pq(response.content)('.notification-box').text() == (
'URL name was incorrect. Add-on was not deleted.')
assert self.get_addon().exists()
self.assert3xx(response, self.get_addon()[0].get_dev_url('versions'))
def test_post(self):
self.get_addon().get().update(slug='addon-slug')
response = self.client.post(self.get_url(), {'slug': 'addon-slug'},
follow=True)
assert pq(response.content)('.notification-box').text() == (
'Add-on deleted.')
assert not self.get_addon().exists()
self.assert3xx(response, reverse('devhub.addons'))
def test_post_wrong_slug(self):
self.get_addon().get().update(slug='addon-slug')
response = self.client.post(self.get_url(), {'slug': 'theme-slug'},
follow=True)
assert pq(response.content)('.notification-box').text() == (
'URL name was incorrect. Add-on was not deleted.')
assert self.get_addon().exists()
self.assert3xx(response, self.get_addon()[0].get_dev_url('versions'))
def test_post_statictheme(self):
theme = addon_factory(
name='xpi name', type=amo.ADDON_STATICTHEME, slug='stheme-slug',
users=[self.user])
response = self.client.post(
theme.get_dev_url('delete'), {'slug': 'stheme-slug'}, follow=True)
assert pq(response.content)('.notification-box').text() == (
'Theme deleted.')
assert not Addon.objects.filter(id=theme.id).exists()
self.assert3xx(response, reverse('devhub.themes'))
def test_post_statictheme_wrong_slug(self):
theme = addon_factory(
name='xpi name', type=amo.ADDON_STATICTHEME, slug='stheme-slug',
users=[self.user])
response = self.client.post(
theme.get_dev_url('delete'), {'slug': 'foo-slug'}, follow=True)
assert pq(response.content)('.notification-box').text() == (
'URL name was incorrect. Theme was not deleted.')
assert Addon.objects.filter(id=theme.id).exists()
self.assert3xx(response, theme.get_dev_url('versions'))
class TestHome(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestHome, self).setUp()
assert self.client.login(email='[email protected]')
self.url = reverse('devhub.index')
self.addon = Addon.objects.get(pk=3615)
def get_pq(self):
response = self.client.get(self.url)
assert response.status_code == 200
return pq(response.content)
def test_basic_logged_out(self):
self.client.logout()
response = self.client.get(self.url)
assert response.status_code == 200
self.assertTemplateUsed(response, 'devhub/index.html')
assert b'Customize Firefox' in response.content
def test_default_lang_selected(self):
self.client.logout()
doc = self.get_pq()
selected_value = doc('#language option:selected').attr('value')
assert selected_value == 'en-us'
def test_basic_logged_in(self):
response = self.client.get(self.url)
assert response.status_code == 200
self.assertTemplateUsed(response, 'devhub/index.html')
assert b'My Add-ons' in response.content
def test_my_addons_addon_versions_link(self):
assert self.client.login(email='[email protected]')
doc = self.get_pq()
addon_list = doc('.DevHub-MyAddons-list')
href = addon_list.find('.DevHub-MyAddons-item-versions a').attr('href')
assert href == self.addon.get_dev_url('versions')
def test_my_addons(self):
statuses = [
(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW,
'Awaiting Review'),
(amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW,
'Approved'),
(amo.STATUS_DISABLED, amo.STATUS_APPROVED,
'Disabled by Mozilla')]
latest_version = self.addon.find_latest_version(
amo.RELEASE_CHANNEL_LISTED)
latest_file = latest_version.files.all()[0]
for addon_status, file_status, status_str in statuses:
latest_file.update(status=file_status)
self.addon.update(status=addon_status)
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert (
addon_item.find('.DevHub-MyAddons-item-edit').attr('href') ==
self.addon.get_dev_url('edit'))
if self.addon.type != amo.ADDON_STATICTHEME:
assert self.addon.get_icon_url(64) in addon_item.html()
else:
assert self.addon.current_previews[0].thumbnail_url in (
addon_item.html())
assert (
status_str ==
addon_item.find('.DevHub-MyAddons-VersionStatus').text())
Addon.objects.all().delete()
assert self.get_pq()(
'.DevHub-MyAddons-list .DevHub-MyAddons-item').length == 0
def test_my_addons_recommended(self):
self.make_addon_promoted(self.addon, RECOMMENDED, approve_version=True)
latest_version = self.addon.find_latest_version(
amo.RELEASE_CHANNEL_LISTED)
latest_file = latest_version.files.all()[0]
statuses = [
(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW,
'Awaiting Review'),
(amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW,
'Approved and Recommended'),
(amo.STATUS_DISABLED, amo.STATUS_APPROVED,
'Disabled by Mozilla')]
for addon_status, file_status, status_str in statuses:
latest_file.update(status=file_status)
self.addon.update(status=addon_status)
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert (
addon_item.find('.DevHub-MyAddons-item-edit').attr('href') ==
self.addon.get_dev_url('edit'))
if self.addon.type != amo.ADDON_STATICTHEME:
assert self.addon.get_icon_url(64) in addon_item.html()
else:
assert self.addon.current_previews[0].thumbnail_url in (
addon_item.html())
assert (
status_str ==
addon_item.find('.DevHub-MyAddons-VersionStatus').text())
Addon.objects.all().delete()
assert self.get_pq()(
'.DevHub-MyAddons-list .DevHub-MyAddons-item').length == 0
def test_my_addons_with_static_theme(self):
self.addon.update(type=amo.ADDON_STATICTHEME)
VersionPreview.objects.create(version=self.addon.current_version)
self.test_my_addons()
def test_my_addons_incomplete(self):
self.addon.update(status=amo.STATUS_NULL)
# Make add-on incomplete
AddonCategory.objects.filter(addon=self.addon).delete()
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert (
addon_item.find('.DevHub-MyAddons-item-edit').attr('href') ==
self.addon.get_dev_url('edit'))
def test_my_addons_no_disabled_or_deleted(self):
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=True)
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert (
addon_item.find('.DevHub-MyAddons-VersionStatus').text() ==
'Invisible')
class TestActivityFeed(TestCase):
fixtures = ('base/users', 'base/addon_3615')
def setUp(self):
super(TestActivityFeed, self).setUp()
assert self.client.login(email='[email protected]')
self.addon = Addon.objects.get(id=3615)
self.version = self.addon.versions.first()
self.action_user = UserProfile.objects.get(
email='[email protected]')
def test_feed_for_all(self):
response = self.client.get(reverse('devhub.feed_all'))
assert response.status_code == 200
doc = pq(response.content)
assert doc('header h2').text() == 'Recent Activity for My Add-ons'
def test_feed_for_addon(self):
response = self.client.get(
reverse('devhub.feed', args=[self.addon.slug]))
assert response.status_code == 200
doc = pq(response.content)
assert doc('header h2').text() == (
'Recent Activity for %s' % self.addon.name)
def test_feed_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
response = self.client.get(
reverse('devhub.feed', args=[self.addon.slug]))
assert response.status_code == 200
def test_feed_disabled_anon(self):
self.client.logout()
response = self.client.get(
reverse('devhub.feed', args=[self.addon.slug]))
assert response.status_code == 302
def add_log(self, action=amo.LOG.ADD_RATING):
core.set_user(self.action_user)
ActivityLog.create(action, self.addon, self.version)
def add_hidden_log(self, action=amo.LOG.COMMENT_VERSION):
self.add_log(action=action)
def test_feed_hidden(self):
self.add_hidden_log()
self.add_hidden_log(amo.LOG.OBJECT_ADDED)
res = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(res.content)
assert len(doc('#recent-activity li.item')) == 0
def test_addons_hidden(self):
self.add_hidden_log()
self.add_hidden_log(amo.LOG.OBJECT_ADDED)
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
assert len(doc('.recent-activity li.item')) == 0
def test_unlisted_addons_dashboard(self):
"""Unlisted addons are displayed in the feed on the dashboard page."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
assert len(doc('.recent-activity li.item')) == 1
def test_unlisted_addons_feed_sidebar(self):
"""Unlisted addons are displayed in the left side in the feed page."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.feed_all'))
doc = pq(res.content)
# First li is "All My Add-ons".
assert len(doc('#refine-addon li')) == 2
def test_unlisted_addons_feed(self):
"""Unlisted addons are displayed in the feed page."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.feed_all'))
doc = pq(res.content)
assert len(doc('#recent-activity .item')) == 1
def test_unlisted_addons_feed_filter(self):
"""Feed page can be filtered on unlisted addon."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(res.content)
assert len(doc('#recent-activity .item')) == 1
def test_reviewer_name_is_used_for_reviewer_actions(self):
self.action_user.update(display_name='HîdeMe', reviewer_name='ShöwMe')
self.add_log(action=amo.LOG.APPROVE_VERSION)
response = self.client.get(
reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(response.content)
assert len(doc('#recent-activity .item')) == 1
content = force_text(response.content)
assert self.action_user.reviewer_name in content
assert self.action_user.name not in content
def test_regular_name_is_used_for_non_reviewer_actions(self):
# Fields are inverted compared to the test above.
self.action_user.update(reviewer_name='HîdeMe', display_name='ShöwMe')
self.add_log(action=amo.LOG.ADD_RATING) # not a reviewer action.
response = self.client.get(
reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(response.content)
assert len(doc('#recent-activity .item')) == 1
content = force_text(response.content)
# Assertions are inverted compared to the test above.
assert self.action_user.reviewer_name not in content
assert self.action_user.name in content
def test_addons_dashboard_name(self):
self.add_log()
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
timestamp = doc('.recent-activity li.item span.activity-timestamp')
assert len(timestamp) == 1
assert self.action_user.name
assert self.action_user.name in timestamp.html()
assert '<a href=' not in timestamp.html()
def test_addons_dashboard_reviewer_name(self):
self.action_user.update(reviewer_name='bob')
self.add_log(action=amo.LOG.APPROVE_VERSION)
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
timestamp = doc('.recent-activity li.item span.activity-timestamp')
assert len(timestamp) == 1
assert self.action_user.name
assert self.action_user.name not in timestamp.html()
assert self.action_user.reviewer_name in timestamp.html()
assert '<a href=' not in timestamp.html()
class TestAPIAgreement(TestCase):
fixtures = ['base/addon_3615', 'base/addon_5579', 'base/users']
def setUp(self):
super(TestAPIAgreement, self).setUp()
assert self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.user.update(last_login_ip='192.168.1.1')
def test_agreement_read(self):
self.user.update(read_dev_agreement=self.days_ago(0))
response = self.client.get(reverse('devhub.api_key_agreement'))
self.assert3xx(response, reverse('devhub.api_key'))
def test_agreement_unread_captcha_inactive(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
form = response.context['agreement_form']
assert 'recaptcha' not in form.fields
doc = pq(response.content)
assert doc('.g-recaptcha') == []
@override_switch('developer-agreement-captcha', active=True)
def test_agreement_unread_captcha_active(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
form = response.context['agreement_form']
assert 'recaptcha' in form.fields
doc = pq(response.content)
assert doc('.g-recaptcha')
def test_agreement_submit_success(self):
self.user.update(read_dev_agreement=None)
response = self.client.post(reverse('devhub.api_key_agreement'), data={
'distribution_agreement': 'on',
'review_policy': 'on',
})
assert response.status_code == 302
assert response['Location'] == reverse('devhub.api_key')
self.user.reload()
self.assertCloseToNow(self.user.read_dev_agreement)
@override_switch('developer-agreement-captcha', active=True)
def test_agreement_submit_captcha_active_error(self):
self.user.update(read_dev_agreement=None)
response = self.client.post(reverse('devhub.api_key_agreement'))
# Captcha is properly rendered
doc = pq(response.content)
assert doc('.g-recaptcha')
assert 'recaptcha' in response.context['agreement_form'].errors
@override_switch('developer-agreement-captcha', active=True)
def test_agreement_submit_captcha_active_success(self):
self.user.update(read_dev_agreement=None)
verify_data = urlencode({
'secret': '',
'remoteip': '127.0.0.1',
'response': 'test',
})
responses.add(
responses.GET,
'https://www.google.com/recaptcha/api/siteverify?' + verify_data,
json={'error-codes': [], 'success': True})
response = self.client.post(reverse('devhub.api_key_agreement'), data={
'g-recaptcha-response': 'test',
'distribution_agreement': 'on',
'review_policy': 'on',
})
assert response.status_code == 302
assert response['Location'] == reverse('devhub.api_key')
self.user.reload()
self.assertCloseToNow(self.user.read_dev_agreement)
def test_agreement_read_but_too_long_ago(self):
set_config('last_dev_agreement_change_date', '2018-01-01 12:00')
before_agreement_last_changed = (datetime(2018, 1, 1, 12, 0) -
timedelta(days=1))
self.user.update(read_dev_agreement=before_agreement_last_changed)
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
@mock.patch(
'olympia.devhub.utils.UploadRestrictionChecker.is_submission_allowed')
def test_cant_submit_agreement_if_restricted(
self, is_submission_allowed_mock):
is_submission_allowed_mock.return_value = False
self.user.update(read_dev_agreement=None)
response = self.client.post(reverse('devhub.api_key_agreement'), data={
'distribution_agreement': 'on',
'review_policy': 'on',
})
assert response.status_code == 200
assert response.context['agreement_form'].is_valid() is False
self.user.reload()
assert self.user.read_dev_agreement is None
assert is_submission_allowed_mock.call_count == 2
# First call is from the form, and it's not checking the agreement,
# it's just to see if the user is restricted.
assert is_submission_allowed_mock.call_args_list[0] == (
(), {'check_dev_agreement': False}
)
# Second call is from the view itself, no arguments
assert is_submission_allowed_mock.call_args_list[1] == ((), {})
def test_cant_submit_agreement_if_restricted_functional(self):
# Like test_cant_submit_agreement_if_restricted() but with no mocks,
# picking a single restriction and making sure it's working properly.
IPNetworkUserRestriction.objects.create(network='127.0.0.1/32')
self.user.update(read_dev_agreement=None)
response = self.client.post(reverse('devhub.api_key_agreement'), data={
'distribution_agreement': 'on',
'review_policy': 'on',
})
assert response.status_code == 200
assert response.context['agreement_form'].is_valid() is False
doc = pq(response.content)
assert doc('.addon-submission-process').text() == (
'Multiple add-ons violating our policies have been submitted '
'from your location. The IP address has been blocked.\n'
'More information on Developer Accounts'
)
@mock.patch(
'olympia.devhub.utils.UploadRestrictionChecker.is_submission_allowed')
def test_agreement_page_shown_if_restricted(
self, is_submission_allowed_mock):
# Like test_agreement_read() above, but with a restricted user: they
# are shown the agreement page again instead of redirecting to the
# api keys page.
is_submission_allowed_mock.return_value = False
self.user.update(read_dev_agreement=self.days_ago(0))
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
class TestAPIKeyPage(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestAPIKeyPage, self).setUp()
self.url = reverse('devhub.api_key')
assert self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.user.update(last_login_ip='192.168.1.1')
def test_key_redirect(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key'))
self.assert3xx(response, reverse('devhub.api_key_agreement'))
def test_redirect_if_restricted(self):
IPNetworkUserRestriction.objects.create(network='127.0.0.1/32')
response = self.client.get(reverse('devhub.api_key'))
self.assert3xx(response, reverse('devhub.api_key_agreement'))
def test_view_without_credentials_not_confirmed_yet(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
submit = doc('#generate-key')
assert submit.text() == 'Generate new credentials'
inputs = doc('.api-input input')
assert len(inputs) == 0, 'Inputs should be absent before keys exist'
assert not doc('input[name=confirmation_token]')
def test_view_with_credentials(self):
APIKey.objects.create(user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
submit = doc('#generate-key')
assert submit.text() == 'Revoke and regenerate credentials'
assert doc('#revoke-key').text() == 'Revoke'
key_input = doc('.key-input input').val()
assert key_input == 'some-jwt-key'
def test_view_without_credentials_confirmation_requested_no_token(self):
APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=False)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
# Since confirmation has already been requested, there shouldn't be
# any buttons on the page if no token was passed in the URL - the user
# needs to follow the link in the email to continue.
assert not doc('input[name=confirmation_token]')
assert not doc('input[name=action]')
def test_view_without_credentials_confirmation_requested_with_token(self):
APIKeyConfirmation.objects.create(
user=self.user, token='secrettoken', confirmed_once=False)
self.url += '?token=secrettoken'
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('input[name=confirmation_token]')) == 1
token_input = doc('input[name=confirmation_token]')[0]
assert token_input.value == 'secrettoken'
submit = doc('#generate-key')
assert submit.text() == 'Confirm and generate new credentials'
def test_view_no_credentials_has_been_confirmed_once(self):
APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=True)
# Should look similar to when there are no credentials and no
        # confirmation has been requested yet; the post action is where it
# will differ.
self.test_view_without_credentials_not_confirmed_yet()
def test_create_new_credentials_has_been_confirmed_once(self):
APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=True)
patch = mock.patch('olympia.devhub.views.APIKey.new_jwt_credentials')
with patch as mock_creator:
response = self.client.post(self.url, data={'action': 'generate'})
mock_creator.assert_called_with(self.user)
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert message.to == [self.user.email]
assert message.subject == 'New API key created'
assert reverse('devhub.api_key') in message.body
self.assert3xx(response, self.url)
def test_create_new_credentials_confirming_with_token(self):
confirmation = APIKeyConfirmation.objects.create(
user=self.user, token='secrettoken', confirmed_once=False)
patch = mock.patch('olympia.devhub.views.APIKey.new_jwt_credentials')
with patch as mock_creator:
response = self.client.post(self.url, data={
'action': 'generate', 'confirmation_token': 'secrettoken'
})
mock_creator.assert_called_with(self.user)
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert message.to == [self.user.email]
assert message.subject == 'New API key created'
assert reverse('devhub.api_key') in message.body
confirmation.reload()
assert confirmation.confirmed_once
self.assert3xx(response, self.url)
def test_create_new_credentials_not_confirmed_yet(self):
assert not APIKey.objects.filter(user=self.user).exists()
assert not APIKeyConfirmation.objects.filter(user=self.user).exists()
response = self.client.post(self.url, data={'action': 'generate'})
self.assert3xx(response, self.url)
        # Since there were no credentials and no confirmation yet, this should
        # create a confirmation and send an email with the token, but not
        # create credentials yet.
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert message.to == [self.user.email]
assert not APIKey.objects.filter(user=self.user).exists()
assert APIKeyConfirmation.objects.filter(user=self.user).exists()
confirmation = APIKeyConfirmation.objects.filter(user=self.user).get()
assert confirmation.token
assert not confirmation.confirmed_once
token = confirmation.token
expected_url = (
f'http://testserver/en-US/developers/addon/api/key/?token={token}'
)
assert message.subject == 'Confirmation for developer API keys'
assert expected_url in message.body
def test_create_new_credentials_confirmation_exists_no_token_passed(self):
confirmation = APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=False)
response = self.client.post(self.url, data={'action': 'generate'})
assert len(mail.outbox) == 0
assert not APIKey.objects.filter(user=self.user).exists()
confirmation.reload()
assert not confirmation.confirmed_once # Unchanged
self.assert3xx(response, self.url)
def test_create_new_credentials_confirmation_exists_token_is_wrong(self):
confirmation = APIKeyConfirmation.objects.create(
user=self.user, token='sometoken', confirmed_once=False)
response = self.client.post(self.url, data={
'action': 'generate', 'confirmation_token': 'wrong'
})
        # Nothing should have happened; the user will just be redirected to
        # the page.
assert len(mail.outbox) == 0
assert not APIKey.objects.filter(user=self.user).exists()
confirmation.reload()
assert not confirmation.confirmed_once
self.assert3xx(response, self.url)
def test_delete_and_recreate_credentials_has_been_confirmed_once(self):
APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=True)
old_key = APIKey.objects.create(user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret')
response = self.client.post(self.url, data={'action': 'generate'})
self.assert3xx(response, self.url)
old_key = APIKey.objects.get(pk=old_key.pk)
assert old_key.is_active is None
new_key = APIKey.get_jwt_key(user=self.user)
assert new_key.key != old_key.key
assert new_key.secret != old_key.secret
def test_delete_and_recreate_credentials_has_not_been_confirmed_yet(self):
old_key = APIKey.objects.create(user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret')
response = self.client.post(self.url, data={'action': 'generate'})
self.assert3xx(response, self.url)
old_key = APIKey.objects.get(pk=old_key.pk)
assert old_key.is_active is None
        # Since there was no confirmation, this should create one, send an
        # email with the token, but not create credentials yet. (Would happen
        # for a user that had API keys from before we introduced the
        # confirmation mechanism, but decided to regenerate.)
assert len(mail.outbox) == 2 # 2 because of key revocation email.
assert 'revoked' in mail.outbox[0].body
message = mail.outbox[1]
assert message.to == [self.user.email]
assert not APIKey.objects.filter(
user=self.user, is_active=True).exists()
assert APIKeyConfirmation.objects.filter(user=self.user).exists()
confirmation = APIKeyConfirmation.objects.filter(user=self.user).get()
assert confirmation.token
assert not confirmation.confirmed_once
token = confirmation.token
expected_url = (
f'http://testserver/en-US/developers/addon/api/key/?token={token}'
)
assert message.subject == 'Confirmation for developer API keys'
assert expected_url in message.body
def test_delete_credentials(self):
old_key = APIKey.objects.create(user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret')
response = self.client.post(self.url, data={'action': 'revoke'})
self.assert3xx(response, self.url)
old_key = APIKey.objects.get(pk=old_key.pk)
assert old_key.is_active is None
assert len(mail.outbox) == 1
assert 'revoked' in mail.outbox[0].body
class TestUpload(BaseUploadTest):
fixtures = ['base/users']
def setUp(self):
super(TestUpload, self).setUp()
assert self.client.login(email='[email protected]')
self.url = reverse('devhub.upload')
self.image_path = get_image_path('animated.png')
def post(self, **kwargs):
        # Has to be a binary, non-xpi file.
data = open(self.image_path, 'rb')
return self.client.post(self.url, {'upload': data}, **kwargs)
def test_login_required(self):
self.client.logout()
response = self.post()
assert response.status_code == 302
def test_create_fileupload(self):
self.post()
upload = FileUpload.objects.filter().order_by('-created').first()
assert 'animated.png' in upload.name
data = open(self.image_path, 'rb').read()
assert storage.open(upload.path).read() == data
def test_fileupload_metadata(self):
user = UserProfile.objects.get(email='[email protected]')
self.client.login(email=user.email)
self.post(REMOTE_ADDR='4.8.15.16.23.42')
upload = FileUpload.objects.get()
assert upload.user == user
assert upload.source == amo.UPLOAD_SOURCE_DEVHUB
assert upload.ip_address == '4.8.15.16.23.42'
def test_fileupload_validation(self):
self.post()
upload = FileUpload.objects.filter().order_by('-created').first()
assert upload.validation
validation = json.loads(upload.validation)
assert not validation['success']
# The current interface depends on this JSON structure:
assert validation['errors'] == 1
assert validation['warnings'] == 0
assert len(validation['messages'])
msg = validation['messages'][0]
assert msg['type'] == u'error'
assert msg['message'] == (
u'Unsupported file type, please upload a supported file '
'(.crx, .xpi, .xml, .zip).')
assert not msg['description']
def test_redirect(self):
response = self.post()
upload = FileUpload.objects.get()
url = reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
self.assert3xx(response, url)
def test_not_an_uuid(self):
url = reverse('devhub.upload_detail', args=['garbage', 'json'])
response = self.client.get(url)
assert response.status_code == 404
@mock.patch('olympia.devhub.tasks.validate')
def test_upload_unlisted_addon(self, validate_mock):
"""Unlisted addons are validated as "self hosted" addons."""
validate_mock.return_value = json.dumps(amo.VALIDATOR_SKELETON_RESULTS)
self.url = reverse('devhub.upload_unlisted')
self.post()
# Make sure it was called with listed=False.
assert not validate_mock.call_args[1]['listed']
class TestUploadDetail(BaseUploadTest):
fixtures = ['base/appversion', 'base/users']
@classmethod
def setUpTestData(cls):
versions = {
'51.0a1',
amo.DEFAULT_WEBEXT_MIN_VERSION,
amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID,
amo.DEFAULT_WEBEXT_MAX_VERSION
}
for version in versions:
cls.create_appversion('firefox', version)
cls.create_appversion('android', version)
def setUp(self):
super(TestUploadDetail, self).setUp()
assert self.client.login(email='[email protected]')
@classmethod
def create_appversion(cls, application_name, version):
return AppVersion.objects.create(
application=amo.APPS[application_name].id, version=version)
def post(self):
        # Has to be a binary, non-xpi file.
data = open(get_image_path('animated.png'), 'rb')
return self.client.post(reverse('devhub.upload'), {'upload': data})
def validation_ok(self):
return {
'errors': 0,
'success': True,
'warnings': 0,
'notices': 0,
'message_tree': {},
'messages': [],
'rejected': False,
'metadata': {}}
def upload_file(self, file, url='devhub.upload'):
addon = os.path.join(
settings.ROOT, 'src', 'olympia', 'devhub', 'tests', 'addons', file)
with open(addon, 'rb') as f:
response = self.client.post(
reverse(url), {'upload': f})
assert response.status_code == 302
def test_detail_json(self):
self.post()
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid.hex, 'json']))
assert response.status_code == 200
data = json.loads(force_text(response.content))
assert data['validation']['errors'] == 1
assert data['url'] == (
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json']))
assert data['full_report_url'] == (
reverse('devhub.upload_detail', args=[upload.uuid.hex]))
# We must have tiers
assert len(data['validation']['messages'])
msg = data['validation']['messages'][0]
assert msg['tier'] == 1
def test_upload_detail_for_version(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory()
addon.addonuser_set.create(user=user)
self.post()
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail_for_version',
args=[addon.slug, upload.uuid.hex]))
assert response.status_code == 200
def test_upload_detail_for_version_not_an_uuid(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory()
addon.addonuser_set.create(user=user)
url = reverse(
'devhub.upload_detail_for_version', args=[addon.slug, 'garbage'])
response = self.client.get(url)
assert response.status_code == 404
def test_upload_detail_for_version_unlisted(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory(
version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
addon.addonuser_set.create(user=user)
self.post()
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail_for_version',
args=[addon.slug, upload.uuid.hex]))
assert response.status_code == 200
def test_upload_detail_for_version_deleted(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory()
addon.addonuser_set.create(user=user)
addon.delete()
self.post()
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail_for_version',
args=[addon.slug, upload.uuid.hex]))
assert response.status_code == 404
def test_detail_view(self):
self.post()
upload = FileUpload.objects.filter().order_by('-created').first()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex]))
assert response.status_code == 200
doc = pq(response.content)
expected = 'Validation Results for animated.png'
assert doc('header h2').text() == expected
suite = doc('#addon-validator-suite')
expected = reverse(
'devhub.standalone_upload_detail',
args=[upload.uuid.hex])
assert suite.attr('data-validateurl') == expected
    def test_not_an_uuid_standalone_upload_detail(self):
url = reverse('devhub.standalone_upload_detail', args=['garbage'])
response = self.client.get(url)
assert response.status_code == 404
def test_no_servererror_on_missing_version(self):
"""https://github.com/mozilla/addons-server/issues/3779
        addons-linter and amo-validator both add proper errors if the version
        is missing, but we shouldn't fail on that; we should properly show the
        validation results instead.
"""
self.upload_file('valid_webextension_no_version.xpi')
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
message = [(m['message'], m.get('type') == 'error')
for m in data['validation']['messages']]
expected = [(u'"/version" is a required property', True)]
assert message == expected
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_not_a_valid_xpi(self, run_addons_linter_mock):
run_addons_linter_mock.return_value = json.dumps(self.validation_ok())
self.upload_file('unopenable.xpi')
# We never even reach the linter (we can't: because we're repacking
# zip files, we should raise an error if the zip is invalid before
# calling the linter, even though the linter has a perfectly good error
# message for this kind of situation).
assert not run_addons_linter_mock.called
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
message = [(m['message'], m.get('fatal', False))
for m in data['validation']['messages']]
# We do raise a specific error message explaining that the archive is
# not valid instead of a generic exception.
assert message == [
('Invalid or corrupt add-on file.', True),
]
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_experiment_xpi_allowed(self, mock_validator):
user = UserProfile.objects.get(email='[email protected]')
self.grant_permission(user, 'Experiments:submit')
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file(
'../../../files/fixtures/files/experiment_inside_webextension.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
assert data['validation']['messages'] == []
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_experiment_xpi_not_allowed(self, mock_validator):
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file(
'../../../files/fixtures/files/experiment_inside_webextension.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
assert data['validation']['messages'] == [
{u'tier': 1, u'message': u'You cannot submit this type of add-on',
u'fatal': True, u'type': u'error'}]
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_system_addon_allowed(self, mock_validator):
user = user_factory()
self.grant_permission(user, 'SystemAddon:Submit')
assert self.client.login(email=user.email)
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file(
'../../../files/fixtures/files/mozilla_guid.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
assert data['validation']['messages'] == []
@mock.patch('olympia.devhub.tasks.run_addons_linter')
    def test_system_addon_not_allowed(self, mock_validator):
user_factory(email='[email protected]')
assert self.client.login(email='[email protected]')
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file(
'../../../files/fixtures/files/mozilla_guid.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
assert data['validation']['messages'] == [
{'tier': 1,
'message': 'You cannot submit an add-on using an ID ending with '
'"@mozilla.com" or "@mozilla.org" or '
'"@pioneer.mozilla.org" or "@search.mozilla.org" or '
'"@shield.mozilla.com" or "@shield.mozilla.org" or '
'"@mozillaonline.com"',
'fatal': True, 'type': 'error'}]
@mock.patch('olympia.devhub.tasks.run_addons_linter')
@mock.patch('olympia.files.utils.get_signer_organizational_unit_name')
def test_mozilla_signed_allowed(self, mock_get_signature, mock_validator):
user = user_factory()
assert self.client.login(email=user.email)
self.grant_permission(user, 'SystemAddon:Submit')
mock_validator.return_value = json.dumps(self.validation_ok())
mock_get_signature.return_value = "Mozilla Extensions"
self.upload_file(
'../../../files/fixtures/files/webextension_signed_already.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
assert data['validation']['messages'] == []
@mock.patch('olympia.files.utils.get_signer_organizational_unit_name')
def test_mozilla_signed_not_allowed_not_allowed(self, mock_get_signature):
user_factory(email='[email protected]')
assert self.client.login(email='[email protected]')
mock_get_signature.return_value = 'Mozilla Extensions'
self.upload_file(
'../../../files/fixtures/files/webextension_signed_already.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
assert data['validation']['messages'] == [
{u'tier': 1,
u'message': u'You cannot submit a Mozilla Signed Extension',
u'fatal': True, u'type': u'error'}]
def test_legacy_mozilla_signed_fx57_compat_allowed(self):
"""Legacy add-ons that are signed with the mozilla certificate
should be allowed to be submitted ignoring most compatibility
checks.
See https://github.com/mozilla/addons-server/issues/6424 for more
information.
We also don't call amo-validator on them but we issue a warning
about being legacy add-ons.
"""
user = user_factory()
self.grant_permission(user, 'SystemAddon:Submit')
assert self.client.login(email=user.email)
self.upload_file(os.path.join(
settings.ROOT, 'src', 'olympia', 'files', 'fixtures', 'files',
'legacy-addon-already-signed-0.1.0.xpi'))
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
msg = data['validation']['messages'][0]
assert msg['id'] == [
'validation', 'messages', 'legacy_addons_unsupported']
assert msg['type'] == 'warning'
assert msg['message'] == (
u'Legacy extensions are no longer supported in Firefox.')
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_system_addon_update_allowed(self, mock_validator):
"""Updates to system addons are allowed from anyone."""
user = user_factory(email='[email protected]')
addon = addon_factory(guid='[email protected]')
AddonUser.objects.create(addon=addon, user=user)
assert self.client.login(email='[email protected]')
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file(
'../../../files/fixtures/files/mozilla_guid.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail_for_version',
args=[addon.slug, upload.uuid.hex]))
data = json.loads(force_text(response.content))
assert data['validation']['messages'] == []
def test_legacy_langpacks_disallowed(self):
self.upload_file(
'../../../files/fixtures/files/langpack.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid.hex, 'json']))
data = json.loads(force_text(response.content))
assert data['validation']['messages'][0]['id'] == [
u'validation', u'messages', u'legacy_addons_unsupported'
]
def test_no_redirect_for_metadata(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory(status=amo.STATUS_NULL)
AddonCategory.objects.filter(addon=addon).delete()
addon.addonuser_set.create(user=user)
self.post()
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail_for_version',
args=[addon.slug, upload.uuid.hex]))
assert response.status_code == 200
def assert_json_error(request, field, msg):
assert request.status_code == 400
assert request['Content-Type'] == 'application/json'
field = '__all__' if field is None else field
content = json.loads(request.content)
assert field in content, '%r not in %r' % (field, content)
assert content[field] == [msg]
def assert_json_field(request, field, msg):
assert request.status_code == 200
assert request['Content-Type'] == 'application/json'
content = json.loads(request.content)
assert field in content, '%r not in %r' % (field, content)
assert content[field] == msg
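# Hypothetical illustration (not part of the original module) of how the two
# helpers above are meant to be called from tests; the field names and messages
# below are made up for the example:
#
#   assert_json_error(response, 'slug', 'This field is required.')
#   assert_json_field(response, 'validation_url', '/some/expected/url')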
class TestQueuePosition(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestQueuePosition, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.version = self.addon.current_version
self.addon.update(guid='guid@xpi')
assert self.client.login(email='[email protected]')
self.edit_url = reverse('devhub.versions.edit',
args=[self.addon.slug, self.version.id])
version_files = self.version.files.all()[0]
version_files.platform = amo.PLATFORM_LINUX.id
version_files.save()
# Add a second one also awaiting review in each queue
addon_factory(
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# And some static themes that shouldn't be counted
addon_factory(
status=amo.STATUS_NOMINATED, type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(type=amo.ADDON_STATICTHEME),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
addon_factory(
status=amo.STATUS_NOMINATED, type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(type=amo.ADDON_STATICTHEME),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
def test_not_in_queue(self):
response = self.client.get(self.addon.get_dev_url('versions'))
assert self.addon.status == amo.STATUS_APPROVED
assert (
pq(response.content)('.version-status-actions .dark').length == 0)
def test_in_queue(self):
statuses = [(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW),
(amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW)]
for (addon_status, file_status) in statuses:
latest_version = self.addon.find_latest_version(
amo.RELEASE_CHANNEL_LISTED)
latest_version.files.all()[0].update(status=file_status)
self.addon.update(status=addon_status)
response = self.client.get(self.addon.get_dev_url('versions'))
doc = pq(response.content)
span = doc('.queue-position')
assert span.length
assert "Queue Position: 1 of 2" in span.text()
def test_static_themes_in_queue(self):
statuses = [(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW),
(amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW)]
self.addon.update(type=amo.ADDON_STATICTHEME)
for (addon_status, file_status) in statuses:
latest_version = self.addon.find_latest_version(
amo.RELEASE_CHANNEL_LISTED)
latest_version.files.all()[0].update(status=file_status)
self.addon.update(status=addon_status)
response = self.client.get(self.addon.get_dev_url('versions'))
doc = pq(response.content)
span = doc('.queue-position')
assert span.length
assert "Queue Position: 1 of 3" in span.text()
class TestVersionXSS(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestVersionXSS, self).setUp()
self.version = Addon.objects.get(id=3615).current_version
assert self.client.login(email='[email protected]')
def test_unique_version_num(self):
# Can't use a "/" to close the tag, as we're doing a get_url_path on
# it, which uses addons.versions, which consumes up to the first "/"
# encountered.
self.version.update(
version='<script>alert("Happy XSS-Xmas");<script>')
response = self.client.get(reverse('devhub.addons'))
assert response.status_code == 200
assert b'<script>alert' not in response.content
        assert b'&lt;script&gt;alert' in response.content
class TestDeleteAddon(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestDeleteAddon, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.url = self.addon.get_dev_url('delete')
self.client.login(email='[email protected]')
def test_bad_password(self):
response = self.client.post(self.url, {'slug': 'nope'})
self.assert3xx(response, self.addon.get_dev_url('versions'))
assert response.context['title'] == (
'URL name was incorrect. Add-on was not deleted.')
assert Addon.objects.count() == 1
def test_success(self):
response = self.client.post(self.url, {'slug': 'a3615'})
self.assert3xx(response, reverse('devhub.addons'))
assert response.context['title'] == 'Add-on deleted.'
assert Addon.objects.count() == 0
class TestRequestReview(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestRequestReview, self).setUp()
self.addon = addon_factory()
self.version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
self.redirect_url = self.addon.get_dev_url('versions')
self.public_url = reverse('devhub.request-review',
args=[self.addon.slug])
assert self.client.login(email='[email protected]')
def get_addon(self):
return Addon.objects.get(id=self.addon.id)
def get_version(self):
return Version.objects.get(pk=self.version.id)
def check_400(self, url):
response = self.client.post(url)
assert response.status_code == 400
def test_public(self):
self.addon.update(status=amo.STATUS_APPROVED)
self.check_400(self.public_url)
@mock.patch('olympia.addons.models.Addon.has_complete_metadata')
def test_renominate_for_full_review(self, mock_has_complete_metadata):
# When a version is rejected, the addon is disabled.
# The author must upload a new version and re-nominate.
# Renominating the same version resets the nomination date.
mock_has_complete_metadata.return_value = True
orig_date = datetime.now() - timedelta(days=30)
# Pretend it was nominated in the past:
self.version.update(nomination=orig_date)
self.addon.update(status=amo.STATUS_NULL)
response = self.client.post(self.public_url)
self.assert3xx(response, self.redirect_url)
assert self.get_addon().status == amo.STATUS_NOMINATED
assert self.get_version().nomination.timetuple()[0:5] != (
orig_date.timetuple()[0:5])
class TestRedirects(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestRedirects, self).setUp()
self.base = reverse('devhub.index')
assert self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.user.update(last_login_ip='192.168.1.1')
def test_edit(self):
url = self.base + 'addon/edit/3615'
response = self.client.get(url, follow=True)
self.assert3xx(
response, reverse('devhub.addons.edit', args=['a3615']), 301)
url = self.base + 'addon/edit/3615/'
response = self.client.get(url, follow=True)
self.assert3xx(
response, reverse('devhub.addons.edit', args=['a3615']), 301)
def test_status(self):
url = self.base + 'addon/status/3615'
response = self.client.get(url, follow=True)
self.assert3xx(
response, reverse('devhub.addons.versions', args=['a3615']), 301)
def test_versions(self):
url = self.base + 'versions/3615'
response = self.client.get(url, follow=True)
self.assert3xx(
response, reverse('devhub.addons.versions', args=['a3615']), 301)
def test_lwt_submit_redirects_to_addon_submit(self):
url = reverse('devhub.themes.submit')
response = self.client.get(url, follow=True)
self.assert3xx(
response, reverse('devhub.submit.distribution'), 302)
class TestHasCompleteMetadataRedirects(TestCase):
"""Make sure Addons that are not complete in some way are correctly
redirected to the right view (and don't end up in a redirect loop)."""
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestHasCompleteMetadataRedirects, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'function'
self.request = RequestFactory().get('developers/addon/a3615/edit')
self.request.user = UserProfile.objects.get(email='[email protected]')
self.addon = Addon.objects.get(id=3615)
self.addon.update(status=amo.STATUS_NULL)
self.addon = Addon.objects.get(id=3615)
assert self.addon.has_complete_metadata(), (
self.addon.get_required_metadata())
assert not self.addon.should_redirect_to_submit_flow()
# We need to be logged in for any redirection into real views.
assert self.client.login(email='[email protected]')
def _test_redirect(self):
func = dev_required(self.f)
response = func(self.request, addon_id='a3615')
assert not self.f.called
assert response.status_code == 302
assert response['Location'] == (
'/en-US/developers/addon/a3615/submit/details')
# Check the redirection doesn't redirect also.
redirection = self.client.get(response['Location'])
assert redirection.status_code == 200
def test_default(self):
func = dev_required(self.f)
func(self.request, addon_id='a3615')
# Don't redirect if there is no metadata to collect.
assert self.f.called
def test_no_summary(self):
delete_translation(self.addon, 'summary')
self._test_redirect()
def test_no_license(self):
self.addon.current_version.update(license=None)
self._test_redirect()
def test_no_license_no_summary(self):
self.addon.current_version.update(license=None)
delete_translation(self.addon, 'summary')
self._test_redirect()
class TestDocs(TestCase):
def test_doc_urls(self):
assert '/en-US/developers/docs/' == reverse('devhub.docs', args=[])
assert '/en-US/developers/docs/te' == reverse(
'devhub.docs', args=['te'])
assert '/en-US/developers/docs/te/st', reverse(
'devhub.docs', args=['te/st'])
urls = [(reverse('devhub.docs', args=["getting-started"]), 301),
(reverse('devhub.docs', args=["how-to"]), 301),
(reverse('devhub.docs', args=["how-to/other-addons"]), 301),
(reverse('devhub.docs', args=["fake-page"]), 404),
(reverse('devhub.docs', args=["how-to/fake-page"]), 404),
(reverse('devhub.docs'), 301)]
index = reverse('devhub.index')
for url in urls:
response = self.client.get(url[0])
assert response.status_code == url[1]
if url[1] == 302: # Redirect to the index page
self.assert3xx(response, index)
class TestRemoveLocale(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestRemoveLocale, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.url = reverse('devhub.addons.remove-locale', args=['a3615'])
assert self.client.login(email='[email protected]')
def test_bad_request(self):
response = self.client.post(self.url)
assert response.status_code == 400
def test_success(self):
self.addon.name = {'en-US': 'woo', 'el': 'yeah'}
self.addon.save()
self.addon.remove_locale('el')
qs = (Translation.objects.filter(localized_string__isnull=False)
.values_list('locale', flat=True))
response = self.client.post(self.url, {'locale': 'el'})
assert response.status_code == 200
assert sorted(qs.filter(id=self.addon.name_id)) == ['en-US']
def test_delete_default_locale(self):
response = self.client.post(
self.url, {'locale': self.addon.default_locale})
assert response.status_code == 400
def test_remove_version_locale(self):
version = self.addon.versions.all()[0]
version.release_notes = {'fr': 'oui'}
version.save()
self.client.post(self.url, {'locale': 'fr'})
res = self.client.get(reverse('devhub.versions.edit',
args=[self.addon.slug, version.pk]))
doc = pq(res.content)
        # There are 2 fields, one for en-us, one for init.
assert len(doc('div.trans textarea')) == 2
class TestXssOnAddonName(amo.tests.TestXss):
def test_devhub_feed_page(self):
url = reverse('devhub.feed', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
def test_devhub_addon_edit_page(self):
url = reverse('devhub.addons.edit', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
def test_devhub_version_edit_page(self):
url = reverse('devhub.versions.edit', args=[self.addon.slug,
self.addon.current_version.id])
self.assertNameAndNoXSS(url)
def test_devhub_version_list_page(self):
url = reverse('devhub.addons.versions', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
@pytest.mark.django_db
def test_get_next_version_number():
addon = addon_factory(version_kw={'version': '1.0'})
# Easy case - 1.0 to 2.0
assert get_next_version_number(addon) == '2.0'
# version numbers without minor numbers should be okay too.
version_factory(addon=addon, version='2')
assert get_next_version_number(addon) == '3.0'
# We just iterate the major version number
addon.current_version.update(version='34.45.0a1pre')
addon.current_version.save()
assert get_next_version_number(addon) == '35.0'
# "Take" 35.0
version_factory(addon=addon, version='35.0',
file_kw={'status': amo.STATUS_DISABLED})
assert get_next_version_number(addon) == '36.0'
# And 36.0, even though it's deleted.
version_factory(addon=addon, version='36.0').delete()
assert addon.current_version.version == '34.45.0a1pre'
assert get_next_version_number(addon) == '37.0'
class TestThemeBackgroundImage(TestCase):
def setUp(self):
user = user_factory(email='[email protected]')
assert self.client.login(email='[email protected]')
self.addon = addon_factory(users=[user])
self.url = reverse(
'devhub.submit.version.previous_background',
args=[self.addon.slug, 'listed'])
def test_wrong_user(self):
user_factory(email='[email protected]')
assert self.client.login(email='[email protected]')
response = self.client.post(self.url, follow=True)
assert response.status_code == 403
def test_no_header_image(self):
response = self.client.post(self.url, follow=True)
assert response.status_code == 200
data = json.loads(force_text(response.content))
assert data == {}
def test_header_image(self):
destination = self.addon.current_version.all_files[0].current_file_path
zip_file = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
copy_stored_file(zip_file, destination)
response = self.client.post(self.url, follow=True)
assert response.status_code == 200
data = json.loads(force_text(response.content))
assert data
assert len(data.items()) == 1
assert 'weta.png' in data
assert len(data['weta.png']) == 168596 # base64-encoded size
class TestLogout(UserViewBase):
def test_success(self):
user = UserProfile.objects.get(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(reverse('devhub.index'), follow=True)
assert (
pq(response.content)('li a.avatar').attr('href') == (
user.get_url_path()))
assert (
pq(response.content)('li a.avatar img').attr('src') == (
user.picture_url))
response = self.client.get('/en-US/developers/logout', follow=False)
self.assert3xx(response, '/en-US/firefox/', status_code=302)
response = self.client.get(reverse('devhub.index'), follow=True)
assert not pq(response.content)('li a.avatar')
def test_redirect(self):
self.client.login(email='[email protected]')
self.client.get(reverse('devhub.index'), follow=True)
url = '/en-US/about'
response = self.client.get(urlparams(reverse('devhub.logout'), to=url),
follow=True)
self.assert3xx(response, url, status_code=302)
# Test an invalid domain
url = urlparams(reverse('devhub.logout'), to='/en-US/about',
domain='http://evil.com')
response = self.client.get(url, follow=False)
self.assert3xx(response, '/en-US/about', status_code=302)
def test_session_cookie_deleted_on_logout(self):
self.client.login(email='[email protected]')
self.client.cookies[API_TOKEN_COOKIE] = 'some.token.value'
response = self.client.get(reverse('devhub.logout'))
cookie = response.cookies[settings.SESSION_COOKIE_NAME]
cookie_date_string = u'Thu, 01 Jan 1970 00:00:00 GMT'
assert cookie.value == ''
# in django2.1+ changed to django.utils.http.http_date from cookie_date
assert cookie['expires'].replace('-', ' ') == cookie_date_string
jwt_cookie = response.cookies[API_TOKEN_COOKIE]
assert jwt_cookie.value == ''
assert jwt_cookie['expires'].replace('-', ' ') == cookie_date_string
class TestStatsLinksInManageMySubmissionsPage(TestCase):
def setUp(self):
super().setUp()
self.user = user_factory()
self.addon = addon_factory(users=[self.user])
self.url = reverse('devhub.addons')
self.client.login(email=self.user.email)
def test_link_to_stats(self):
response = self.client.get(self.url)
assert (reverse('stats.overview', args=[self.addon.slug]) in
str(response.content))
def test_link_to_stats_for_addon_disabled_by_user(self):
self.addon.update(disabled_by_user=True)
response = self.client.get(self.url)
assert (reverse('stats.overview', args=[self.addon.slug]) in
str(response.content))
def test_link_to_stats_for_unlisted_addon(self):
self.make_addon_unlisted(self.addon)
response = self.client.get(self.url)
assert (reverse('stats.overview', args=[self.addon.slug]) in
str(response.content))
def test_no_link_for_addon_disabled_by_mozilla(self):
self.addon.update(status=amo.STATUS_DISABLED)
self.make_addon_unlisted(self.addon)
response = self.client.get(self.url)
assert (reverse('stats.overview', args=[self.addon.slug]) not in
str(response.content))
def test_link_to_stats_for_langpacks(self):
self.addon.update(type=amo.ADDON_LPAPP)
response = self.client.get(self.url)
assert (reverse('stats.overview', args=[self.addon.slug]) in
str(response.content))
def test_link_to_stats_for_dictionaries(self):
self.addon.update(type=amo.ADDON_DICT)
response = self.client.get(self.url)
assert (reverse('stats.overview', args=[self.addon.slug]) in
str(response.content))
| {
"content_hash": "0c12c70026075bff5ed353d86a1e22d2",
"timestamp": "",
"source": "github",
"line_count": 2043,
"max_line_length": 79,
"avg_line_length": 41.90846793930494,
"alnum_prop": 0.6199792102220302,
"repo_name": "eviljeff/olympia",
"id": "93f0f413f6064160c15783bafc6c6ffe0fa0e696",
"size": "85647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/devhub/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "251925"
},
{
"name": "Dockerfile",
"bytes": "4063"
},
{
"name": "HTML",
"bytes": "314372"
},
{
"name": "JavaScript",
"bytes": "865804"
},
{
"name": "Less",
"bytes": "307222"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6146705"
},
{
"name": "Shell",
"bytes": "8000"
},
{
"name": "Smarty",
"bytes": "1413"
}
],
"symlink_target": ""
} |
"""Denon HEOS Media Player."""
import asyncio
from functools import reduce, wraps
import logging
from operator import ior
from typing import Sequence
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.components.media_player.const import (
DOMAIN, MEDIA_TYPE_MUSIC, MEDIA_TYPE_URL, SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOURCE, SUPPORT_SHUFFLE_SET,
SUPPORT_STOP, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import utcnow
from .const import (
DATA_SOURCE_MANAGER, DOMAIN as HEOS_DOMAIN, SIGNAL_HEOS_SOURCES_UPDATED)
BASE_SUPPORTED_FEATURES = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | \
SUPPORT_VOLUME_STEP | SUPPORT_CLEAR_PLAYLIST | \
SUPPORT_SHUFFLE_SET | SUPPORT_SELECT_SOURCE | \
SUPPORT_PLAY_MEDIA
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Platform uses config entry setup."""
pass
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry,
async_add_entities):
"""Add media players for a config entry."""
players = hass.data[HEOS_DOMAIN][DOMAIN]
devices = [HeosMediaPlayer(player) for player in players.values()]
async_add_entities(devices, True)
def log_command_error(command: str):
"""Return decorator that logs command failure."""
def decorator(func):
@wraps(func)
async def wrapper(*args, **kwargs):
from pyheos import CommandError
try:
await func(*args, **kwargs)
except (CommandError, asyncio.TimeoutError, ConnectionError) as ex:
_LOGGER.error("Unable to %s: %s", command, ex)
return wrapper
return decorator
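# A minimal, hypothetical sketch (not part of the original platform) showing how
# log_command_error composes with an arbitrary coroutine: errors raised by the
# wrapped call are logged instead of propagated to the caller. The 'player'
# argument is assumed to be a pyheos player object exposing an async play().
@log_command_error("play (example)")
async def _example_play(player):
    """Illustration only: play via a pyheos player, logging any command error."""
    await player.play()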
class HeosMediaPlayer(MediaPlayerDevice):
"""The HEOS player."""
def __init__(self, player):
"""Initialize."""
from pyheos import const
self._media_position_updated_at = None
self._player = player
self._signals = []
self._supported_features = BASE_SUPPORTED_FEATURES
self._source_manager = None
self._play_state_to_state = {
const.PLAY_STATE_PLAY: STATE_PLAYING,
const.PLAY_STATE_STOP: STATE_IDLE,
const.PLAY_STATE_PAUSE: STATE_PAUSED
}
self._control_to_support = {
const.CONTROL_PLAY: SUPPORT_PLAY,
const.CONTROL_PAUSE: SUPPORT_PAUSE,
const.CONTROL_STOP: SUPPORT_STOP,
const.CONTROL_PLAY_PREVIOUS: SUPPORT_PREVIOUS_TRACK,
const.CONTROL_PLAY_NEXT: SUPPORT_NEXT_TRACK
}
async def _controller_event(self, event, data):
"""Handle controller event."""
from pyheos import const
if event == const.EVENT_PLAYERS_CHANGED:
await self.async_update_ha_state(True)
async def _heos_event(self, event):
"""Handle connection event."""
from pyheos import CommandError, const
if event == const.EVENT_CONNECTED:
try:
await self._player.refresh()
except (CommandError, asyncio.TimeoutError, ConnectionError) as ex:
_LOGGER.error("Unable to refresh player %s: %s",
self._player, ex)
await self.async_update_ha_state(True)
async def _player_update(self, player_id, event):
"""Handle player attribute updated."""
from pyheos import const
if self._player.player_id != player_id:
return
if event == const.EVENT_PLAYER_NOW_PLAYING_PROGRESS:
self._media_position_updated_at = utcnow()
await self.async_update_ha_state(True)
async def _sources_updated(self):
"""Handle sources changed."""
await self.async_update_ha_state(True)
async def async_added_to_hass(self):
"""Device added to hass."""
from pyheos import const
self._source_manager = self.hass.data[HEOS_DOMAIN][DATA_SOURCE_MANAGER]
# Update state when attributes of the player change
self._signals.append(self._player.heos.dispatcher.connect(
const.SIGNAL_PLAYER_EVENT, self._player_update))
# Update state when available players change
self._signals.append(self._player.heos.dispatcher.connect(
const.SIGNAL_CONTROLLER_EVENT, self._controller_event))
# Update state upon connect/disconnects
self._signals.append(self._player.heos.dispatcher.connect(
const.SIGNAL_HEOS_EVENT, self._heos_event))
# Update state when sources change
self._signals.append(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_HEOS_SOURCES_UPDATED, self._sources_updated))
@log_command_error("clear playlist")
async def async_clear_playlist(self):
"""Clear players playlist."""
await self._player.clear_queue()
@log_command_error("pause")
async def async_media_pause(self):
"""Send pause command."""
await self._player.pause()
@log_command_error("play")
async def async_media_play(self):
"""Send play command."""
await self._player.play()
@log_command_error("move to previous track")
async def async_media_previous_track(self):
"""Send previous track command."""
await self._player.play_previous()
@log_command_error("move to next track")
async def async_media_next_track(self):
"""Send next track command."""
await self._player.play_next()
@log_command_error("stop")
async def async_media_stop(self):
"""Send stop command."""
await self._player.stop()
@log_command_error("set mute")
async def async_mute_volume(self, mute):
"""Mute the volume."""
await self._player.set_mute(mute)
@log_command_error("play media")
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if media_type == MEDIA_TYPE_URL:
await self._player.play_url(media_id)
else:
_LOGGER.error("Unable to play media: Unsupported media type '%s'",
media_type)
@log_command_error("select source")
async def async_select_source(self, source):
"""Select input source."""
await self._source_manager.play_source(source, self._player)
@log_command_error("set shuffle")
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self._player.set_play_mode(self._player.repeat, shuffle)
@log_command_error("set volume level")
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._player.set_volume(int(volume * 100))
async def async_update(self):
"""Update supported features of the player."""
controls = self._player.now_playing_media.supported_controls
current_support = [self._control_to_support[control]
for control in controls]
self._supported_features = reduce(ior, current_support,
BASE_SUPPORTED_FEATURES)
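    # Note on the reduce above: each reported control maps to one feature bit,
    # and the bits are OR-ed onto the base mask. For example, with
    # controls == [CONTROL_PLAY, CONTROL_PAUSE] the result is
    # BASE_SUPPORTED_FEATURES | SUPPORT_PLAY | SUPPORT_PAUSE.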
async def async_will_remove_from_hass(self):
"""Disconnect the device when removed."""
for signal_remove in self._signals:
signal_remove()
self._signals.clear()
@property
def available(self) -> bool:
"""Return True if the device is available."""
return self._player.available
@property
def device_info(self) -> dict:
"""Get attributes about the device."""
return {
'identifiers': {
(DOMAIN, self._player.player_id)
},
'name': self._player.name,
'model': self._player.model,
'manufacturer': 'HEOS',
'sw_version': self._player.version
}
@property
def device_state_attributes(self) -> dict:
"""Get additional attribute about the state."""
return {
'media_album_id': self._player.now_playing_media.album_id,
'media_queue_id': self._player.now_playing_media.queue_id,
'media_source_id': self._player.now_playing_media.source_id,
'media_station': self._player.now_playing_media.station,
'media_type': self._player.now_playing_media.type
}
@property
def is_volume_muted(self) -> bool:
"""Boolean if volume is currently muted."""
return self._player.is_muted
@property
def media_album_name(self) -> str:
"""Album name of current playing media, music track only."""
return self._player.now_playing_media.album
@property
def media_artist(self) -> str:
"""Artist of current playing media, music track only."""
return self._player.now_playing_media.artist
@property
def media_content_id(self) -> str:
"""Content ID of current playing media."""
return self._player.now_playing_media.media_id
@property
def media_content_type(self) -> str:
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
duration = self._player.now_playing_media.duration
if isinstance(duration, int):
return duration / 1000
return None
@property
def media_position(self):
"""Position of current playing media in seconds."""
# Some media doesn't have duration but reports position, return None
if not self._player.now_playing_media.duration:
return None
return self._player.now_playing_media.current_position / 1000
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
# Some media doesn't have duration but reports position, return None
if not self._player.now_playing_media.duration:
return None
return self._media_position_updated_at
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return True
@property
def media_image_url(self) -> str:
"""Image url of current playing media."""
# May be an empty string, if so, return None
image_url = self._player.now_playing_media.image_url
return image_url if image_url else None
@property
def media_title(self) -> str:
"""Title of current playing media."""
return self._player.now_playing_media.song
@property
def name(self) -> str:
"""Return the name of the device."""
return self._player.name
@property
def should_poll(self) -> bool:
"""No polling needed for this device."""
return False
@property
def shuffle(self) -> bool:
"""Boolean if shuffle is enabled."""
return self._player.shuffle
@property
def source(self) -> str:
"""Name of the current input source."""
return self._source_manager.get_current_source(
self._player.now_playing_media)
@property
def source_list(self) -> Sequence[str]:
"""List of available input sources."""
return self._source_manager.source_list
@property
def state(self) -> str:
"""State of the player."""
return self._play_state_to_state[self._player.state]
@property
def supported_features(self) -> int:
"""Flag media player features that are supported."""
return self._supported_features
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return str(self._player.player_id)
@property
def volume_level(self) -> float:
"""Volume level of the media player (0..1)."""
return self._player.volume / 100
| {
"content_hash": "1f2895af7acb82fb042f34b1e1703973",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 79,
"avg_line_length": 36.12280701754386,
"alnum_prop": 0.6210134369435001,
"repo_name": "DavidLP/home-assistant",
"id": "18ed3fd736228b26d6de95888224dbf7067b31d4",
"size": "12354",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/heos/media_player.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15309293"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class HTransitionHListenBranch(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule TransitionHListenBranch.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HTransitionHListenBranch, self).__init__(name='HTransitionHListenBranch', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """TransitionHListenBranch"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'TransitionHListenBranch')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class Transition() node
self.add_node()
self.vs[3]["mm__"] = """Transition"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class Transition()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class Vertex() node
self.add_node()
self.vs[5]["mm__"] = """Vertex"""
self.vs[5]["attr1"] = """+"""
# match_contains node for class Vertex()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# match class StateMachine() node
self.add_node()
self.vs[7]["mm__"] = """StateMachine"""
self.vs[7]["attr1"] = """+"""
# match_contains node for class StateMachine()
self.add_node()
self.vs[8]["mm__"] = """match_contains"""
# match class State() node
self.add_node()
self.vs[9]["mm__"] = """State"""
self.vs[9]["attr1"] = """+"""
# match_contains node for class State()
self.add_node()
self.vs[10]["mm__"] = """match_contains"""
# match class Trigger() node
self.add_node()
self.vs[11]["mm__"] = """Trigger"""
self.vs[11]["attr1"] = """1"""
# match_contains node for class Trigger()
self.add_node()
self.vs[12]["mm__"] = """match_contains"""
# match class Signal() node
self.add_node()
self.vs[13]["mm__"] = """Signal"""
self.vs[13]["attr1"] = """1"""
# match_contains node for class Signal()
self.add_node()
self.vs[14]["mm__"] = """match_contains"""
# match class State() node
self.add_node()
self.vs[15]["mm__"] = """State"""
self.vs[15]["attr1"] = """1"""
# match_contains node for class State()
self.add_node()
self.vs[16]["mm__"] = """match_contains"""
# apply class Listen() node
self.add_node()
self.vs[17]["mm__"] = """Listen"""
self.vs[17]["attr1"] = """1"""
# apply_contains node for class Listen()
self.add_node()
self.vs[18]["mm__"] = """apply_contains"""
# apply class ListenBranch() node
self.add_node()
self.vs[19]["mm__"] = """ListenBranch"""
self.vs[19]["attr1"] = """1"""
# apply_contains node for class ListenBranch()
self.add_node()
self.vs[20]["mm__"] = """apply_contains"""
# apply class Seq() node
self.add_node()
self.vs[21]["mm__"] = """Seq"""
self.vs[21]["attr1"] = """1"""
# apply_contains node for class Seq()
self.add_node()
self.vs[22]["mm__"] = """apply_contains"""
# apply class Trigger() node
self.add_node()
self.vs[23]["mm__"] = """Trigger"""
self.vs[23]["attr1"] = """1"""
# apply_contains node for class Trigger()
self.add_node()
self.vs[24]["mm__"] = """apply_contains"""
# apply class Listen() node
self.add_node()
self.vs[25]["mm__"] = """Listen"""
self.vs[25]["attr1"] = """1"""
# apply_contains node for class Listen()
self.add_node()
self.vs[26]["mm__"] = """apply_contains"""
# apply class ListenBranch() node
self.add_node()
self.vs[27]["mm__"] = """ListenBranch"""
self.vs[27]["attr1"] = """1"""
# apply_contains node for class ListenBranch()
self.add_node()
self.vs[28]["mm__"] = """apply_contains"""
# apply class Inst() node
self.add_node()
self.vs[29]["mm__"] = """Inst"""
self.vs[29]["attr1"] = """1"""
# apply_contains node for class Inst()
self.add_node()
self.vs[30]["mm__"] = """apply_contains"""
# match association Transition--src-->Vertex node
self.add_node()
self.vs[31]["attr1"] = """src"""
self.vs[31]["mm__"] = """directLink_S"""
# match association Vertex--owningStateMachine-->StateMachine node
self.add_node()
self.vs[32]["attr1"] = """owningStateMachine"""
self.vs[32]["mm__"] = """directLink_S"""
# match association StateMachine--states-->State node
self.add_node()
self.vs[33]["attr1"] = """states"""
self.vs[33]["mm__"] = """directLink_S"""
# match association Transition--triggers-->Trigger node
self.add_node()
self.vs[34]["attr1"] = """triggers"""
self.vs[34]["mm__"] = """directLink_S"""
# match association Trigger--signal-->Signal node
self.add_node()
self.vs[35]["attr1"] = """signal"""
self.vs[35]["mm__"] = """directLink_S"""
# match association State--outgoingTransitions-->Transition node
self.add_node()
self.vs[36]["attr1"] = """outgoingTransitions"""
self.vs[36]["mm__"] = """directLink_S"""
# apply association Listen--branches-->ListenBranch node
self.add_node()
self.vs[37]["attr1"] = """branches"""
self.vs[37]["mm__"] = """directLink_T"""
# apply association ListenBranch--p-->Seq node
self.add_node()
self.vs[38]["attr1"] = """p"""
self.vs[38]["mm__"] = """directLink_T"""
# apply association Seq--p-->Trigger node
self.add_node()
self.vs[39]["attr1"] = """p"""
self.vs[39]["mm__"] = """directLink_T"""
# apply association Seq--p-->Listen node
self.add_node()
self.vs[40]["attr1"] = """p"""
self.vs[40]["mm__"] = """directLink_T"""
# apply association Listen--branches-->ListenBranch node
self.add_node()
self.vs[41]["attr1"] = """branches"""
self.vs[41]["mm__"] = """directLink_T"""
# apply association ListenBranch--p-->Inst node
self.add_node()
self.vs[42]["attr1"] = """p"""
self.vs[42]["mm__"] = """directLink_T"""
# backward association Transition---->Inst node
self.add_node()
self.vs[43]["mm__"] = """backward_link"""
# backward association State---->Listen node
self.add_node()
self.vs[44]["mm__"] = """backward_link"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class Transition()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class Vertex()
(0,8), # matchmodel -> match_contains
(8,7), # match_contains -> match_class StateMachine()
(0,10), # matchmodel -> match_contains
(10,9), # match_contains -> match_class State()
(0,12), # matchmodel -> match_contains
(12,11), # match_contains -> match_class Trigger()
(0,14), # matchmodel -> match_contains
(14,13), # match_contains -> match_class Signal()
(0,16), # matchmodel -> match_contains
(16,15), # match_contains -> match_class State()
(1,18), # applymodel -> apply_contains
(18,17), # apply_contains -> apply_class Listen()
(1,20), # applymodel -> apply_contains
(20,19), # apply_contains -> apply_class ListenBranch()
(1,22), # applymodel -> apply_contains
(22,21), # apply_contains -> apply_class Seq()
(1,24), # applymodel -> apply_contains
(24,23), # apply_contains -> apply_class Trigger()
(1,26), # applymodel -> apply_contains
(26,25), # apply_contains -> apply_class Listen()
(1,28), # applymodel -> apply_contains
(28,27), # apply_contains -> apply_class ListenBranch()
(1,30), # applymodel -> apply_contains
(30,29), # apply_contains -> apply_class Inst()
(3,31), # match_class Transition() -> association src
(31,5), # association src -> match_class Vertex()
(5,32), # match_class Vertex() -> association owningStateMachine
(32,7), # association owningStateMachine -> match_class StateMachine()
(7,33), # match_class StateMachine() -> association states
(33,15), # association states -> match_class State()
(3,34), # match_class Transition() -> association triggers
(34,11), # association triggers -> match_class Trigger()
(11,35), # match_class Trigger() -> association signal
(35,13), # association signal -> match_class Signal()
(9,36), # match_class State() -> association outgoingTransitions
(36,3), # association outgoingTransitions -> match_class Transition()
(17,37), # apply_class Listen() -> association branches
(37,19), # association branches -> apply_class ListenBranch()
(19,38), # apply_class ListenBranch() -> association p
(38,21), # association p -> apply_class Seq()
(21,39), # apply_class Seq() -> association p
(39,23), # association p -> apply_class Trigger()
(21,40), # apply_class Seq() -> association p
(40,25), # association p -> apply_class Listen()
(25,41), # apply_class Listen() -> association branches
(41,27), # association branches -> apply_class ListenBranch()
(27,42), # apply_class ListenBranch() -> association p
(42,29), # association p -> apply_class Inst()
(29,43), # apply_class Inst() -> backward_association
            (43,3), # backward_association -> match_class Transition()
            (25,44), # apply_class Listen() -> backward_association
            (44,9), # backward_association -> match_class State()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((17,'ApplyAttribute'),('constant','solveRef')), ((19,'channel'),(13,'name')), ((23,'channel'),('constant','exit_in')), ((27,'channel'),('constant','exack_in')), ((29,'ApplyAttribute'),('constant','solveRef')), ]
| {
"content_hash": "18f04ab1b0571c70d426309c4fb0d7aa",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 241,
"avg_line_length": 40.93617021276596,
"alnum_prop": 0.5025121275121275,
"repo_name": "levilucio/SyVOLT",
"id": "96049b3b3898a23c291e64a781f9917370398c64",
"size": "11544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/transformation/from_ATL/HTransitionHListenBranch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.4'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinxcontrib.bibtex',
]
# If true, figures, tables and code-blocks are automatically numbered if they have a caption.
numfig = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'The RTRlib Handbook'
copyright = u'2016-2017, Sebastian Meiling, Marcel Röthke, and Colin Sames, for HAW Hamburg'
author = u'Sebastian Meiling, Marcel Röthke, Colin Sames'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.4'
# The full version, including alpha/beta/rc tags.
release = u'0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'RTRlib Handbook v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'RTRlibHandbookdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
'classoptions': ',openany,oneside',
'babel': '\\usepackage[english]{babel}',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
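# Hypothetical example (not part of this configuration): additional LaTeX
# packages could be pulled in through the 'preamble' key shown above, e.g.
#
#   latex_elements['preamble'] = r'''
#   \usepackage{amsmath}
#   \usepackage{booktabs}
#   '''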
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RTRlibHandbook.tex', u'RTRlib Handbook',
u'Sebastian Meiling, Marcel Röthke, Colin Sames', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rtrlibhandbook', u'RTRlib Handbook Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'RTRlibHandbook', u'The RTRlib Handbook',
author, 'RTRlibHandbook', 'An Introduction to the RTRlib.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| {
"content_hash": "59d9056d1e9b3aee5447fed73a8f3a86",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 93,
"avg_line_length": 29.306060606060605,
"alnum_prop": 0.6923792782545756,
"repo_name": "rtrlib/handbook",
"id": "07c26c0122fb976cd16097e0133c9716caa82c02",
"size": "10410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7476"
},
{
"name": "Makefile",
"bytes": "7781"
},
{
"name": "Python",
"bytes": "10410"
},
{
"name": "TeX",
"bytes": "3507"
}
],
"symlink_target": ""
} |
import pytest
import io
import simplejson
from pymongo import MongoClient
class MongoTestInitializer(object):
"""
"""
def __init__(self, host="localhost", port=27017, db_name="twinkle_unittest_db", collection_name="tweets"):
self.host = host
self.port = port
self.db_name = db_name
self.collection_name = collection_name
self.client = None
self.db = None
self.collection = None
	def connect(self):
		"""
		Open the MongoDB connection and select the configured database and collection.
		"""
		self.client = MongoClient(self.host, self.port)
		self.db = self.client[self.db_name]
		self.collection = self.db[self.collection_name]
def disconnect(self):
if self.client is not None:
self.client.close()
self.client = None
self.db = None
self.collection = None
def setup(self, filename, encoding="UTF-8"):
with io.open(filename, "r", encoding=encoding) as f:
for l in f:
o = simplejson.loads(l)
self.collection.insert(o)
def teardown(self):
pass
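# A hypothetical usage sketch (not part of the original module): wiring the
# initializer into a pytest fixture. The fixture name and the fixture-file path
# below are assumptions for illustration only.
@pytest.fixture
def tweet_collection():
	initializer = MongoTestInitializer()
	initializer.connect()
	initializer.setup("fixtures/tweets.json")  # hypothetical path
	yield initializer.collection
	initializer.teardown()
	initializer.disconnect()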
| {
"content_hash": "7c46fce0ff5e0ee6f893d962294f7b73",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 107,
"avg_line_length": 20.11111111111111,
"alnum_prop": 0.6817679558011049,
"repo_name": "emCOMP/twinkle",
"id": "09cd0c02f42678ad9be97924a6e5fdf3127fda16",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/adapters/mongo_initializer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47156"
}
],
"symlink_target": ""
} |
###
## * << Haru Free PDF Library 2.0.0 >> -- outline_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <[email protected]>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
import sys
path=os.path.normpath(os.path.split(os.path.realpath(__file__))[0]+'\..'*up)
if path not in sys.path:
sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
global pdf
printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
detail_no)
HPDF_Free (pdf)
sys.exit(1)
def print_page (page, page_num):
HPDF_Page_SetWidth (page, 800)
HPDF_Page_SetHeight (page, 800)
HPDF_Page_BeginText (page)
HPDF_Page_MoveTextPos (page, 30, 740)
buf="Page:%d" % page_num
HPDF_Page_ShowText (page, buf)
HPDF_Page_EndText (page)
def main():
global pdf
page=[None for i in range(4)]
outline=[None for i in range(4)]
fname=os.path.realpath(sys.argv[0])
fname=fname[:fname.rfind('.')]+'.pdf'
pdf = HPDF_New (error_handler, NULL)
if (not pdf):
printf ("error: cannot create PdfDoc object\n")
return 1
# create default-font
font = HPDF_GetFont (pdf, "Helvetica", NULL)
# Set page mode to use outlines.
HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
# Add 3 pages to the document.
page[0] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[0], font, 30)
print_page(page[0], 1)
page[1] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[1], font, 30)
print_page(page[1], 2)
page[2] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[2], font, 30)
print_page(page[2], 3)
# create outline root.
root = HPDF_CreateOutline (pdf, NULL, "OutlineRoot", NULL)
HPDF_Outline_SetOpened (root, HPDF_TRUE)
outline[0] = HPDF_CreateOutline (pdf, root, "page1", NULL)
outline[1] = HPDF_CreateOutline (pdf, root, "page2", NULL)
    # create outline with text which is ISO8859-2 encoded; the title is kept
    # ASCII-only here, and outline[2] is created so it can be linked to a
    # destination below
    outline[2] = HPDF_CreateOutline (pdf, root, "ISO8859-2 text",
        HPDF_GetEncoder (pdf, "ISO8859-2"))
    # create destination objects on each page
    # and link them to outline items.
dst = HPDF_Page_CreateDestination (page[0])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[0]), 1)
HPDF_Outline_SetDestination(outline[0], dst)
#HPDF_Catalog_SetOpenAction(dst)
dst = HPDF_Page_CreateDestination (page[1])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[1]), 1)
HPDF_Outline_SetDestination(outline[1], dst)
dst = HPDF_Page_CreateDestination (page[2])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[2]), 1)
HPDF_Outline_SetDestination(outline[2], dst)
# save the document to a file
HPDF_SaveToFile (pdf, fname)
# clean up
HPDF_Free (pdf)
return 0
main() | {
"content_hash": "4361ed043e7b10daf23529b6eb275371",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 80,
"avg_line_length": 28.841269841269842,
"alnum_prop": 0.631535498073748,
"repo_name": "frnde/libhpdf-AndroidLibrary",
"id": "33f65b967db7415f0250e05927a589b323135577",
"size": "3659",
"binary": false,
"copies": "32",
"ref": "refs/heads/master",
"path": "library/jni/libhpdf-2.3.0RC2/if/python/demo/outline_demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "43946"
},
{
"name": "Batchfile",
"bytes": "3668"
},
{
"name": "C",
"bytes": "5470904"
},
{
"name": "C#",
"bytes": "113669"
},
{
"name": "C++",
"bytes": "20824"
},
{
"name": "CMake",
"bytes": "50848"
},
{
"name": "DIGITAL Command Language",
"bytes": "11084"
},
{
"name": "Java",
"bytes": "28845"
},
{
"name": "Makefile",
"bytes": "107288"
},
{
"name": "Module Management System",
"bytes": "2127"
},
{
"name": "Objective-C",
"bytes": "2669"
},
{
"name": "Pascal",
"bytes": "80391"
},
{
"name": "Python",
"bytes": "279332"
},
{
"name": "Roff",
"bytes": "239186"
},
{
"name": "Ruby",
"bytes": "32509"
},
{
"name": "Shell",
"bytes": "299622"
},
{
"name": "Visual Basic",
"bytes": "201515"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from .lapjv import lapjv
__all__ = ['lapjv']
| {
"content_hash": "0ee1a7298e65a66ddb52f3bbfd1324a1",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 21.88888888888889,
"alnum_prop": 0.7208121827411168,
"repo_name": "bnoi/scikit-tracker",
"id": "efb03e1c994c0928660abe58e18642a4622d019b",
"size": "224",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sktracker/tracker/lapjv/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "29047"
},
{
"name": "Makefile",
"bytes": "1808"
},
{
"name": "Python",
"bytes": "517305"
},
{
"name": "Shell",
"bytes": "1442"
}
],
"symlink_target": ""
} |
import json
import os
from wlgen import PerfMessaging, PerfPipe
from test_wlgen import WlgenSelfBase
class PerfBenchBase(WlgenSelfBase):
"""Base class for common testing of PerfBench workloads"""
def _do_test_performance_json(self, fields):
"""Test performance.json was created with the required fields"""
json_path = os.path.join(self.host_out_dir, 'performance.json')
try:
with open(json_path) as fh:
perf_json = json.load(fh)
except IOError:
raise AssertionError(
"Perf workload didn't create performance report file")
for field in fields:
msg = 'Perf performance report missing "{}" field'.format(field)
self.assertIn(field, perf_json, msg)
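    # For reference (an illustrative addition, not asserted verbatim by these
    # tests), the perf report checked above is expected to look roughly like:
    #   {"ctime": 1.23, "performance": 4.56, "usec/op": 7.8, "ops/sec": 9.0}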
class TestPerfMessaging(PerfBenchBase):
tools = ['perf']
def test_perf_messaging_smoke(self):
"""
Test PerfMessaging workload
Runs a perf messaging workload and tests that the expected output was
produced.
"""
        perf = PerfMessaging(self.target, 'perf_messaging')
perf.conf(group=1, loop=100, pipe=True, thread=True,
run_dir=self.target_run_dir)
os.makedirs(self.host_out_dir)
perf.run(out_dir=self.host_out_dir)
self._do_test_performance_json(['ctime', 'performance'])
class TestPerfPipe(PerfBenchBase):
tools = ['perf']
def test_perf_pipe_smoke(self):
"""
Test PerfPipe workload
Runs a 'PerfPipe' workload and tests that the expected output was
produced.
"""
perf = PerfPipe(self.target, 'perfpipe')
perf.conf(loop=100000)
os.makedirs(self.host_out_dir)
perf.run(out_dir=self.host_out_dir)
self._do_test_performance_json(
['ctime', 'performance', 'usec/op', 'ops/sec'])
| {
"content_hash": "b01259049e71769abcabc72e755a075e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 77,
"avg_line_length": 31.033333333333335,
"alnum_prop": 0.6192266380236305,
"repo_name": "bjackman/lisa",
"id": "c6ecd9c26e33c654a88806eab990ab2a8feee2f8",
"size": "2499",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/lisa/test_wlgen_perf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "40404792"
},
{
"name": "Python",
"bytes": "672179"
},
{
"name": "Shell",
"bytes": "2066"
}
],
"symlink_target": ""
} |
import json
from contentcuration.models import User
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, UserChangeForm, PasswordChangeForm
from le_utils.constants import exercises, licenses
class RegistrationForm(UserCreationForm):
password1 = forms.CharField(widget=forms.PasswordInput, label='Password', required=True)
password2 = forms.CharField(widget=forms.PasswordInput, label='Password (again)', required=True)
class Meta:
model = User
fields = ('first_name', 'last_name', 'email', 'password1', 'password2')
def clean_email(self):
email = self.cleaned_data['email'].strip()
if User.objects.filter(email__iexact=email, is_active=True).exists():
self.add_error('email', 'Email already exists.')
else:
return email
def clean(self):
cleaned_data = super(RegistrationForm, self).clean()
self.check_field('email', 'Email is required.')
self.check_field('first_name', 'First name is required.')
self.check_field('last_name', 'Last name is required.')
if self.check_field('password1', 'Password is required.'):
if 'password2' not in self.cleaned_data or self.cleaned_data['password1'] != self.cleaned_data['password2']:
self.errors['password2'] = self.error_class()
self.add_error('password2', 'Passwords don\'t match.')
else:
self.errors['password2'] = self.error_class()
return self.cleaned_data
def check_field(self, field, error):
if field not in self.cleaned_data:
self.errors[field] = self.error_class()
self.add_error(field, error)
return False
return True
class InvitationForm(UserCreationForm):
password1 = forms.CharField(widget=forms.PasswordInput, label='Password', required=True)
password2 = forms.CharField(widget=forms.PasswordInput, label='Password (again)', required=True)
class Meta:
model = User
fields = ('first_name', 'last_name', 'password1', 'password2')
def clean_email(self):
email = self.cleaned_data['email'].strip()
return email
def clean(self):
cleaned_data = super(InvitationForm, self).clean()
self.check_field('first_name', 'First name is required.')
self.check_field('last_name', 'Last name is required.')
if self.check_field('password1', 'Password is required.'):
if 'password2' not in self.cleaned_data or self.cleaned_data['password1'] != self.cleaned_data['password2']:
self.errors['password2'] = self.error_class()
self.add_error('password2', 'Passwords don\'t match.')
else:
self.errors['password2'] = self.error_class()
return self.cleaned_data
def check_field(self, field, error):
if field not in self.cleaned_data:
self.errors[field] = self.error_class()
self.add_error(field, error)
return False
return True
def save(self, user):
user.set_password(self.cleaned_data["password1"])
user.first_name = self.cleaned_data["first_name"]
user.last_name = self.cleaned_data["last_name"]
        user.is_active = True
user.save()
return user
class InvitationAcceptForm(AuthenticationForm):
    user = None
password = forms.CharField(widget=forms.PasswordInput, label='Password', required=True)
class Meta:
model = User
fields = ('password',)
def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user')
super(InvitationAcceptForm, self).__init__(*args, **kwargs)
def clean(self):
if 'password' not in self.cleaned_data:
self.errors['password'] = self.error_class()
self.add_error('password', 'Password is required.')
elif not self.user.check_password(self.cleaned_data["password"]):
self.errors['password'] = self.error_class()
self.add_error('password', 'Password is incorrect.')
else:
self.confirm_login_allowed(self.user)
return self.cleaned_data
class ProfileSettingsForm(UserChangeForm):
first_name = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control setting_input'}))
last_name = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control setting_input'}))
class Meta:
model = User
fields = ('first_name', 'last_name')
exclude = ('password', 'email')
def clean_password(self):
pass
def clean(self):
cleaned_data = super(ProfileSettingsForm, self).clean()
if 'first_name' not in self.cleaned_data:
self.errors['first_name'] = self.error_class()
self.add_error('first_name', 'First name is required.')
if 'last_name' not in self.cleaned_data:
self.errors['last_name'] = self.error_class()
self.add_error('last_name', 'Last name is required.')
return self.cleaned_data
def save(self, user):
user.first_name = self.cleaned_data["first_name"]
user.last_name = self.cleaned_data["last_name"]
user.save()
return user
class PreferencesSettingsForm(forms.Form):
# TODO: Add language, audio thumbnail, document thumbnail, exercise thumbnail, html5 thumbnail once implemented
author = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control setting_input'}))
copyright_holder = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control setting_input'}))
license_description = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control setting_input'}))
license = forms.ChoiceField(widget=forms.Select(attrs={'class': 'form-control setting_change'}), choices=licenses.choices)
mastery_model = forms.ChoiceField(widget=forms.Select(attrs={'class': 'form-control setting_change'}), choices=exercises.MASTERY_MODELS, label="Mastery at")
m_value = forms.IntegerField(required=False, widget=forms.NumberInput(attrs={'class': 'form-control setting_input setting_change'}), label="M")
n_value = forms.IntegerField(required=False, widget=forms.NumberInput(attrs={'class': 'form-control setting_input setting_change'}), label="N")
auto_derive_video_thumbnail = forms.BooleanField(initial=True, required=False, widget=forms.CheckboxInput(attrs={'class': 'setting_change'}), label="Videos")
auto_derive_audio_thumbnail = forms.BooleanField(initial=True, required=False, widget=forms.CheckboxInput(attrs={'class': 'setting_change'}), label="Audio")
auto_derive_document_thumbnail = forms.BooleanField(initial=True, required=False, widget=forms.CheckboxInput(attrs={'class': 'setting_change'}), label="Documents")
auto_derive_html5_thumbnail = forms.BooleanField(initial=True, required=False, widget=forms.CheckboxInput(attrs={'class': 'setting_change'}), label="HTML Apps")
auto_randomize_questions = forms.BooleanField(initial=True, required=False, widget=forms.CheckboxInput(attrs={'class': 'setting_change'}), label="Automatically randomize question order")
class Meta:
model = User
fields = ('author', 'copyright_holder', 'license', 'license_description', 'mastery_model', 'm_value', 'n_value', 'auto_derive_video_thumbnail', 'auto_randomize_questions')
def save(self, user):
user.preferences = json.dumps({
'author': self.cleaned_data["author"] or "",
'copyright_holder': self.cleaned_data["copyright_holder"],
'license': self.cleaned_data["license"],
'license_description': self.cleaned_data['license_description'] if self.cleaned_data['license'] == 'Special Permissions' else None,
'mastery_model': self.cleaned_data["mastery_model"],
'auto_randomize_questions': self.cleaned_data["auto_randomize_questions"],
'auto_derive_video_thumbnail': self.cleaned_data["auto_derive_video_thumbnail"],
'auto_derive_audio_thumbnail': self.cleaned_data["auto_derive_audio_thumbnail"],
'auto_derive_document_thumbnail': self.cleaned_data["auto_derive_document_thumbnail"],
'auto_derive_html5_thumbnail': self.cleaned_data["auto_derive_html5_thumbnail"],
'm_value': self.cleaned_data["m_value"],
'n_value': self.cleaned_data["n_value"],
})
user.save()
return user
class AccountSettingsForm(PasswordChangeForm):
old_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control setting_input'}))
new_password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control setting_input'}))
new_password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control setting_input'}))
class Meta:
model = User
fields = ('old_password', 'new_password1', 'new_password2')
def clean(self):
cleaned_data = super(AccountSettingsForm, self).clean()
self.check_field('old_password', 'Current password is incorrect.')
if self.check_field('new_password1', 'New password is required.'):
if 'new_password2' not in self.cleaned_data or self.cleaned_data['new_password1'] != self.cleaned_data['new_password2']:
self.errors['new_password2'] = self.error_class()
self.add_error('new_password2', 'New passwords don\'t match.')
else:
self.errors['new_password2'] = self.error_class()
return self.cleaned_data
def check_field(self, field, error):
if field not in self.cleaned_data:
self.errors[field] = self.error_class()
self.add_error(field, error)
return False
return True
| {
"content_hash": "c1caa7cda33380609e2aacaf05b1054d",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 190,
"avg_line_length": 47.229665071770334,
"alnum_prop": 0.6572788977813798,
"repo_name": "jonboiser/content-curation",
"id": "cb7de1b2bf6d6e47f7a1babf471c1e150af179ac",
"size": "9871",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "contentcuration/contentcuration/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111837"
},
{
"name": "HTML",
"bytes": "379797"
},
{
"name": "JavaScript",
"bytes": "333990"
},
{
"name": "Makefile",
"bytes": "792"
},
{
"name": "Python",
"bytes": "359901"
},
{
"name": "Shell",
"bytes": "942"
}
],
"symlink_target": ""
} |
import json
import time
import urllib
from tempest.common.rest_client import RestClient
from tempest import exceptions
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SnapshotsClientJSON(RestClient):
"""Client class to send CRUD Volume API requests."""
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(SnapshotsClientJSON, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.volume.catalog_type
self.build_interval = self.config.volume.build_interval
self.build_timeout = self.config.volume.build_timeout
def list_snapshots(self, params=None):
"""List all the snapshot."""
url = 'snapshots'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshots']
def list_snapshots_with_detail(self, params=None):
"""List the details of all snapshots."""
url = 'snapshots/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshots']
def get_snapshot(self, snapshot_id):
"""Returns the details of a single snapshot."""
url = "snapshots/%s" % str(snapshot_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshot']
def create_snapshot(self, volume_id, **kwargs):
"""
Creates a new snapshot.
volume_id(Required): id of the volume.
force: Create a snapshot even if the volume attached (Default=False)
display_name: Optional snapshot Name.
display_description: User friendly snapshot description.
"""
post_body = {'volume_id': volume_id}
post_body.update(kwargs)
post_body = json.dumps({'snapshot': post_body})
resp, body = self.post('snapshots', post_body, self.headers)
body = json.loads(body)
return resp, body['snapshot']
def update_snapshot(self, snapshot_id, **kwargs):
"""Updates a snapshot."""
put_body = json.dumps({'snapshot': kwargs})
resp, body = self.put('snapshots/%s' % snapshot_id, put_body,
self.headers)
body = json.loads(body)
return resp, body['snapshot']
# NOTE(afazekas): just for the wait function
def _get_snapshot_status(self, snapshot_id):
resp, body = self.get_snapshot(snapshot_id)
status = body['status']
# NOTE(afazekas): snapshot can reach an "error"
# state in a "normal" lifecycle
if (status == 'error'):
raise exceptions.SnapshotBuildErrorException(
snapshot_id=snapshot_id)
return status
    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
    def wait_for_snapshot_status(self, snapshot_id, status):
        """Waits for a Snapshot to reach a given status."""
        start_time = time.time()
        old_value = value = self._get_snapshot_status(snapshot_id)
        while True:
            dtime = time.time() - start_time
            if value != old_value:
                LOG.info('Value transition from "%s" to "%s" '
                         'in %d second(s).', old_value,
                         value, dtime)
            if value == status:
                return value
            if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds) '
                           'while waiting for %s, '
                           'but we got %s.' %
                           (self.build_timeout, status, value))
                raise exceptions.TimeoutException(message)
            time.sleep(self.build_interval)
            old_value = value
            value = self._get_snapshot_status(snapshot_id)
def delete_snapshot(self, snapshot_id):
"""Delete Snapshot."""
return self.delete("snapshots/%s" % str(snapshot_id))
def is_resource_deleted(self, id):
try:
self.get_snapshot(id)
except exceptions.NotFound:
return True
return False
def reset_snapshot_status(self, snapshot_id, status):
"""Reset the specified snapshot's status."""
post_body = json.dumps({'os-reset_status': {"status": status}})
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body,
self.headers)
return resp, body
def update_snapshot_status(self, snapshot_id, status, progress):
"""Update the specified snapshot's status."""
post_body = {
'status': status,
'progress': progress
}
post_body = json.dumps({'os-update_snapshot_status': post_body})
url = 'snapshots/%s/action' % str(snapshot_id)
resp, body = self.post(url, post_body, self.headers)
return resp, body
def create_snapshot_metadata(self, snapshot_id, metadata):
"""Create metadata for the snapshot."""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.post(url, put_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def get_snapshot_metadata(self, snapshot_id):
"""Get metadata of the snapshot."""
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.get(url, self.headers)
body = json.loads(body)
return resp, body['metadata']
def update_snapshot_metadata(self, snapshot_id, metadata):
"""Update metadata for the snapshot."""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.put(url, put_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
"""Update metadata item for the snapshot."""
put_body = json.dumps({'meta': meta_item})
url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
resp, body = self.put(url, put_body, self.headers)
body = json.loads(body)
return resp, body['meta']
def delete_snapshot_metadata_item(self, snapshot_id, id):
"""Delete metadata item for the snapshot."""
url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
resp, body = self.delete(url, self.headers)
return resp, body
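# ----------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original tempest
# client): `client` is assumed to be an already constructed
# SnapshotsClientJSON and `volume_id` an existing volume ID.
def example_snapshot_roundtrip(client, volume_id):
    """Create a snapshot, wait until it is available, then delete it."""
    _, snapshot = client.create_snapshot(volume_id,
                                         display_name='example-snapshot')
    client.wait_for_snapshot_status(snapshot['id'], 'available')
    client.delete_snapshot(snapshot['id'])
    return snapshot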
| {
"content_hash": "e53aaaff36b59b75bcb0cf58c527be75",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 79,
"avg_line_length": 38.839080459770116,
"alnum_prop": 0.5836046167505179,
"repo_name": "BeenzSyed/tempest",
"id": "1a34898f957c5cbbc38e750e6e3502bac358db59",
"size": "7331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/services/volume/json/snapshots_client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2613370"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
} |
from ...ags._geoprocessing import *
from ..._abstract import abstract
########################################################################
class analysis(abstract.BaseAGOLClass):
"""
ArcGIS Online is a collaborative, cloud-based platform that lets
members of an organization create, share, and access maps,
applications, and data, including authoritative basemaps published
by Esri. Through ArcGIS Online, you get access to Esri's secure
cloud, where you can manage, create, store, and access hosted web
services.
ArcGIS Online includes the Spatial Analysis service. The Spatial
Analysis service contains a number of tasks, listed below, that you
can access and use in your applications. Using Spatial Analysis
service tasks consumes credits. For more information on credits, see
see Service credits overview which includes access to an interactive
Service Credits Estimator.
Site Reference: https://developers.arcgis.com/rest/analysis/
Inputs:
securityHandler - ArcGIS Online security handler object
url - optional url to the site.
ex: http://www.arcgis.com/sharing/rest
proxy_url - optional proxy IP
proxy_port - optional proxy port required if proxy_url specified
Basic Usage:
import arcrest
import arcrest.agol as agol
if __name__ == "__main__":
username = "username"
password = "password"
sh = arcrest.AGOLTokenSecurityHandler(username, password)
a = agol.analysis(securityHandler=sh)
for task in a.tasks:
if task.name.lower() == "aggregatepoints":
for params in task.parameters:
print params
"""
_proxy_url = None
_proxy_port = None
_url = None
_analysis_url = None
_securityHandler = None
_gpService = None
#----------------------------------------------------------------------
def __init__(self,
securityHandler,
url=None,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if url is None:
self._url = "https://www.arcgis.com/sharing/rest"
else:
if url.find("/sharing/rest") == -1:
url = url + "/sharing/rest"
self._url = url
self._securityHandler = securityHandler
self._proxy_url = proxy_url
self._proxy_port = proxy_port
self.__init_url()
#----------------------------------------------------------------------
def __init_url(self):
"""loads the information into the class"""
portals_self_url = "{}/portals/self".format(self._url)
params = {
"f" :"json"
}
res = self._do_get(url=portals_self_url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
if "helperServices" in res:
helper_services = res.get("helperServices")
if "analysis" in helper_services:
analysis_service = helper_services.get("analysis")
if "url" in analysis_service:
self._analysis_url = analysis_service.get("url")
self._gpService = GPService(url=self._analysis_url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=False)
#----------------------------------------------------------------------
@property
def gpService(self):
"""returns the geoprocessing object"""
if self._gpService is None:
self.__init_url()
return self._gpService
#----------------------------------------------------------------------
@property
def tasks(self):
"""returns the available analysis tasks"""
return self.gpService.tasks
| {
"content_hash": "ae0b663bff1c548f7e3c75fc8e851b0a",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 75,
"avg_line_length": 41.81818181818182,
"alnum_prop": 0.5246376811594203,
"repo_name": "jgravois/ArcREST",
"id": "4d734fec16834bd31f1e72edebedceac08bc00d8",
"size": "4140",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/arcrest/agol/helperservices/analysis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1591951"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import date
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
from django.utils.unittest import skipUnless
if HAS_GEOS:
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
        # These are the points that are components of the aggregate geographic
        # union that is returned. Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
            # Looks like PostGIS orders the points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertTrue(isinstance(d['point'], Geometry))
self.assertTrue(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertTrue('Aurora' in names)
self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
sql = str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
| {
"content_hash": "7f1fe9eb218a6be46cfbfd1197f8d7bd",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 155,
"avg_line_length": 48.68078175895766,
"alnum_prop": 0.6396788223486116,
"repo_name": "eltonsantos/django",
"id": "6320edcff5d9dbf078fe9277e099c24a5db7a984",
"size": "14945",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/contrib/gis/tests/relatedapp/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Unit test suite for the docx.opc.parts.coreprops module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from datetime import datetime, timedelta
import pytest
from docx.opc.coreprops import CoreProperties
from docx.opc.parts.coreprops import CorePropertiesPart
from docx.oxml.parts.coreprops import CT_CoreProperties
from ...unitutil.mock import class_mock, instance_mock
class DescribeCorePropertiesPart(object):
def it_provides_access_to_its_core_props_object(self, coreprops_fixture):
core_properties_part, CoreProperties_ = coreprops_fixture
core_properties = core_properties_part.core_properties
CoreProperties_.assert_called_once_with(core_properties_part.element)
assert isinstance(core_properties, CoreProperties)
def it_can_create_a_default_core_properties_part(self):
core_properties_part = CorePropertiesPart.default(None)
assert isinstance(core_properties_part, CorePropertiesPart)
core_properties = core_properties_part.core_properties
assert core_properties.title == 'Word Document'
assert core_properties.last_modified_by == 'python-docx'
assert core_properties.revision == 1
delta = datetime.utcnow() - core_properties.modified
max_expected_delta = timedelta(seconds=2)
assert delta < max_expected_delta
# fixtures ---------------------------------------------
@pytest.fixture
def coreprops_fixture(self, element_, CoreProperties_):
core_properties_part = CorePropertiesPart(None, None, element_, None)
return core_properties_part, CoreProperties_
# fixture components -----------------------------------
@pytest.fixture
def CoreProperties_(self, request):
return class_mock(request, 'docx.opc.parts.coreprops.CoreProperties')
@pytest.fixture
def element_(self, request):
return instance_mock(request, CT_CoreProperties)
| {
"content_hash": "fc2f3d702b57e623f1db1820185050fa",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 36.629629629629626,
"alnum_prop": 0.6976744186046512,
"repo_name": "holli-holzer/python-docx",
"id": "f324f15db65a63a7b9be4e97a2b6ce809fffcbaa",
"size": "1997",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/opc/parts/test_coreprops.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6692"
},
{
"name": "Python",
"bytes": "783349"
}
],
"symlink_target": ""
} |
"""
=======
Entropy
=======
In information theory, information entropy is the log-base-2 of the number of
possible outcomes for a message.
For an image, local entropy is related to the complexity contained in a given
neighborhood, typically defined by a structuring element. The entropy filter can
detect subtle variations in the local gray level distribution.
In the first example, the image is composed of two surfaces with two slightly
different distributions. The image has a uniform random distribution in the
range [-15, +15] in the middle of the image and a uniform random distribution in
the range [-14, +14] at the image borders, both centered at a gray value of 128.
To detect the central square, we compute the local entropy measure using a
circular structuring element of a radius big enough to capture the local gray
level distribution. The second example shows how to detect texture in the camera
image using a smaller structuring element.
"""
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk
# First example: object detection.
noise_mask = 28 * np.ones((128, 128), dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30
noise = (noise_mask * np.random.random(noise_mask.shape) - 0.5 *
noise_mask).astype(np.uint8)
img = noise + 128
entr_img = entropy(img, disk(10))
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(8, 3))
ax0.imshow(noise_mask, cmap=plt.cm.gray)
ax0.set_xlabel("Noise mask")
ax1.imshow(img, cmap=plt.cm.gray)
ax1.set_xlabel("Noisy image")
ax2.imshow(entr_img)
ax2.set_xlabel("Local entropy")
fig.tight_layout()
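# A quick numerical check of the "log-base-2 of possible outcomes" idea from
# the docstring above (an illustrative addition, not part of the original
# gallery example): a constant image carries 0 bits of entropy, while an
# 8-bit uniform random image approaches 8 bits.
def global_shannon_entropy(values):
    counts = np.bincount(values.ravel().astype(np.int64), minlength=256)
    p = counts[counts > 0].astype(float) / counts.sum()
    entropy_bits = -np.sum(p * np.log2(p))
    return entropy_bits + 0.0  # + 0.0 turns the -0.0 of a constant image into 0.0
print("constant image: %.2f bits"
      % global_shannon_entropy(np.zeros((64, 64), dtype=np.uint8)))
print("uniform random image: %.2f bits"
      % global_shannon_entropy(
          np.random.randint(0, 256, (512, 512)).astype(np.uint8)))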
# Second example: texture detection.
image = img_as_ubyte(data.camera())
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(10, 4), sharex=True,
sharey=True,
subplot_kw={"adjustable": "box-forced"})
img0 = ax0.imshow(image, cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
fig.colorbar(img0, ax=ax0)
img1 = ax1.imshow(entropy(image, disk(5)), cmap=plt.cm.gray)
ax1.set_title("Entropy")
ax1.axis("off")
fig.colorbar(img1, ax=ax1)
fig.tight_layout()
plt.show()
| {
"content_hash": "42e7b553f5355be8bf664a61f23fa213",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 80,
"avg_line_length": 30.602739726027398,
"alnum_prop": 0.7211280214861235,
"repo_name": "ofgulban/scikit-image",
"id": "248436fb16c8d5bf79e8b56848bc9e4bc5a259d0",
"size": "2234",
"binary": false,
"copies": "9",
"ref": "refs/heads/ncut-rag-options",
"path": "doc/examples/filters/plot_entropy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "235642"
},
{
"name": "C++",
"bytes": "44817"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "2535247"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('foodroller', '0007_auto_20160926_1334'),
]
operations = [
migrations.AddField(
model_name='category',
name='user',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='food',
name='user',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='foodplan',
name='user',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| {
"content_hash": "ec6ca4b3bec99b8b698752699482f652",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 137,
"avg_line_length": 34.54838709677419,
"alnum_prop": 0.6414565826330533,
"repo_name": "zenith0/foodroller",
"id": "1814576d40cab1a1654c70f13251a70f8f0c6cf5",
"size": "1141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foodroller/migrations/0008_auto_20160927_1129.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "79506"
},
{
"name": "HTML",
"bytes": "36105"
},
{
"name": "JavaScript",
"bytes": "145943"
},
{
"name": "Python",
"bytes": "68972"
}
],
"symlink_target": ""
} |
from email.MIMEText import MIMEText
message = """ Hello,
This is a test message from Chapter 9. I hope you enjoy it!
-- Anonymous"""
msg = MIMEText(message)
msg['TO'] = '[email protected]'
msg['FROM'] = '[email protected]'
msg['Subject'] = 'Test Message, Chapter 9'
print msg.as_string()
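# A minimal sending sketch (an illustrative addition, not part of the original
# example); it assumes an SMTP server is reachable on localhost.
import smtplib
def send_message(mime_msg):
    server = smtplib.SMTP('localhost')
    try:
        server.sendmail(mime_msg['FROM'], [mime_msg['TO']], mime_msg.as_string())
    finally:
        server.quit()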
| {
"content_hash": "1759bee9f6ba158e7691fccd530543e7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 59,
"avg_line_length": 26.181818181818183,
"alnum_prop": 0.6944444444444444,
"repo_name": "chenzhengchen200821109/github-python",
"id": "71ae8256b36a83907e0675dc8f1a3674349c1427",
"size": "348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trad_gen_simple.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27232"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/engine/shared_eng_sfs_modified_imperial_3.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","eng_sfs_modified_imperial_3_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "ccef2376ef16bfa54f24176d59ba8ae9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 98,
"avg_line_length": 27.23076923076923,
"alnum_prop": 0.7146892655367232,
"repo_name": "anhstudios/swganh",
"id": "a87110c4e6af6629c18df43a449be2092863f241",
"size": "499",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ship/components/engine/shared_eng_sfs_modified_imperial_3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
'''
Created on 2013-8-8
@author: lan (www.9miao.com)
'''
from gfirefly import management
import sys
if __name__ == "__main__":
args = sys.argv
management.execute_commands(*args) | {
"content_hash": "3d923c9a34b12a2a6218e2823dde3fc9",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 38,
"avg_line_length": 17,
"alnum_prop": 0.6524064171122995,
"repo_name": "yangdw/repo.python",
"id": "96087b66a223a23a4a7eb112d9bde753a3c00de5",
"size": "200",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/annotation/gfirefly/gfirefly/gfirefly/script/gfirefly-admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2746"
},
{
"name": "Protocol Buffer",
"bytes": "11054"
},
{
"name": "Python",
"bytes": "1549455"
},
{
"name": "SQLPL",
"bytes": "3364"
}
],
"symlink_target": ""
} |
import atexit
import os
import shutil
import tempfile
# Put gen_py cache in temp directory.
supportdir = tempfile.mkdtemp()
# gen_py has to be put into directory 'gen_py'.
genpydir = os.path.join(supportdir, 'gen_py')
# Create 'gen_py' directory. This directory does not need
# to contain '__init__.py' file.
try:
    # The win32com gencache cannot be put directly into 'supportdir' under an
    # arbitrary name; it has to live in a directory called 'gen_py'. That is
    # why this directory is created inside 'supportdir'.
os.makedirs(genpydir)
# Remove temp directory at application exit and ignore any errors.
atexit.register(shutil.rmtree, supportdir, ignore_errors=True)
except OSError:
pass
# Override the default path to gen_py cache.
import win32com
win32com.__gen_path__ = genpydir
# The __loader__ attribute is needed for the 'pkg_resources' module to work,
# but on Windows it breaks pywin32 (win32com) and the 'basic/test_pyttsx' test
# fails. Removing that attribute from win32com fixes this, and the gencache is
# created properly.
if hasattr(win32com, '__loader__'):
del win32com.__loader__
# Ensure genpydir is in 'gen_py' module paths.
import win32com.gen_py
win32com.gen_py.__path__.insert(0, genpydir)
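# Net effect of the overrides above (summary comment, an illustrative
# addition): COM wrappers generated through win32com.client.gencache are
# written under `genpydir` and removed together with `supportdir` when the
# frozen application exits.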
| {
"content_hash": "3473c4199695744e523e0147f99f072e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 83,
"avg_line_length": 30.875,
"alnum_prop": 0.7319838056680162,
"repo_name": "deandunbar/bitwave",
"id": "e37b73fbcf9a53f41bd1a3e35ebc1463358519ac",
"size": "2127",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "hackathon_version/venv/lib/python2.7/site-packages/PyInstaller/loader/rthooks/pyi_rth_win32comgenpy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6018"
},
{
"name": "CSS",
"bytes": "53871"
},
{
"name": "HTML",
"bytes": "1159945"
},
{
"name": "JavaScript",
"bytes": "531995"
},
{
"name": "Makefile",
"bytes": "120304"
},
{
"name": "Python",
"bytes": "9014662"
},
{
"name": "Shell",
"bytes": "3833"
},
{
"name": "TeX",
"bytes": "164573"
}
],
"symlink_target": ""
} |
"""Upload Expressions."""
import gzip
import io
import json
from pathlib import Path
import pandas as pd
from resolwe.process import (
DataField,
FileField,
JsonField,
Persistence,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
def parse_expression_file(exp_file, exp_type):
"""Parse expression file to a Pandas dataframe."""
with gzip.open(exp_file) as exp:
df = pd.read_csv(exp, sep="\t", float_precision="round_trip")
df.rename(
index=str,
columns={
"Gene": "FEATURE_ID",
"Expression": exp_type,
},
inplace=True,
)
# Cast FEATURE_ID column to string
df["FEATURE_ID"] = df["FEATURE_ID"].astype("str")
# Remove any possible empty rows from the input file
df.dropna(inplace=True)
return df
def prepare_expression_set(exp, exp_type, feature_dict, outfile_name, rc=None):
"""Prepare expression set output data."""
exp = parse_expression_file(exp_file=exp, exp_type=exp_type)
exp["GENE_SYMBOL"] = exp["FEATURE_ID"].map(feature_dict)
input_features = exp["FEATURE_ID"].tolist()
# Check if all of the input feature IDs could be mapped to the gene symbols
if not all(f_id in feature_dict for f_id in input_features):
print(
f"{sum(exp.isnull().values.ravel())} feature(s) "
f"could not be mapped to the associated feature symbols."
)
# Merge expression values and reorder columns
if rc:
rc = parse_expression_file(exp_file=rc, exp_type="RAW_COUNT")
exp_set = exp.merge(rc, on="FEATURE_ID")
columns = ["FEATURE_ID", "GENE_SYMBOL", "RAW_COUNT", exp_type]
else:
exp_set = exp
columns = ["FEATURE_ID", "GENE_SYMBOL", exp_type]
exp_set = exp_set[columns]
# Replace NaN values with empty string
exp_set.fillna("", inplace=True)
# Write to file
exp_set.to_csv(
outfile_name + ".txt.gz",
header=True,
index=False,
sep="\t",
compression="gzip",
)
# Write to JSON
df_dict = exp_set.set_index("FEATURE_ID").to_dict(orient="index")
with open(outfile_name + ".json", "w") as f:
json.dump({"genes": df_dict}, f, allow_nan=False)
def expression_to_storage(infile, outfile):
"""Convert expressions file to JSON format."""
def isfloat(value):
"""Check if value is float."""
try:
float(value)
return True
except ValueError:
return False
with io.TextIOWrapper(io.BufferedReader(gzip.open(infile))) as f:
# Split lines by tabs
# Ignore lines without a number in second column
# Build a dictionary of gene-expression pairs
exp = {
"genes": {
gene_exp[0]: float(gene_exp[1])
for gene_exp in (l.split("\t") for l in f)
if len(gene_exp) == 2 and isfloat(gene_exp[1])
}
}
with open(file=outfile, mode="wt") as f:
json.dump(exp, f)
return outfile
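# Illustrative helper (an addition, not part of the original module): shows,
# end to end, the gzipped two-column format that expression_to_storage()
# expects and the JSON it produces. The gene ID used below is only a sample.
def _demo_expression_to_storage(tmp_dir="."):
    """Write a tiny expression file and convert it to JSON storage format."""
    demo_in = str(Path(tmp_dir) / "demo_exp.tab.gz")
    demo_out = str(Path(tmp_dir) / "demo_exp.json")
    with gzip.open(demo_in, "wt") as handle:
        # Two tab-separated columns: gene ID and expression value. The header
        # line is skipped by expression_to_storage() because "Expression" is
        # not a number.
        handle.write("Gene\tExpression\nENSG00000000003\t12.5\n")
    expression_to_storage(infile=demo_in, outfile=demo_out)
    # demo_exp.json now holds {"genes": {"ENSG00000000003": 12.5}}
    return demo_out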
def replace_extension(infile):
"""Replace extensions of file."""
extensions = "".join(Path(str(infile)).suffixes[-2:])
new_ext = ".tab.gz"
outfile = str(infile).replace(extensions, new_ext)
return outfile
class UploadExpression(ProcessBio):
"""Upload expression data.
Upload expression data by providing raw expression data (read counts)
and/or normalized expression data together with the associated data
normalization type.
"""
slug = "upload-expression"
name = "Expression data"
process_type = "data:expression"
version = "2.6.0"
category = "Import"
data_name = "{{ exp_name }}"
scheduling_class = SchedulingClass.BATCH
persistence = Persistence.RAW
entity = {
"type": "sample",
}
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
},
"resources": {
"cores": 1,
"memory": 1024,
"network": True,
},
}
class Input:
"""Input fields to process UploadExpression."""
rc = FileField(
label="Read counts (raw expression)",
description="Reads mapped to genomic features (raw count data). "
"Supported extensions: .txt.gz (preferred), .tab.*, .txt.* or .tsv.*",
required=False,
)
exp = FileField(
label="Normalized expression",
description="Normalized expression data. Supported extensions: .tab.gz "
"(preferred), .tab.*, .txt.* or .tsv.*",
required=False,
)
exp_name = StringField(
label="Expression name",
)
exp_type = StringField(
label="Normalization type",
description="Normalization type",
required=False,
)
source = StringField(
label="Gene ID source",
allow_custom_choice=True,
choices=[
("AFFY", "AFFY"),
("DICTYBASE", "DICTYBASE"),
("ENSEMBL", "ENSEMBL"),
("NCBI", "NCBI"),
("UCSC", "UCSC"),
],
)
species = StringField(
label="Species",
description="Species latin name.",
allow_custom_choice=True,
choices=[
("Homo sapiens", "Homo sapiens"),
("Mus musculus", "Mus musculus"),
("Rattus norvegicus", "Rattus norvegicus"),
("Dictyostelium discoideum", "Dictyostelium discoideum"),
],
)
build = StringField(
label="Build", description="Genome build or annotation version."
)
feature_type = StringField(
label="Feature type",
allow_custom_choice=True,
default="gene",
choices=[
("gene", "gene"),
("transcript", "transcript"),
("exon", "exon"),
],
)
class Output:
"""Output fields to process UploadExpression."""
exp = FileField(label="Normalized expression")
rc = FileField(
label="Read counts",
required=False,
description="Reads mapped to genomic features.",
)
exp_json = JsonField(label="Expression (json)")
exp_type = StringField(label="Expression type")
exp_set = FileField(label="Expressions")
exp_set_json = JsonField(label="Expressions (json)")
source = StringField(label="Gene ID source")
species = StringField(label="Species")
build = StringField(label="Build")
feature_type = StringField(label="Feature type")
def run(self, inputs, outputs):
"""Run analysis."""
supported_extensions = (".txt", ".tab", ".tsv")
if not inputs.exp and not inputs.rc:
self.error("Please provide raw or/and normalized expression files.")
elif inputs.exp and not inputs.exp_type:
self.error(
"Please provide normalization type together with normalized expressions."
)
elif not inputs.exp and inputs.exp_type and inputs.rc:
self.error("Please provide raw or/and normalized expression files.")
elif inputs.rc and not inputs.exp and not inputs.exp_type:
rc = inputs.rc.import_file(imported_format="compressed")
exp = inputs.rc.import_file(imported_format="compressed")
exp_type = "RAW_COUNT"
stem = Path(rc).stem
elif inputs.exp and inputs.exp_type and not inputs.rc:
exp = inputs.exp.import_file(imported_format="compressed")
stem = Path(exp).stem
exp_type = inputs.exp_type
else:
rc = inputs.rc.import_file(imported_format="compressed")
exp = inputs.exp.import_file(imported_format="compressed")
stem = Path(rc).stem
stem_exp = Path(exp).stem
if not stem_exp.endswith(supported_extensions):
self.error(
f"The imported file has unsupported file name extension. "
f"The supported extensions are {supported_extensions}."
)
exp_type = inputs.exp_type
if not stem.endswith(supported_extensions):
self.error(
"The imported file has unsupported file name extension. "
f"The supported extensions are {supported_extensions}."
)
name = stem[:-4]
# Save the abundance estimates to JSON storage
expression_to_storage(infile=exp, outfile="json.txt")
# Prepare the expression set outputs
feature_ids = pd.read_csv(exp, sep="\t", index_col="Gene").index.tolist()
feature_filters = {
"source": inputs.source,
"species": inputs.species,
"feature_id__in": feature_ids,
}
feature_ids_to_names = {
f.feature_id: f.name for f in self.feature.filter(**feature_filters)
}
if inputs.rc and inputs.exp:
prepare_expression_set(
exp=exp,
exp_type=exp_type,
feature_dict=feature_ids_to_names,
outfile_name=f"{name}_expressions",
rc=rc,
)
else:
prepare_expression_set(
exp=exp,
exp_type=exp_type,
feature_dict=feature_ids_to_names,
outfile_name=f"{name}_expressions",
)
# Change suffixes of exp file
exp_final = replace_extension(infile=exp)
Path(exp).rename(exp_final)
exp = Path(exp_final).name
if inputs.rc and inputs.exp:
# Change suffixes of rc file
rc_final = replace_extension(infile=rc)
Path(rc).rename(rc_final)
rc = Path(rc_final).name
outputs.rc = rc
elif inputs.rc and not inputs.exp:
rc = exp
outputs.rc = rc
outputs.exp_type = exp_type
outputs.exp = exp
outputs.exp_json = "json.txt"
outputs.exp_set = f"{name}_expressions.txt.gz"
outputs.exp_set_json = f"{name}_expressions.json"
outputs.source = inputs.source
outputs.species = inputs.species
outputs.build = inputs.build
outputs.feature_type = inputs.feature_type
class UploadExpressionCuffnorm(ProcessBio):
"""Upload expression data by providing Cuffnorm results."""
slug = "upload-expression-cuffnorm"
name = "Expression data (Cuffnorm)"
process_type = "data:expression"
version = "1.8.0"
category = "Import"
data_name = '{{ exp.file|default("?") }}'
scheduling_class = SchedulingClass.BATCH
persistence = Persistence.RAW
entity = {
"type": "sample",
}
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
},
"resources": {
"cores": 1,
"memory": 1024,
"network": True,
},
}
class Input:
"""Input fields for UploadExpressionCuffnorm."""
exp = FileField(label="Normalized expression")
cxb = DataField(
"cufflinks:cuffquant",
label="Cuffquant analysis",
description="Cuffquant analysis.",
)
exp_type = StringField(
label="Normalization type",
default="Cuffnorm",
)
class Output:
"""Output fields for UploadExpressionCuffnorm."""
exp = FileField(
label="Normalized expression",
description="Normalized expression",
)
exp_json = JsonField(
label="Expression (json)",
)
exp_type = StringField(
label="Expression type",
)
exp_set = FileField(
label="Expressions",
)
exp_set_json = JsonField(
label="Expressions (json)",
)
source = StringField(
label="Gene ID source",
)
species = StringField(label="Species")
build = StringField(
label="Build",
)
feature_type = StringField(label="Feature type")
def run(self, inputs, outputs):
"""Run analysis."""
if inputs.exp and not inputs.exp_type:
self.error(
"Please provide normalization type together with normalized expressions."
)
elif inputs.exp and inputs.exp_type and inputs.cxb:
exp = inputs.exp.import_file(imported_format="compressed")
stem = Path(exp).stem
name = stem[:-4]
# Save the abundance estimates to JSON storage
expression_to_storage(infile=exp, outfile="json.txt")
# Prepare the expression set outputs
feature_ids = pd.read_csv(exp, sep="\t", index_col="Gene").index.tolist()
feature_filters = {
"source": inputs.cxb.output.source,
"species": inputs.cxb.output.species,
"feature_id__in": feature_ids,
}
feature_ids_to_names = {
f.feature_id: f.name for f in self.feature.filter(**feature_filters)
}
prepare_expression_set(
exp=exp,
exp_type=inputs.exp_type,
feature_dict=feature_ids_to_names,
outfile_name=f"{name}_expressions",
)
outputs.exp_type = inputs.exp_type
outputs.exp = exp
outputs.exp_json = "json.txt"
outputs.exp_set = f"{name}_expressions.txt.gz"
outputs.exp_set_json = f"{name}_expressions.json"
outputs.source = inputs.cxb.output.source
outputs.species = inputs.cxb.output.species
outputs.build = inputs.cxb.output.build
outputs.feature_type = "gene"
| {
"content_hash": "b9595988841b44d408467903c8ec072f",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 89,
"avg_line_length": 32.29478458049886,
"alnum_prop": 0.5535037213874456,
"repo_name": "genialis/resolwe-bio",
"id": "68d0ffc15f7a9488c876f1c016eef1352b7ee60e",
"size": "14242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resolwe_bio/processes/import_data/expressions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10553"
},
{
"name": "PLpgSQL",
"bytes": "4491"
},
{
"name": "Python",
"bytes": "1729619"
},
{
"name": "R",
"bytes": "20619"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
} |
def get_sync_indel_window(wildcards):
return params["popoolation2"]["find_indels"]["indel_window"]
def get_sync_min_count(wildcards):
return params["popoolation2"]["find_indels"]["min_count"]
def compose_mpileups_comma(wildcards):
chromosome = wildcards.chromosome
mpileups = [
MPILEUP_RAW + population + "/" + population + "." + chromosome +
".mpileup.gz"
for population in POPULATIONS
]
composed = "{" + ",".join(mpileups) + "}"
return composed
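# Illustrative sketch (hypothetical values): with MPILEUP_RAW = "results/mpileup_raw/",
# POPULATIONS = ["pop1", "pop2"] and chromosome "2L", this returns
# "{results/mpileup_raw/pop1/pop1.2L.mpileup.gz,results/mpileup_raw/pop2/pop2.2L.mpileup.gz}",
# a bash brace expression that the eval-ed shell commands below expand so gzip -dc
# reads every population's mpileup in one paste.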
def get_sync_min_qual(wildcards):
return params["popoolation2"]["subsample"]["min_qual"]
def get_sync_target_coverage(wildcards):
return params["popoolation2"]["subsample"]["target_coverage"]
def compose_max_coverages(wildcards):
coverages = (
samples
[["population", "max_coverage"]]
.drop_duplicates()
["max_coverage"]
.values
.tolist()
)
coverages = map(str, coverages)
return ",".join(coverages)
def get_sync_subsample_method(wildcards):
return params["popoolation2"]["subsample"]["method"]
rule sync_identify_indels:
"""
Identify indels like in mpileup_popoolation_identify_indels, but all
together
"""
input:
mpileups = expand(
MPILEUP_RAW + "{population}/{population}.{chromosome}.mpileup.gz",
population=POPULATIONS,
chromosome="{chromosome}"
)
output:
gtf = temp(SYNC_FILT + "{chromosome}.gtf")
params:
indel_window = get_sync_indel_window,
        min_count = get_sync_min_count,
mpileups_comma = compose_mpileups_comma
log:
SYNC_FILT + "{chromosome}.identify_indels.log"
benchmark:
SYNC_FILT + "{chromosome}.identify_indels.bmk"
conda:
"sync.yml"
shell:
"""
(eval \
paste <(gzip -dc {input.mpileups[0]} | cut -f 1-3) \
'<(gzip -dc '{params.mpileups_comma}' | cut -f 4-6 )' \
| perl src/popoolation2_1201/indel_filtering/\
identify-indel-regions.pl \
--input /dev/stdin \
--output {output.gtf} \
--indel-window {params.indel_window} \
--min-count {params.min_count}) \
2> {log} 1>&2
"""
rule sync_filter_indels:
"""Filter indels from the joint mpileup"""
input:
mpileups = expand(
MPILEUP_RAW + "{population}/{population}.{chromosome}.mpileup.gz",
population=POPULATIONS,
chromosome="{chromosome}"
),
gtf = SYNC_FILT + "{chromosome}.gtf"
output:
mpileup_fifo = temp(SYNC_FILT + "{chromosome}.mpileup"),
mpileup_gz = SYNC_FILT + "{chromosome}.mpileup.gz"
params:
mpileups_comma = compose_mpileups_comma
log:
SYNC_FILT + "{chromosome}.filter_indels.log"
benchmark:
SYNC_FILT + "{chromosome}.filter_indels.bmk"
conda:
"sync.yml"
shell:
"""
mkfifo {output.mpileup_fifo}
(cat {output.mpileup_fifo} | gzip --fast > {output.mpileup_gz} &)
(eval \
paste <(gzip -dc {input.mpileups[0]} | cut -f 1-3) \
'<(gzip -dc '{params.mpileups_comma}' | cut -f 4-6 )' \
| perl src/popoolation2_1201/indel_filtering/filter-sync-by-gtf.pl \
--input /dev/stdin \
--gtf {input.gtf} \
--output {output.mpileup_fifo}) \
2> {log} 1>&2
"""
rule sync_mpileup2sync:
"""Convert joint mpileup to sync
- mpileup2sync returns error always, and that is why there is a || true.
- Next step requires a proper file
"""
input:
mpileup_gz = SYNC_FILT + "{chromosome}.mpileup.gz"
output:
sync = temp(SYNC_RAW + "{chromosome}.sync") # TEMP!
params:
min_qual = get_sync_min_qual,
mpileup2sync = "src/popoolation2_1201/mpileup2sync.jar"
threads: # Required for java not doing more than needed
1
log:
SYNC_RAW + "{chromosome}.log"
benchmark:
SYNC_RAW + "{chromosome}.json"
resources:
memory_gb = params["popoolation2"]["subsample"]["memory_gb"]
conda:
"sync.yml"
shell:
"""
(gzip --decompress --stdout {input.mpileup_gz} \
| java -Xmx{resources.memory_gb}g -jar {params.mpileup2sync} \
--input /dev/stdin \
--output {output.sync} \
--fastq-type sanger \
--min-qual {params.min_qual} \
--threads {threads} \
|| true) \
2> {log} 1>&2
"""
rule sync_subsample:
"""
Subsample a sync file.
Note: A proper file is required as input and output.
"""
input:
sync = SYNC_RAW + "{chromosome}.sync"
output:
sync = temp(SYNC_SUB + "{chromosome}.sync")
params:
target_coverage = get_sync_target_coverage,
max_coverage = compose_max_coverages,
method = get_sync_subsample_method
log:
SYNC_SUB + "{chromosome}.log"
benchmark:
SYNC_SUB + "{chromosome}.json"
conda:
"sync.yml"
shell:
"""
perl src/popoolation2_1201/subsample-synchronized.pl \
--input {input.sync} \
--output {output.sync} \
--target-coverage {params.target_coverage} \
--max-coverage {params.max_coverage} \
--method {params.method} \
2> {log} 1>&2
"""
rule sync_compress:
input:
sync = SYNC_SUB + "{chromosome}.sync"
output:
sync_gz = protected(SYNC_SUB + "{chromosome}.sync.gz")
threads:
4
shell:
"pigz --best --keep {output.sync}"
rule sync:
input:
[SYNC_SUB + chromosome + ".sync.gz" for chromosome in CHROMOSOMES]
| {
"content_hash": "05252d5ef0b190bbbe29ca0f120cf079",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 78,
"avg_line_length": 28.43069306930693,
"alnum_prop": 0.56312032039004,
"repo_name": "jlanga/smsk_popoolation",
"id": "f2f0147b4198fd04798765ad7e843e9279fe524d",
"size": "5743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/snakefiles/sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "15487"
},
{
"name": "Perl",
"bytes": "607182"
},
{
"name": "Python",
"bytes": "39328"
},
{
"name": "R",
"bytes": "3647"
},
{
"name": "Shell",
"bytes": "2635"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from chaco.color_bar import *
| {
"content_hash": "fe45f6f7ea4469432927c3da6e8c1471",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 38,
"avg_line_length": 34.5,
"alnum_prop": 0.7681159420289855,
"repo_name": "enthought/etsproxy",
"id": "88085a075617230efd3136f3e0351463df4db461",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/chaco/color_bar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow and Renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetAlphaBitPlanes(1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create a default polygonal sphere
sphere = vtk.vtkSphereSource()
sphmapper = vtk.vtkPolyDataMapper()
sphmapper.SetInputConnection(sphere.GetOutputPort())
sphactor = vtk.vtkActor()
sphactor.SetMapper(sphmapper)
# Add the actors to the renderer, set the background to initial
# color (which is also transparent), set size.
ren1.AddActor(sphactor)
ren1.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(256, 256)
# render first image
renWin.Render()
if 0 == renWin.GetAlphaBitPlanes():
print "Failed to find a visual with alpha bit planes."
exit(0)
else:
print "GetAlphaBitPlanes: " + str(renWin.GetAlphaBitPlanes())
# create window to image filter, grabbing RGB and alpha
w2i = vtk.vtkWindowToImageFilter()
w2i.SetInput(renWin)
w2i.SetInputBufferTypeToRGBA()
# grab window
w2i.Update()
# copy the output
outputData = w2i.GetOutput().NewInstance()
outputData.DeepCopy(w2i.GetOutput())
# set up mappers and actors to display the image
im = vtk.vtkImageMapper()
im.SetColorWindow(255)
im.SetColorLevel(127.5)
im.SetInputData(outputData)
ia2 = vtk.vtkActor2D()
ia2.SetMapper(im)
# now, change the image (background is now green)
sphactor.SetScale(2, 2, 2)
ren1.SetBackground(0, 1, 0)
# add the image of the sphere (keeping the original sphere too)
ren1.AddActor(ia2)
ren1.SetViewport(0, 0, 1, 1)
# render result (the polygonal sphere appears behind a smaller image
# of itself). Background of original image is transparent, so you
# can see through it back to the larger sphere and new background.
renWin.Render()
#iren.Start();
| {
"content_hash": "d40af3a3003d83d8411ddd9845a2e1d8",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 68,
"avg_line_length": 26.43661971830986,
"alnum_prop": 0.7671816728822589,
"repo_name": "hendradarwin/VTK",
"id": "6cb4afd2a72855bce6c6c612576b818eef35d388",
"size": "1899",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "Rendering/Core/Testing/Python/TestWindowToImageTransparency.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "Bison",
"bytes": "174503"
},
{
"name": "C",
"bytes": "51910235"
},
{
"name": "C++",
"bytes": "67775294"
},
{
"name": "CSS",
"bytes": "186729"
},
{
"name": "Cuda",
"bytes": "29062"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Java",
"bytes": "196895"
},
{
"name": "JavaScript",
"bytes": "1111664"
},
{
"name": "Objective-C",
"bytes": "93926"
},
{
"name": "Objective-C++",
"bytes": "257535"
},
{
"name": "Pascal",
"bytes": "3255"
},
{
"name": "Perl",
"bytes": "173168"
},
{
"name": "Prolog",
"bytes": "4406"
},
{
"name": "Python",
"bytes": "15934475"
},
{
"name": "Shell",
"bytes": "61955"
},
{
"name": "Tcl",
"bytes": "1887699"
}
],
"symlink_target": ""
} |
class Other(object):
def override(self):
print("OTHER override()")
def implicit(self):
print("OTHER implicit()")
def altered(self):
print("OTHER altered()")
class Child(object):
def __init__(self):
self.other = Other()
def implicit(self):
self.other.implicit()
def override(self):
print("CHILD override()")
def altered(self):
print("CHILD, BEFORE OTHER altered()")
self.other.altered()
print("CHILD, AFTER OTHER altered()")
son = Child()
son.implicit()
son.override()
son.altered()
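# Expected output when run:
#   OTHER implicit()
#   CHILD override()
#   CHILD, BEFORE OTHER altered()
#   OTHER altered()
#   CHILD, AFTER OTHER altered()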
| {
"content_hash": "e8dbf5574b2238ad628b840dd68de86f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 46,
"avg_line_length": 18.5,
"alnum_prop": 0.581081081081081,
"repo_name": "Akagi201/learning-python",
"id": "f03ae31d7dac9bff23f4a2bb7938229d74de53a0",
"size": "662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lpthw/ex44.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "125"
},
{
"name": "CSS",
"bytes": "82315"
},
{
"name": "HTML",
"bytes": "16738"
},
{
"name": "JavaScript",
"bytes": "253132"
},
{
"name": "Jupyter Notebook",
"bytes": "3666"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "336950"
},
{
"name": "Rich Text Format",
"bytes": "49342"
},
{
"name": "Shell",
"bytes": "4498"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection,
connections, router, transaction,
)
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields.related import (
ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation,
)
from django.db.models.manager import Manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.signals import (
class_prepared, post_init, post_save, pre_init, pre_save,
)
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
@python_2_unicode_compatible
class Deferred(object):
def __repr__(self):
return str('<Deferred field>')
def __str__(self):
return str('<Deferred field>')
DEFERRED = Deferred()
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
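# Usage sketch: ModelBase below calls, for example,
#   subclass_exception(str('DoesNotExist'), (ObjectDoesNotExist,), module, attached_to=new_class)
# so that pickling SomeModel.DoesNotExist (SomeModel being any concrete model) round-trips
# by looking the exception class up again as an attribute of the attached model class.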
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_attrs = {'__module__': module}
classcell = attrs.pop('__classcell__', None)
if classcell is not None:
new_attrs['__classcell__'] = classcell
new_class = super_new(cls, name, bases, new_attrs)
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
new_class.add_to_class('_meta', Options(meta, app_label))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.private_fields
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is None:
base = parent
elif parent._meta.concrete_model is not base._meta.concrete_model:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Track fields inherited from base models.
inherited_attributes = set()
# Do the appropriate setup for any model parents.
for base in new_class.mro():
if base not in parents or not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
inherited_attributes |= set(base.__dict__.keys())
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
if not base._meta.abstract:
# Check for clashes between locally declared fields and those
# on the base classes.
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
inherited_attributes.add(field.name)
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
if attr_name in field_names:
raise FieldError(
"Auto-generated field '%s' in class %r for "
"parent_link to base class %r clashes with "
"declared field of the same name." % (
attr_name,
name,
base.__name__,
)
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
base_parents = base._meta.parents.copy()
# Add fields from abstract base class if it wasn't overridden.
for field in parent_fields:
if (field.name not in field_names and
field.name not in new_class.__dict__ and
field.name not in inherited_attributes):
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Replace parent links defined on this base by the new
# field. It will be appropriately resolved if required.
if field.one_to_one:
for parent, parent_link in base_parents.items():
if field == parent_link:
base_parents[parent] = new_field
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base_parents)
# Inherit private fields (like GenericForeignKey) from the parent
# class
for field in base._meta.private_fields:
if field.name in field_names:
if not base._meta.abstract:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Set the name of _meta.indexes. This can't be done in
# Options.contribute_to_class() because fields haven't been added to
# the model at that point.
for index in new_class._meta.indexes:
if not index.name:
index.set_name_with_model(new_class)
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
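    # Dispatch sketch for add_to_class: objects that know how to attach themselves
    # (fields, managers, Options) define contribute_to_class(), e.g. the
    # add_to_class('_meta', Options(meta, app_label)) call above routes through it,
    # while plain attributes such as methods fall back to setattr().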
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
if get_absolute_url_override:
setattr(cls, 'get_absolute_url', get_absolute_url_override)
if not opts.managers or cls._requires_legacy_default_manager():
if any(f.name == 'objects' for f in opts.fields):
raise ValueError(
"Model %s must specify a custom Manager, because it has a "
"field named 'objects'." % cls.__name__
)
manager = Manager()
manager.auto_created = True
cls.add_to_class('objects', manager)
class_prepared.send(sender=cls)
def _requires_legacy_default_manager(cls): # RemovedInDjango20Warning
opts = cls._meta
if opts.manager_inheritance_from_future:
return False
future_default_manager = opts.default_manager
# Step 1: Locate a manager that would have been promoted
# to default manager with the legacy system.
for manager in opts.managers:
originating_model = manager._originating_model
if (cls is originating_model or cls._meta.proxy or
originating_model._meta.abstract):
if manager is not cls._default_manager and not opts.default_manager_name:
warnings.warn(
"Managers from concrete parents will soon qualify as default "
"managers if they appear before any other managers in the "
"MRO. As a result, '{legacy_default_manager}' declared on "
"'{legacy_default_manager_model}' will no longer be the "
"default manager for '{model}' in favor of "
"'{future_default_manager}' declared on "
"'{future_default_manager_model}'. "
"You can redeclare '{legacy_default_manager}' on '{cls}' "
"to keep things the way they are or you can switch to the new "
"behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
legacy_default_manager=manager.name,
legacy_default_manager_model=manager._originating_model._meta.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
opts.default_manager_name = manager.name
opts._expire_cache()
break
# Step 2: Since there are managers but none of them qualified as
# default managers under the legacy system (meaning that there are
# managers from concrete parents that would be promoted under the
# new system), we need to create a new Manager instance for the
# 'objects' attribute as a deprecation shim.
else:
# If the "future" default manager was auto created there is no
# point warning the user since it's basically the same manager.
if not future_default_manager.auto_created:
warnings.warn(
"Managers from concrete parents will soon qualify as "
"default managers. As a result, the 'objects' manager "
"won't be created (or recreated) automatically "
"anymore on '{model}' and '{future_default_manager}' "
"declared on '{future_default_manager_model}' will be "
"promoted to default manager. You can declare "
"explicitly `objects = models.Manager()` on '{cls}' "
"to keep things the way they are or you can switch "
"to the new behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
return True
@property
def _base_manager(cls):
return cls._meta.base_manager
@property
def _default_manager(cls):
return cls._meta.default_manager
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
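# Lifecycle note: _state.adding starts out True for a freshly constructed instance and is
# set to False in Model.from_db() and at the end of Model.save_base() below; the unique and
# primary-key checks in this module key off that flag.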
class Model(six.with_metaclass(ModelBase)):
def __init__(self, *args, **kwargs):
# Alias some things as locals to avoid repeat global lookups
cls = self.__class__
opts = self._meta
_setattr = setattr
_DEFERRED = DEFERRED
pre_init.send(sender=cls, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
if len(args) > len(opts.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(opts.concrete_fields)
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(opts.fields)
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# Virtual field
if field.attname not in kwargs and field.column is None:
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
if rel_obj is not _DEFERRED:
_setattr(self, field.name, rel_obj)
else:
if val is not _DEFERRED:
_setattr(self, field.attname, val)
if kwargs:
property_names = opts._property_names
for prop in tuple(kwargs):
try:
# Any remaining kwargs must correspond to properties or
# virtual fields.
if prop in property_names or opts.get_field(prop):
if kwargs[prop] is not _DEFERRED:
_setattr(self, prop, kwargs[prop])
del kwargs[prop]
except (AttributeError, FieldDoesNotExist):
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
post_init.send(sender=cls, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if len(values) != len(cls._meta.concrete_fields):
values = list(values)
values.reverse()
values = [values.pop() if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields]
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if six.PY2 and hasattr(self, '__unicode__'):
return force_text(self).encode('utf-8')
return str('%s object' % self.__class__.__name__)
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
def __reduce__(self):
data = self.__dict__
data[DJANGO_VERSION_PICKLE_KEY] = get_version()
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id,), data
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled model instance's Django version %s does not match "
"the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if f.attname not in self.__dict__
}
def refresh_from_db(self, using=None, fields=None):
"""
Reloads field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is not None:
if len(fields) == 0:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
db = using if using is not None else self._state.db
db_instance_qs = self.__class__._default_manager.using(db).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Throw away stale foreign key references.
if field.is_relation and field.get_cache_name() in self.__dict__:
rel_instance = getattr(self, field.get_cache_name())
local_val = getattr(db_instance, field.attname)
related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
if local_val != related_val or (local_val is None and related_val is None):
del self.__dict__[field.get_cache_name()]
self._state.db = db_instance._state.db
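    # Usage sketch for refresh_from_db (assumes an already saved instance `obj` and a
    # hypothetical field attname 'name'):
    #   obj.refresh_from_db()                 # reload every non-deferred field
    #   obj.refresh_from_db(fields=['name'])  # reload only that attname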
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
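    # Usage sketch for serializable_value (hypothetical Article model with a ForeignKey
    # named 'author'):
    #   article.serializable_value('author')  # returns the stored author_id value
    #   article.serializable_value('title')   # returns the plain attribute value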
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save() would result in silent data loss.
for field in self._meta.concrete_fields:
if field.is_relation:
# If the related field isn't cached, then an instance hasn't
# been assigned and there's no need to worry about this check.
try:
getattr(self, field.get_cache_name())
except AttributeError:
continue
obj = getattr(self, field.name, None)
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj and obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
delattr(obj, field.remote_field.get_cache_name())
raise ValueError(
"save() prohibited to prevent data loss due to "
"unsaved related object '%s'." % field.name
)
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
deferred_fields = self.get_deferred_fields()
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and deferred_fields and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
The 'raw' argument is telling save_base not to save any parent
models and not to do any changes to the values before save. This
is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
pre_save.send(
sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields,
)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
post_save.send(
sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using,
)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None and
getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
# Since we didn't have an instance of the parent handy set
# attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
order_value = cls._base_manager.using(using).filter(**filter_args).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if f is not meta.auto_field]
update_pk = meta.auto_field and not pk_set
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
return filtered._update(values) > 0 or filtered.exists()
else:
return False
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None, keep_parents=False):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
'%s%s' % (order, field.name), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = self.__class__._default_manager.filter(**filter_args).filter(**{
'_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
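    # Illustrative return value of _get_unique_checks (hypothetical Article model):
    #   unique_checks = [(Article, ('slug',)), (Article, ('title', 'author'))]
    #   date_checks   = [(Article, 'year', 'slug', 'pub_date')]
    # i.e. slug must be unique, (title, author) must be unique together, and slug must be
    # unique for the year of pub_date.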
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
# TODO: Handle multiple backends with different feature flags.
if (lookup_value is None or
(lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)):
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
            # There's a ticket to add a date lookup; we can remove this special
            # case if that makes its way in.
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = six.text_type(capfirst(field.verbose_name))
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
else:
exclude = list(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_model())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
errors.extend(cls._check_long_column_names())
clash_errors = (
cls._check_id_field() +
cls._check_field_name_clashes() +
cls._check_model_name_db_lookup_clashes()
)
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract." % (
cls._meta.swappable, app_label, model_name
),
id='models.E002',
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (f.remote_field.model, cls, f.remote_field.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two many-to-many relations through "
"the intermediate model '%s'." % f.remote_field.through._meta.label,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
fields = list(f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
obj=cls,
id='models.E004',
)
]
else:
return []
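    # --- Editor's illustrative sketch; not part of the original module. ---
    # A hypothetical model that would trip the check above: it declares a
    # field named `id` without primary_key=True, so it clashes with the
    # automatically added primary key and raises models.E004.
    #
    #     class Report(models.Model):
    #         id = models.IntegerField()                    # models.E004
    #
    #     class Report(models.Model):
    #         id = models.IntegerField(primary_key=True)    # OK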
@classmethod
def _check_field_name_clashes(cls):
""" Ref #17673. """
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'." % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents, including auto-generated fields like multi-table inheritance
# child accessors.
for parent in cls._meta.get_parent_list():
for f in parent._meta.get_fields():
if f not in used_fields:
used_fields[f.name] = f
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
# Note that we may detect clash between user-defined non-unique
# field "id" and automatically added unique field "id", both
# defined at the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (
f.name, clash.name, clash.model._meta
),
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
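    # --- Editor's illustrative sketch; not part of the original module. ---
    # A hypothetical model the column-name check above would flag, because
    # two fields are mapped onto the same database column:
    #
    #     class Person(models.Model):
    #         first = models.CharField(max_length=50, db_column='name')
    #         last = models.CharField(max_length=50, db_column='name')  # models.E007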
@classmethod
def _check_model_name_db_lookup_clashes(cls):
errors = []
model_name = cls.__name__
if model_name.startswith('_') or model_name.endswith('_'):
errors.append(
checks.Error(
"The model name '%s' cannot start or end with an underscore "
"as it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E023'
)
)
elif LOOKUP_SEP in model_name:
errors.append(
checks.Error(
"The model name '%s' cannot contain double underscores as "
"it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E024'
)
)
return errors
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
# In order to avoid hitting the relation tree prematurely, we use our
# own fields_map instead of using get_field()
forward_fields_map = {
field.name: field for field in cls._meta._get_fields(reverse=False)
}
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the non-existent field '%s'." % (
option, field_name,
),
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'." % (
option, field_name, option,
),
obj=cls,
id='models.E013',
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
"'%s' refers to field '%s' which is not local to model '%s'."
% (option, field_name, cls._meta.object_name),
hint="This issue may be caused by multi-table inheritance.",
obj=cls,
id='models.E016',
)
)
return errors
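    # --- Editor's illustrative sketch; not part of the original module. ---
    # Hypothetical Meta options that the unique_together/_check_local_fields
    # checks above would reject:
    #
    #     class Book(models.Model):
    #         title = models.CharField(max_length=100)
    #         authors = models.ManyToManyField('Author')
    #
    #         class Meta:
    #             # 'isbn' does not exist on Book         -> models.E012
    #             # ManyToManyFields are not allowed here  -> models.E013
    #             unique_together = [('title', 'isbn'), ('title', 'authors')]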
@classmethod
def _check_ordering(cls):
""" Check "ordering" option -- is it a list of strings and do all fields
exist? """
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=cls,
id='models.E021',
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
"'ordering' must be a tuple or list (even if you want to order by only one field).",
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip '?' fields.
fields = (f for f in fields if f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if LOOKUP_SEP not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != 'pk'}
# Check for invalid or non-existent fields in ordering.
invalid_fields = []
# Any field name that is not present in field_names does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(chain.from_iterable(
(f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
))
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the non-existent field '%s'." % invalid_field,
obj=cls,
id='models.E015',
)
)
return errors
@classmethod
def _check_long_column_names(cls):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
for db in settings.DATABASES.keys():
# skip databases where the model won't be created
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is None or connection.features.truncates_names:
continue
else:
if allowed_len is None:
allowed_len = max_name_length
db_alias = db
elif max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if f.db_column is None and column_name is not None and len(column_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id='models.E018',
)
)
for f in cls._meta.local_many_to_many:
# Skip nonexistent models.
if isinstance(f.remote_field.through, six.string_types):
continue
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for M2M field '
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=(
"Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."
),
obj=cls,
id='models.E019',
)
)
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(pk=j, **filter_args).update(_order=i)
def method_get_order(ordered_obj, self):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
pk_name = ordered_obj._meta.pk.name
return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True)
def make_foreign_order_accessors(model, related_model):
setattr(
related_model,
'get_%s_order' % model.__name__.lower(),
curry(method_get_order, model)
)
setattr(
related_model,
'set_%s_order' % model.__name__.lower(),
curry(method_set_order, model)
)
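# --- Editor's illustrative sketch; not part of the original module. ---
# make_foreign_order_accessors() is what gives the related model the
# get_RELATED_order()/set_RELATED_order() helpers when a model declares
# Meta.order_with_respect_to. A hypothetical pairing, for illustration:
#
#     class Question(models.Model):
#         text = models.TextField()
#
#     class Answer(models.Model):
#         question = models.ForeignKey(Question, on_delete=models.CASCADE)
#
#         class Meta:
#             order_with_respect_to = 'question'
#
#     question.get_answer_order()           # pks in their current _order
#     question.set_answer_order([3, 1, 2])  # rewrites _order inside a transaction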
########
# MISC #
########
def model_unpickle(model_id):
"""
Used to unpickle Model subclasses with deferred fields.
"""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
return model.__new__(model)
model_unpickle.__safe_for_unpickle__ = True
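# --- Editor's illustrative sketch; not part of the original module. ---
# model_unpickle() only resolves the model class and hands pickle an empty
# instance to repopulate; a hypothetical direct call:
#
#     obj = model_unpickle(('myapp', 'Article'))   # bare Article.__new__()
#     isinstance(obj, Article)                     # True; __init__ never runs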
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
| {
"content_hash": "7ca22e9009d9949ae90b315618fbc2da",
"timestamp": "",
"source": "github",
"line_count": 1816,
"max_line_length": 114,
"avg_line_length": 41.684471365638764,
"alnum_prop": 0.5397825598752956,
"repo_name": "sarthakmeh03/django",
"id": "53b761f45ab7543f66f3d551f5231719a58746bd",
"size": "75699",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/models/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52170"
},
{
"name": "HTML",
"bytes": "174451"
},
{
"name": "JavaScript",
"bytes": "251434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11348046"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import pytest
import os
from polyglotdb.io import inspect_mfa
from polyglotdb import CorpusContext
from polyglotdb.exceptions import TextGridError, GraphQueryError, ParseError
from textgrid import TextGrid, IntervalTier
from polyglotdb.io.parsers.mfa import MfaParser
def test_load_mfa(mfa_test_dir, graph_db):
with CorpusContext('test_mfa', **graph_db) as c:
c.reset()
testFilePath = os.path.join(mfa_test_dir, "mfa_test.TextGrid")
parser = inspect_mfa(testFilePath)
print(parser.speaker_parser)
c.load(parser, testFilePath)
assert (c.hierarchy.has_type_property('word', 'transcription'))
q = c.query_graph(c.word).filter(c.word.label == 'JURASSIC')
print(q)
print(q.all())
q = q.filter(c.word.speaker.name == 'mfa')
#print(c.word.speaker.name)
print(q.all())
q = q.order_by(c.word.begin)
print(q.all())
q = q.columns(c.word.label)
print(q.all())
results = q.all()
assert (len(results) == 1)
c.encode_pauses('<SIL>')
c.encode_utterances(min_pause_length=0)
q = c.query_graph(c.word).filter(c.word.label == 'PLANET')
q = q.filter(c.word.speaker.name == 'mfa')
q = q.order_by(c.word.begin)
q = q.columns(c.word.label, c.word.following.label.column_name('following'))
results = q.all()
assert (len(results) == 1)
assert (results[0]['following'] == 'JURASSIC')
q = c.query_speakers().filter(c.speaker.name == 'mfa')
q = q.columns(c.speaker.discourses.name.column_name('discourses'))
s = q.get()
assert (len(s['discourses']) == 1)
assert (s['discourses'] == ['mfa_test'])
def test_mismatch_parser(timit_test_dir, graph_db):
with CorpusContext('test_mismatch', **graph_db) as c:
c.reset()
parser = inspect_mfa(timit_test_dir)
with pytest.raises(ParseError):
c.load(parser, timit_test_dir)
def test_two_format_parsing(mfa_test_dir, graph_db):
#for file in os.listdir(os.path.abspath(mfa_test_dir)):
# if file.endswith("yes.TextGrid") or file.endswith("no.TextGrid"):
# path = os.path.join(mfa_test_dir, file)
#parser = MfaParser("a", "b")
# curTg = TextGrid()
# curTg.read(path)
#value = parser._is_valid(curTg)
#if file.endswith("yes.TextGrid"):
# assert True
#elif file.endswith("no.TextGrid"):
# assert False
valid_dir = os.path.join(mfa_test_dir, "valid")
invalid_dir = os.path.join(mfa_test_dir, "invalid")
# Check that valids load
with CorpusContext('mfa_valid', **graph_db) as c:
c.reset()
parser = inspect_mfa(valid_dir)
c.load(parser, valid_dir)
# Check that invalids don't
with CorpusContext('mfa_invalid', **graph_db) as c:
c.reset()
parser = inspect_mfa(invalid_dir)
with pytest.raises(ParseError):
c.load(parser, invalid_dir)
| {
"content_hash": "39a8a8fde0aaf5115c5ee7a19cd58ed4",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 84,
"avg_line_length": 33.604395604395606,
"alnum_prop": 0.5964682799215173,
"repo_name": "samihuc/PolyglotDB",
"id": "01718e47750359be3e36cdcbc131cb62ac80c47b",
"size": "3058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_io_mfa.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "794865"
},
{
"name": "Shell",
"bytes": "2326"
}
],
"symlink_target": ""
} |
import argparse
import json
import os.path
import requests
import getpass
try:
input = raw_input
except:
pass
def get_token(username, password):
# the client id we can borrow from azure xplat cli
client_id = '04b07795-8ddb-461a-bbee-02f9e1bf7b46'
grant_type = 'password'
resource = 'https://management.core.windows.net/'
token_url = 'https://login.windows.net/common/oauth2/token'
payload = {
'grant_type': grant_type,
'client_id': client_id,
'username': username,
'password': password,
'resource': resource,
}
response = requests.post(token_url, data=payload).json()
return response['access_token']
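# --- Editor's illustrative sketch; not part of the original script. ---
# get_token() is a plain OAuth2 "password" grant against the common
# login.windows.net token endpoint; a hypothetical direct use:
#
#     token = get_token('[email protected]', 'hunter2')
#     auth_header = 'Bearer {}'.format(token)
#     subscriptions = get_subscription_ids(auth_header)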
front_url = "https://management.azure.com"
front_api_version = "2014-01-01"
def get_tenant_ids(auth_header):
response = requests.get(
"{}/tenants?api-version={}".format(front_url, front_api_version),
headers={
'Authorization': auth_header,
}
).json()
ids = [item['tenantId'] for item in response['value']]
return ids
def get_subscription_ids(auth_header):
response = requests.get(
"{}/subscriptions?api-version={}".format(front_url, front_api_version),
headers={
'Authorization': auth_header,
}
).json()
ids = [item['subscriptionId'] for item in response['value']]
return ids
def choose_subscription(auth_header):
# TODO: this doesn't work, we'll need ADAL for this
# tenants = get_tenant_ids(auth_header)
# print('tenants: {}'.format(tenants))
# subs = get_subscription_ids(auth_header)
# print('subs: {}'.format(subs))
# for now just ask the user to type it
return input('Enter subscription id:')
def write_credentials_file(sub_id, token):
folder = os.path.dirname(__file__)
path = os.path.join(folder, 'credentials_real.json')
credentials = {
'subscriptionid': sub_id,
'authorization_header': 'Bearer {}'.format(token),
}
with open(path, 'w') as f:
f.write(json.dumps(credentials))
return path
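# --- Editor's illustrative note; not part of the original script. ---
# write_credentials_file() leaves a credentials_real.json next to this
# module with roughly this shape (values below are placeholders):
#
#     {
#         "subscriptionid": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
#         "authorization_header": "Bearer <access token>"
#     }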
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user', help='User name. Ex: [email protected]')
parser.add_argument('-p', '--password', help='User password')
parser.add_argument('-s', '--subscription', help='Subscription id. Ex: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee')
args = parser.parse_args()
username = args.user
password = args.password
sub_id = args.subscription
if not username:
username = input('Enter username:')
if not password:
password = getpass.getpass('Enter password:')
token = get_token(username, password)
if not sub_id:
auth_header = 'Bearer {}'.format(token)
sub_id = choose_subscription(auth_header)
creds_path = write_credentials_file(sub_id, token)
print('Credentials written to {}'.format(creds_path))
| {
"content_hash": "661630ed6c1c06d45906ce6f5ec98b7d",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 113,
"avg_line_length": 27.39814814814815,
"alnum_prop": 0.6336600202771207,
"repo_name": "crwilcox/azure-sdk-for-python",
"id": "de3cd349778f1bc20c9efa752ca035082b705d99",
"size": "3399",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "azure-common/testutils/create_credentials_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5060"
},
{
"name": "PowerShell",
"bytes": "1040"
},
{
"name": "Python",
"bytes": "2821238"
},
{
"name": "Shell",
"bytes": "1719"
}
],
"symlink_target": ""
} |
file_size = raw_input("Please input the size of the file you want to download:")
download_speed = raw_input("Please input your download speed (KB/s or MB/s):")
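# --- Editor's illustrative sketch; not part of the original script. ---
# The original stops after collecting input. A minimal, hypothetical way to
# finish the calculation, assuming the size is entered in MB and the speed
# in MB/s:
#
#     size_mb = float(file_size)
#     speed_mb_s = float(download_speed)
#     print("Estimated download time: %.1f seconds" % (size_mb / speed_mb_s))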
| {
"content_hash": "2950d2539eda8b329a0b56bd51574763",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 83,
"avg_line_length": 55.666666666666664,
"alnum_prop": 0.7305389221556886,
"repo_name": "CodeSheng/LPLHW",
"id": "a691b63a777b6e340a8f9b6b60025228e8a1ae8c",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dwtime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25968"
}
],
"symlink_target": ""
} |
import contextlib
import uuid
from eventlet import greenthread
import fixtures
import mock
from mox3 import mox
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from jacket.compute.cloud import flavors
from jacket.compute.cloud import power_state
from jacket.compute.cloud import vm_mode
from jacket import context
from jacket.compute import exception
from jacket.objects import compute
from jacket.compute import test
from jacket.tests.compute.unit import fake_flavor
from jacket.tests.compute.unit import fake_instance
from jacket.tests.compute.unit.objects import test_flavor
from jacket.tests.compute.unit.virt.xenapi import stubs
from jacket.tests.compute.unit.virt.xenapi import test_xenapi
from jacket.compute import utils
from jacket.compute.virt import hardware
from jacket.compute.virt.xenapi.client import session as xenapi_session
from jacket.compute.virt.xenapi import driver as xenapi_conn
from jacket.compute.virt.xenapi import fake
from jacket.compute.virt.xenapi import vm_utils
CONF = cfg.CONF
XENSM_TYPE = 'xensm'
ISCSI_TYPE = 'iscsi'
def get_fake_connection_data(sr_type):
fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
'name_label': 'fake_storage',
'name_description': 'test purposes',
'server': 'myserver',
'serverpath': '/local/scratch/myname',
'sr_type': 'nfs',
'introduce_sr_keys': ['server',
'serverpath',
'sr_type'],
'vdi_uuid': 'falseVDI'},
ISCSI_TYPE: {'volume_id': 'fake_volume_id',
'target_lun': 1,
'target_iqn': 'fake_iqn:volume-fake_volume_id',
'target_portal': u'localhost:3260',
'target_discovered': False}, }
return fakes[sr_type]
def _get_fake_session(error=None):
session = mock.Mock()
xenapi_session.apply_session_helpers(session)
if error is not None:
class FakeException(Exception):
details = [error, "a", "b", "c"]
session.XenAPI.Failure = FakeException
session.call_xenapi.side_effect = FakeException
return session
@contextlib.contextmanager
def contextified(result):
yield result
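# --- Editor's illustrative note; not part of the original tests. ---
# contextified() wraps a plain value in a one-shot context manager so a mock
# can stand in for code like "with vdi_attached_here(...) as dev:".
# Hypothetical use with mox:
#
#     mocked_call.AndReturn(contextified('mounted_dev'))
#     # inside the code under test:
#     #     with vm_utils.vdi_attached_here(...) as dev:   # dev == 'mounted_dev'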
def _fake_noop(*args, **kwargs):
return
class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
pass
class LookupTestCase(VMUtilsTestBase):
def setUp(self):
super(LookupTestCase, self).setUp()
self.session = self.mox.CreateMockAnything('Fake Session')
self.name_label = 'my_vm'
def _do_mock(self, result):
self.session.call_xenapi(
"VM.get_by_name_label", self.name_label).AndReturn(result)
self.mox.ReplayAll()
def test_normal(self):
self._do_mock(['x'])
result = vm_utils.lookup(self.session, self.name_label)
self.assertEqual('x', result)
def test_no_result(self):
self._do_mock([])
result = vm_utils.lookup(self.session, self.name_label)
self.assertIsNone(result)
def test_too_many(self):
self._do_mock(['a', 'b'])
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label)
def test_rescue_none(self):
self.session.call_xenapi(
"VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
self._do_mock(['x'])
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('x', result)
def test_rescue_found(self):
self.session.call_xenapi(
"VM.get_by_name_label",
self.name_label + '-rescue').AndReturn(['y'])
self.mox.ReplayAll()
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('y', result)
def test_rescue_too_many(self):
self.session.call_xenapi(
"VM.get_by_name_label",
self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
self.mox.ReplayAll()
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label,
check_rescue=True)
class GenerateConfigDriveTestCase(VMUtilsTestBase):
def test_no_admin_pass(self):
instance = {}
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr('session').AndReturn('sr_ref')
self.mox.StubOutWithMock(vm_utils, 'create_vdi')
vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
'configdrive',
64 * units.Mi).AndReturn('vdi_ref')
self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
vm_utils.vdi_attached_here(
'session', 'vdi_ref', read_only=False).AndReturn(
contextified('mounted_dev'))
class FakeInstanceMetadata(object):
def __init__(_self, instance, content=None, extra_md=None,
network_info=None):
self.assertEqual(network_info, "nw_info")
def metadata_for_config_drive(_self):
return []
self.useFixture(fixtures.MonkeyPatch(
'compute.api.metadata.base.InstanceMetadata',
FakeInstanceMetadata))
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
'-allow-lowercase', '-allow-multidot', '-l',
'-publisher', mox.IgnoreArg(), '-quiet',
'-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
attempts=1, run_as_root=False).AndReturn(None)
utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), run_as_root=True).AndReturn(None)
self.mox.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
bootable=False, read_only=True).AndReturn(None)
self.mox.ReplayAll()
# And the actual call we're testing
vm_utils.generate_configdrive('session', instance, 'vm_ref',
'userdevice', "nw_info")
@mock.patch.object(vm_utils, "destroy_vdi")
@mock.patch.object(vm_utils, "vdi_attached_here")
@mock.patch.object(vm_utils, "create_vdi")
@mock.patch.object(vm_utils, "safe_find_sr")
def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached,
mock_destroy):
mock_create_vdi.return_value = 'vdi_ref'
mock_attached.side_effect = test.TestingException
mock_destroy.side_effect = exception.StorageError(reason="")
instance = {"uuid": "asdf"}
self.assertRaises(test.TestingException,
vm_utils.generate_configdrive,
'session', instance, 'vm_ref', 'userdevice',
'nw_info')
mock_destroy.assert_called_once_with('session', 'vdi_ref')
class XenAPIGetUUID(VMUtilsTestBase):
def test_get_this_vm_uuid_new_kernel(self):
self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
vm_utils._get_sys_hypervisor_uuid().AndReturn(
'2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
self.mox.ReplayAll()
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
vm_utils.get_this_vm_uuid(None))
self.mox.VerifyAll()
def test_get_this_vm_uuid_old_kernel_reboot(self):
self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
self.mox.StubOutWithMock(utils, 'execute')
vm_utils._get_sys_hypervisor_uuid().AndRaise(
IOError(13, 'Permission denied'))
utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
('27', ''))
utils.execute('xenstore-read', '/local/domain/27/vm',
run_as_root=True).AndReturn(
('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))
self.mox.ReplayAll()
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
vm_utils.get_this_vm_uuid(None))
self.mox.VerifyAll()
class FakeSession(object):
def call_xenapi(self, *args):
pass
def call_plugin(self, *args):
pass
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
pass
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, *args, **kwargs):
pass
class FetchVhdImageTestCase(VMUtilsTestBase):
def setUp(self):
super(FetchVhdImageTestCase, self).setUp()
self.context = context.get_admin_context()
self.context.auth_token = 'auth_token'
self.session = FakeSession()
self.instance = {"uuid": "uuid"}
self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
vm_utils.get_sr_path(self.session).AndReturn('sr_path')
def _stub_glance_download_vhd(self, raise_exc=None):
self.mox.StubOutWithMock(
self.session, 'call_plugin_serialized_with_retry')
func = self.session.call_plugin_serialized_with_retry(
'glance', 'download_vhd2', 0, mox.IgnoreArg(), mox.IgnoreArg(),
extra_headers={'X-Auth-Token': 'auth_token',
'X-Roles': '',
'X-Tenant-Id': None,
'X-User-Id': None,
'X-Identity-Status': 'Confirmed'},
image_id='image_id',
uuid_stack=["uuid_stack"],
sr_path='sr_path')
if raise_exc:
func.AndRaise(raise_exc)
else:
func.AndReturn({'root': {'uuid': 'vdi'}})
def _stub_bittorrent_download_vhd(self, raise_exc=None):
self.mox.StubOutWithMock(
self.session, 'call_plugin_serialized')
func = self.session.call_plugin_serialized(
'bittorrent', 'download_vhd',
image_id='image_id',
uuid_stack=["uuid_stack"],
sr_path='sr_path',
torrent_download_stall_cutoff=600,
torrent_listen_port_start=6881,
torrent_listen_port_end=6891,
torrent_max_last_accessed=86400,
torrent_max_seeder_processes_per_host=1,
torrent_seed_chance=1.0,
torrent_seed_duration=3600,
torrent_url='http://foo/image_id.torrent'
)
if raise_exc:
func.AndRaise(raise_exc)
else:
func.AndReturn({'root': {'uuid': 'vdi'}})
def test_fetch_vhd_image_works_with_glance(self):
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(False)
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(
self.context, self.session, self.instance, "vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_fetch_vhd_image_works_with_bittorrent(self):
cfg.CONF.import_opt('torrent_base_url',
'compute.virt.xenapi.image.bittorrent',
group='xenserver')
self.flags(torrent_base_url='http://foo', group='xenserver')
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(True)
self._stub_bittorrent_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(False)
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi").AndRaise(exception.FlavorDiskSmallerThanImage(
flavor_size=0, image_size=1))
self.mox.StubOutWithMock(self.session, 'call_xenapi')
self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")
self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
vm_utils.destroy_vdi(self.session,
"ref").AndRaise(exception.StorageError(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.FlavorDiskSmallerThanImage,
vm_utils._fetch_vhd_image, self.context, self.session,
self.instance, 'image_id')
self.mox.VerifyAll()
def test_fallback_to_default_handler(self):
cfg.CONF.import_opt('torrent_base_url',
'compute.virt.xenapi.image.bittorrent',
group='xenserver')
self.flags(torrent_base_url='http://foo', group='xenserver')
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(True)
self._stub_bittorrent_download_vhd(raise_exc=RuntimeError)
vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
vm_utils.get_sr_path(self.session).AndReturn('sr_path')
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_default_handler_does_not_fallback_to_itself(self):
cfg.CONF.import_opt('torrent_base_url',
'compute.virt.xenapi.image.bittorrent',
group='xenserver')
self.flags(torrent_base_url='http://foo', group='xenserver')
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(False)
self._stub_glance_download_vhd(raise_exc=RuntimeError)
self.mox.ReplayAll()
self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
self.context, self.session, self.instance, 'image_id')
self.mox.VerifyAll()
class TestImageCompression(VMUtilsTestBase):
def test_image_compression(self):
# Testing for compute.conf, too low, negative, and a correct value.
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=0, group='xenserver')
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=-6, group='xenserver')
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=6, group='xenserver')
self.assertEqual(vm_utils.get_compression_level(), 6)
class ResizeHelpersTestCase(VMUtilsTestBase):
def setUp(self):
super(ResizeHelpersTestCase, self).setUp()
self.context = context.RequestContext('user', 'project')
def test_repair_filesystem(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('e2fsck', '-f', "-y", "fakepath",
run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
("size is: 42", ""))
self.mox.ReplayAll()
vm_utils._repair_filesystem("fakepath")
def _call_tune2fs_remove_journal(self, path):
utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)
def _call_tune2fs_add_journal(self, path):
utils.execute("tune2fs", "-j", path, run_as_root=True)
def _call_parted_mkpart(self, path, start, end):
utils.execute('parted', '--script', path, 'rm', '1',
run_as_root=True)
utils.execute('parted', '--script', path, 'mkpart',
'primary', '%ds' % start, '%ds' % end, run_as_root=True)
def _call_parted_boot_flag(self, path):
utils.execute('parted', '--script', path, 'set', '1',
'boot', 'on', run_as_root=True)
def test_resize_part_and_fs_down_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
self._call_parted_mkpart(dev_path, 0, 9)
self._call_parted_boot_flag(dev_path)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot")
def test_log_progress_if_required(self):
self.mox.StubOutWithMock(vm_utils.LOG, "debug")
vm_utils.LOG.debug("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy",
{"complete_pct": 50.0, "left": 1})
current = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(current))
time_fixture.advance_time_seconds(
vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
self.mox.ReplayAll()
vm_utils._log_progress_if_required(1, current, 2)
def test_log_progress_if_not_required(self):
self.mox.StubOutWithMock(vm_utils.LOG, "debug")
current = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(current))
time_fixture.advance_time_seconds(
vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
self.mox.ReplayAll()
vm_utils._log_progress_if_required(1, current, 2)
def test_resize_part_and_fs_down_fails_disk_too_big(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
new_sectors = 10
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
mobj = utils.execute("resize2fs",
partition_path,
"%ss" % new_sectors,
run_as_root=True)
mobj.AndRaise(processutils.ProcessExecutionError)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vm_utils._resize_part_and_fs,
"fake", 0, 20, 10, "boot")
def test_resize_part_and_fs_up_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
self._call_parted_mkpart(dev_path, 0, 29)
utils.execute("resize2fs", partition_path, run_as_root=True)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 30, "")
def test_resize_disk_throws_on_zero_size(self):
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0)
self.assertRaises(exception.ResizeError, vm_utils.resize_disk,
"session", "instance", "vdi_ref", flavor)
def test_auto_config_disk_returns_early_on_zero_size(self):
vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
@mock.patch.object(utils, "execute")
def test_get_partitions(self, mock_execute):
parted_return = "BYT;\n...\n"
parted_return += "1:2s:11s:10s:ext3::boot;\n"
parted_return += "2:20s:11s:10s::bob:;\n"
mock_execute.return_value = (parted_return, None)
partitions = vm_utils._get_partitions("abc")
self.assertEqual(2, len(partitions))
self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0])
self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1])
class CheckVDISizeTestCase(VMUtilsTestBase):
def setUp(self):
super(CheckVDISizeTestCase, self).setUp()
self.context = 'fakecontext'
self.session = 'fakesession'
self.instance = compute.Instance(uuid=str(uuid.uuid4()))
self.flavor = compute.Flavor()
self.vdi_uuid = 'fakeuuid'
def test_not_too_large(self):
self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
vm_utils._get_vdi_chain_size(self.session,
self.vdi_uuid).AndReturn(1073741824)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 1
get.return_value = self.flavor
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
def test_too_large(self):
self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
vm_utils._get_vdi_chain_size(self.session,
self.vdi_uuid).AndReturn(11811160065) # 10GB overhead allowed
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 1
get.return_value = self.flavor
self.assertRaises(exception.FlavorDiskSmallerThanImage,
vm_utils._check_vdi_size, self.context,
self.session, self.instance, self.vdi_uuid)
def test_zero_root_gb_disables_check(self):
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 0
get.return_value = self.flavor
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
def setUp(self):
super(GetInstanceForVdisForSrTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='compute.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
def test_get_instance_vdis_for_sr(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
vdi_1 = fake.create_vdi('vdiname1', sr_ref)
vdi_2 = fake.create_vdi('vdiname2', sr_ref)
for vdi_ref in [vdi_1, vdi_2]:
fake.create_vbd(vm_ref, vdi_ref)
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([vdi_1, vdi_2], result)
def test_get_instance_vdis_for_sr_no_vbd(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([], result)
class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
def test_lookup_call(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn('ignored')
mock.ReplayAll()
vm_utils.vm_ref_or_raise('session', 'somename')
mock.VerifyAll()
def test_return_value(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')
mock.ReplayAll()
self.assertEqual(
'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
mock.VerifyAll()
class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
def test_exception_raised(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn(None)
mock.ReplayAll()
self.assertRaises(
exception.InstanceNotFound,
lambda: vm_utils.vm_ref_or_raise('session', 'somename')
)
mock.VerifyAll()
def test_exception_msg_contains_vm_name(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn(None)
mock.ReplayAll()
try:
vm_utils.vm_ref_or_raise('session', 'somename')
except exception.InstanceNotFound as e:
self.assertIn('somename', six.text_type(e))
mock.VerifyAll()
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
class CreateCachedImageTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateCachedImageTestCase, self).setUp()
self.session = _get_fake_session()
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
@mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
def test_no_cow_no_ext(self, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
'vdi_ref', None, None, None,
'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
@mock.patch.object(vm_utils, '_fetch_image',
return_value={'root': {'uuid': 'vdi_uuid',
'file': None}})
def test_noncached(self, mock_fetch_image, mock_clone_vdi,
mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
None, None, None, None, None,
None, 'vdi_uuid']
self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
class BittorrentTestCase(VMUtilsTestBase):
def setUp(self):
super(BittorrentTestCase, self).setUp()
self.context = context.get_admin_context()
def test_image_uses_bittorrent(self):
instance = {'system_metadata': {'image_bittorrent': True}}
self.flags(torrent_images='some', group='xenserver')
self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
instance))
def _test_create_image(self, cache_type):
instance = {'system_metadata': {'image_cache_in_nova': True}}
self.flags(cache_images=cache_type, group='xenserver')
was = {'called': None}
def fake_create_cached_image(*args):
was['called'] = 'some'
return (False, {})
self.stubs.Set(vm_utils, '_create_cached_image',
fake_create_cached_image)
def fake_fetch_image(*args):
was['called'] = 'none'
return {}
self.stubs.Set(vm_utils, '_fetch_image',
fake_fetch_image)
vm_utils.create_image(self.context, None, instance,
'foo', 'bar', 'baz')
self.assertEqual(was['called'], cache_type)
def test_create_image_cached(self):
self._test_create_image('some')
def test_create_image_uncached(self):
self._test_create_image('none')
class ShutdownTestCase(VMUtilsTestBase):
def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
self.mock = mox.Mox()
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
self.mock.StubOutWithMock(vm_utils, 'LOG')
self.assertTrue(vm_utils.hard_shutdown_vm(
session, instance, vm_ref))
def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
self.mock = mox.Mox()
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
self.mock.StubOutWithMock(vm_utils, 'LOG')
self.assertTrue(vm_utils.clean_shutdown_vm(
session, instance, vm_ref))
class CreateVBDTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateVBDTestCase, self).setUp()
self.session = FakeSession()
self.mock = mox.Mox()
self.mock.StubOutWithMock(self.session, 'call_xenapi')
self.vbd_rec = self._generate_vbd_rec()
def _generate_vbd_rec(self):
vbd_rec = {}
vbd_rec['VM'] = 'vm_ref'
vbd_rec['VDI'] = 'vdi_ref'
vbd_rec['userdevice'] = '0'
vbd_rec['bootable'] = False
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
return vbd_rec
def test_create_vbd_default_args(self):
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_create_vbd_osvol(self):
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
"osvol", "True")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
osvol=True)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_create_vbd_extra_args(self):
self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
self.vbd_rec['type'] = 'a'
self.vbd_rec['mode'] = 'RO'
self.vbd_rec['bootable'] = True
self.vbd_rec['empty'] = True
self.vbd_rec['unpluggable'] = False
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
vbd_type="a", read_only=True, bootable=True,
empty=True, unpluggable=False)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_attach_cd(self):
self.mock.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.create_vbd(self.session, "vm_ref", None, 1,
vbd_type='cd', read_only=True, bootable=True,
empty=True, unpluggable=False).AndReturn("vbd_ref")
self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
self.mock.ReplayAll()
result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
class UnplugVbdTestCase(VMUtilsTestBase):
@mock.patch.object(greenthread, 'sleep')
def test_unplug_vbd_works(self, mock_sleep):
session = _get_fake_session()
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
self.assertEqual(0, mock_sleep.call_count)
def test_unplug_vbd_raises_unexpected_error(self):
session = _get_fake_session()
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
session.call_xenapi.side_effect = test.TestingException()
self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
session, vm_ref, vbd_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_detached_works(self):
error = "DEVICE_ALREADY_DETACHED"
session = _get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
session = _get_fake_session("")
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def _test_uplug_vbd_retries(self, mock_sleep, error):
session = _get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vm_ref, vbd_ref)
self.assertEqual(11, session.call_xenapi.call_count)
self.assertEqual(10, mock_sleep.call_count)
def _test_uplug_vbd_retries_with_neg_val(self):
session = _get_fake_session()
self.flags(num_vbd_unplug_retries=-1, group='xenserver')
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
@mock.patch.object(greenthread, 'sleep')
def test_uplug_vbd_retries_on_rejected(self, mock_sleep):
self._test_uplug_vbd_retries(mock_sleep,
"DEVICE_DETACH_REJECTED")
@mock.patch.object(greenthread, 'sleep')
def test_uplug_vbd_retries_on_internal_error(self, mock_sleep):
self._test_uplug_vbd_retries(mock_sleep,
"INTERNAL_ERROR")
class VDIOtherConfigTestCase(VMUtilsTestBase):
"""Tests to ensure that the code is populating VDI's `other_config`
    attribute with the correct metadata.
"""
def setUp(self):
super(VDIOtherConfigTestCase, self).setUp()
class _FakeSession(object):
def call_xenapi(self, operation, *args, **kwargs):
# VDI.add_to_other_config -> VDI_add_to_other_config
method = getattr(self, operation.replace('.', '_'), None)
if method:
return method(*args, **kwargs)
self.operation = operation
self.args = args
self.kwargs = kwargs
self.session = _FakeSession()
self.context = context.get_admin_context()
self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
'name': 'myinstance'}
def test_create_vdi(self):
# Some images are registered with XenServer explicitly by calling
# `create_vdi`
vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
'myvdi', 'root', 1024, read_only=True)
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, self.session.args[0]['other_config'])
def test_create_image(self):
# Other images are registered implicitly when they are dropped into
# the SR by a dom0 plugin or some other process
self.flags(cache_images='none', group='xenserver')
def fake_fetch_image(*args):
return {'root': {'uuid': 'fake-uuid'}}
self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
# Stubbing on the session object and not class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
vm_utils.create_image(self.context, self.session, self.fake_instance,
'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
def test_import_migrated_vhds(self):
# Migrated images should preserve the `other_config`
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
def call_plugin_serialized(*args, **kwargs):
return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
# Stubbing on the session object and not class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
self.session.call_plugin_serialized = call_plugin_serialized
self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)
vm_utils._import_migrated_vhds(self.session, self.fake_instance,
"disk_label", "root", "vdi_label")
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
class GenerateDiskTestCase(VMUtilsTestBase):
def setUp(self):
super(GenerateDiskTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='compute.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.session.is_local_connection = False
self.vm_ref = fake.create_vm("foo", "Running")
def tearDown(self):
super(GenerateDiskTestCase, self).tearDown()
fake.destroy_vm(self.vm_ref)
def _expect_parted_calls(self):
self.mox.StubOutWithMock(utils, "execute")
self.mox.StubOutWithMock(utils, "trycmd")
self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
self.mox.StubOutWithMock(vm_utils.os.path, "exists")
if self.session.is_local_connection:
utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
'msdos', check_exit_code=False, run_as_root=True)
utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
'primary', '2048s', '-0',
check_exit_code=False, run_as_root=True)
vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
utils.trycmd('kpartx', '-a', '/dev/fakedev',
discard_warnings=True, run_as_root=True)
else:
utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
'msdos', check_exit_code=True, run_as_root=True)
utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
'primary', '2048s', '-0',
check_exit_code=True, run_as_root=True)
def _check_vdi(self, vdi_ref, check_attached=True):
vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
if check_attached:
vbd_ref = vdi_rec["VBDs"][0]
vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
self.assertEqual(self.vm_ref, vbd_rec['VM'])
else:
self.assertEqual(0, len(vdi_rec["VBDs"]))
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_with_no_fs_given(self):
self._expect_parted_calls()
self.mox.ReplayAll()
vdi_ref = vm_utils._generate_disk(
self.session, {"uuid": "fake_uuid"},
self.vm_ref, "2", "name", "user", 10, None, None)
self._check_vdi(vdi_ref)
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_swap(self):
self._expect_parted_calls()
utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)
self.mox.ReplayAll()
vdi_ref = vm_utils._generate_disk(
self.session, {"uuid": "fake_uuid"},
self.vm_ref, "2", "name", "swap", 10, "swap", None)
self._check_vdi(vdi_ref)
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_ephemeral(self):
self._expect_parted_calls()
utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral',
'/dev/fakedev1', run_as_root=True)
self.mox.ReplayAll()
vdi_ref = vm_utils._generate_disk(
self.session, {"uuid": "fake_uuid"}, self.vm_ref,
"4", "name", "ephemeral", 10, "ext4", "ephemeral")
self._check_vdi(vdi_ref)
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_ensure_cleanup_called(self):
self._expect_parted_calls()
utils.execute(
'mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral', '/dev/fakedev1',
run_as_root=True).AndRaise(test.TestingException)
vm_utils.destroy_vdi(
self.session,
mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))
self.mox.ReplayAll()
self.assertRaises(
test.TestingException, vm_utils._generate_disk,
self.session, {"uuid": "fake_uuid"},
self.vm_ref, "4", "name", "ephemeral", 10, "ext4", "ephemeral")
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_ephemeral_local_not_attached(self):
self.session.is_local_connection = True
self._expect_parted_calls()
utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral',
'/dev/mapper/fakedev1', run_as_root=True)
self.mox.ReplayAll()
vdi_ref = vm_utils._generate_disk(
self.session, {"uuid": "fake_uuid"},
None, "4", "name", "ephemeral", 10, "ext4", "ephemeral")
self._check_vdi(vdi_ref, check_attached=False)
class GenerateEphemeralTestCase(VMUtilsTestBase):
def setUp(self):
super(GenerateEphemeralTestCase, self).setUp()
self.session = "session"
self.instance = "instance"
self.vm_ref = "vm_ref"
self.name_label = "name"
self.ephemeral_name_label = "name ephemeral"
self.userdevice = 4
self.fs_label = "ephemeral"
self.mox.StubOutWithMock(vm_utils, "_generate_disk")
self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")
def test_get_ephemeral_disk_sizes_simple(self):
result = vm_utils.get_ephemeral_disk_sizes(20)
expected = [20]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_three_disks_2000(self):
result = vm_utils.get_ephemeral_disk_sizes(4030)
expected = [2000, 2000, 30]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_two_disks_1024(self):
result = vm_utils.get_ephemeral_disk_sizes(2048)
expected = [1024, 1024]
self.assertEqual(expected, list(result))
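    # _generate_disk expects the size in MB, hence the GB -> MB conversion (size * 1024) below.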
def _expect_generate_disk(self, size, device, name_label, fs_label):
vm_utils._generate_disk(
self.session, self.instance, self.vm_ref,
str(device), name_label, 'ephemeral',
size * 1024, None, fs_label).AndReturn(device)
def test_generate_ephemeral_adds_one_disk(self):
self._expect_generate_disk(
20, self.userdevice, self.ephemeral_name_label, self.fs_label)
self.mox.ReplayAll()
vm_utils.generate_ephemeral(
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 20)
def test_generate_ephemeral_adds_multiple_disks(self):
self._expect_generate_disk(
2000, self.userdevice, self.ephemeral_name_label, self.fs_label)
self._expect_generate_disk(
2000, self.userdevice + 1, self.ephemeral_name_label + " (1)",
self.fs_label + "1")
self._expect_generate_disk(
30, self.userdevice + 2, self.ephemeral_name_label + " (2)",
self.fs_label + "2")
self.mox.ReplayAll()
vm_utils.generate_ephemeral(
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4030)
def test_generate_ephemeral_cleans_up_on_error(self):
self._expect_generate_disk(
1024, self.userdevice, self.ephemeral_name_label, self.fs_label)
self._expect_generate_disk(
1024, self.userdevice + 1, self.ephemeral_name_label + " (1)",
self.fs_label + "1")
vm_utils._generate_disk(
self.session, self.instance, self.vm_ref,
str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
units.Mi, None, 'ephemeral2').AndRaise(exception.NovaException)
vm_utils.safe_destroy_vdis(self.session, [4, 5])
self.mox.ReplayAll()
self.assertRaises(
exception.NovaException, vm_utils.generate_ephemeral,
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4096)
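# Minimal stand-in for a file object: it only records seek() calls so the tests can assert on them.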
class FakeFile(object):
def __init__(self):
self._file_operations = []
def seek(self, offset):
self._file_operations.append((self.seek, offset))
class StreamDiskTestCase(VMUtilsTestBase):
def setUp(self):
super(StreamDiskTestCase, self).setUp()
self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
self.mox.StubOutWithMock(vm_utils, '_write_partition')
# NOTE(matelakat): This might hide the fail reason, as test runners
# are unhappy with a mocked out open.
self.mox.StubOutWithMock(six.moves.builtins, 'open')
self.image_service_func = self.mox.CreateMockAnything()
def test_non_ami(self):
fake_file = FakeFile()
vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
vm_utils.utils.temporary_chown(
'some_path').AndReturn(contextified(None))
open('some_path', 'wb').AndReturn(contextified(fake_file))
self.image_service_func(fake_file)
self.mox.ReplayAll()
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.KERNEL, None, 'dev')
self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)
def test_ami_disk(self):
fake_file = FakeFile()
vm_utils._write_partition("session", 100, 'dev')
vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
vm_utils.utils.temporary_chown(
'some_path').AndReturn(contextified(None))
open('some_path', 'wb').AndReturn(contextified(fake_file))
self.image_service_func(fake_file)
self.mox.ReplayAll()
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.DISK, 100, 'dev')
self.assertEqual(
[(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
fake_file._file_operations)
class VMUtilsSRPath(VMUtilsTestBase):
def setUp(self):
super(VMUtilsSRPath, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='compute.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.session.is_local_connection = False
def test_defined(self):
self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
self.mox.StubOutWithMock(self.session, "call_xenapi")
vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
self.session.host_ref = "host_ref"
self.session.call_xenapi('PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
{'pbd_ref': {'device_config': {'path': 'sr_path'}}})
self.mox.ReplayAll()
self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path")
def test_default(self):
self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
self.mox.StubOutWithMock(self.session, "call_xenapi")
vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
self.session.host_ref = "host_ref"
self.session.call_xenapi('PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
{'pbd_ref': {'device_config': {}}})
self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn(
{'uuid': 'sr_uuid', 'type': 'ext'})
self.mox.ReplayAll()
self.assertEqual(vm_utils.get_sr_path(self.session),
"/var/run/sr-mount/sr_uuid")
class CreateKernelRamdiskTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateKernelRamdiskTestCase, self).setUp()
self.context = "context"
self.session = FakeSession()
self.instance = {"kernel_id": None, "ramdisk_id": None}
self.name_label = "name"
self.mox.StubOutWithMock(self.session, "call_plugin")
self.mox.StubOutWithMock(uuid, "uuid4")
self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")
def test_create_kernel_and_ramdisk_no_create(self):
self.mox.ReplayAll()
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual((None, None), result)
def test_create_kernel_and_ramdisk_create_both_cached(self):
kernel_id = "kernel"
ramdisk_id = "ramdisk"
self.instance["kernel_id"] = kernel_id
self.instance["ramdisk_id"] = ramdisk_id
args_kernel = {}
args_kernel['cached-image'] = kernel_id
args_kernel['new-image-uuid'] = "fake_uuid1"
uuid.uuid4().AndReturn("fake_uuid1")
self.session.call_plugin('kernel', 'create_kernel_ramdisk',
args_kernel).AndReturn("k")
args_ramdisk = {}
args_ramdisk['cached-image'] = ramdisk_id
args_ramdisk['new-image-uuid'] = "fake_uuid2"
uuid.uuid4().AndReturn("fake_uuid2")
self.session.call_plugin('kernel', 'create_kernel_ramdisk',
args_ramdisk).AndReturn("r")
self.mox.ReplayAll()
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", "r"), result)
def test_create_kernel_and_ramdisk_create_kernel_not_cached(self):
kernel_id = "kernel"
self.instance["kernel_id"] = kernel_id
args_kernel = {}
args_kernel['cached-image'] = kernel_id
args_kernel['new-image-uuid'] = "fake_uuid1"
uuid.uuid4().AndReturn("fake_uuid1")
self.session.call_plugin('kernel', 'create_kernel_ramdisk',
args_kernel).AndReturn("")
kernel = {"kernel": {"file": "k"}}
vm_utils._fetch_disk_image(self.context, self.session, self.instance,
self.name_label, kernel_id, 0).AndReturn(kernel)
self.mox.ReplayAll()
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", None), result)
class ScanSrTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, "_scan_sr")
@mock.patch.object(vm_utils, "safe_find_sr")
def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
mock_safe_find_sr.return_value = "sr_ref"
self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))
mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")
def test_scan_sr_works(self):
session = mock.Mock()
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
def test_scan_sr_unknown_error_fails_once(self):
session = mock.Mock()
session.call_xenapi.side_effect = test.TestingException
self.assertRaises(test.TestingException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
session.call_xenapi.side_effect = FakeException
self.assertRaises(FakeException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(4, session.call_xenapi.call_count)
mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)])
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
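        # Fail on every call except the second, so the retry loop succeeds on attempt two.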
def fake_call_xenapi(*args):
fake_call_xenapi.count += 1
if fake_call_xenapi.count != 2:
raise FakeException()
fake_call_xenapi.count = 0
session.call_xenapi.side_effect = fake_call_xenapi
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(2, session.call_xenapi.call_count)
mock_sleep.assert_called_once_with(2)
@mock.patch.object(flavors, 'extract_flavor',
return_value={
'memory_mb': 1024,
'vcpus': 1,
'vcpu_weight': 1.0,
})
class CreateVmTestCase(VMUtilsTestBase):
def test_vss_provider(self, mock_extract):
self.flags(vcpu_pin_set="2,3")
session = _get_fake_session()
instance = compute.Instance(uuid="uuid",
os_type="windows",
system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = compute.Flavor._from_db_object(
None, compute.Flavor(), test_flavor.fake_flavor)
vm_utils.create_vm(session, instance, "label",
"kernel", "ramdisk")
vm_rec = {
'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '', 'tags': [],
'VCPUs_max': '4',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': 'uuid'},
'name_label': 'label',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '4',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': 'true',
'acpi': 'true'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False
}
session.call_xenapi.assert_called_once_with("VM.create", vm_rec)
def test_invalid_cpu_mask_raises(self, mock_extract):
self.flags(vcpu_pin_set="asdf")
session = mock.Mock()
instance = compute.Instance(uuid=str(uuid.uuid4()),
system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = compute.Flavor._from_db_object(
None, compute.Flavor(), test_flavor.fake_flavor)
self.assertRaises(exception.Invalid,
vm_utils.create_vm,
session, instance, "label",
"kernel", "ramdisk")
def test_destroy_vm(self, mock_extract):
session = mock.Mock()
instance = compute.Instance(uuid=str(uuid.uuid4()))
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
def test_destroy_vm_silently_fails(self, mock_extract):
session = mock.Mock()
exc = test.TestingException()
session.XenAPI.Failure = test.TestingException
session.VM.destroy.side_effect = exc
instance = compute.Instance(uuid=str(uuid.uuid4()))
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
class DetermineVmModeTestCase(VMUtilsTestBase):
def _fake_object(self, updates):
return fake_instance.fake_instance_obj(None, **updates)
def test_determine_vm_mode_returns_xen_mode(self):
instance = self._fake_object({"vm_mode": "xen"})
self.assertEqual(vm_mode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_mode(self):
instance = self._fake_object({"vm_mode": "hvm"})
self.assertEqual(vm_mode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_linux(self):
instance = self._fake_object({"vm_mode": None, "os_type": "linux"})
self.assertEqual(vm_mode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_for_windows(self):
instance = self._fake_object({"vm_mode": None, "os_type": "windows"})
self.assertEqual(vm_mode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_by_default(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(vm_mode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_VHD(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(vm_mode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD))
def test_determine_vm_mode_returns_xen_for_DISK(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(vm_mode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK))
class CallXenAPIHelpersTestCase(VMUtilsTestBase):
def test_vm_get_vbd_refs(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")
def test_vbd_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
session.call_xenapi.assert_called_once_with("VBD.get_record",
"vbd_ref")
def test_vdi_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.get_record",
"vdi_ref")
def test_vdi_snapshot(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.snapshot",
"vdi_ref", {})
def test_vdi_get_virtual_size(self):
session = mock.Mock()
session.call_xenapi.return_value = "123"
self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
"ref")
@mock.patch.object(vm_utils, '_get_resize_func_name')
def test_vdi_resize(self, mock_get_resize_func_name):
session = mock.Mock()
mock_get_resize_func_name.return_value = "VDI.fake"
vm_utils._vdi_resize(session, "ref", 123)
session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
mock_get_size.return_value = (1024 ** 3) - 1
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3 + 1
instance = {"uuid": "a"}
self.assertRaises(exception.ResizeError,
vm_utils.update_vdi_virtual_size,
"s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_get_rec')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetVdiForVMTestCase(VMUtilsTestBase):
def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
vdi_get_rec.return_value = {}
result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
self.assertEqual(('vdi_ref', {}), result)
vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
vbd_get_rec.assert_called_once_with(session, "a")
vdi_get_rec.assert_called_once_with(session, "vdi_ref")
def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
self.assertRaises(exception.NovaException,
vm_utils.get_vdi_for_vm_safely,
session, "vm_ref", userdevice='1')
self.assertEqual([], vdi_get_rec.call_args_list)
self.assertEqual(2, len(vbd_get_rec.call_args_list))
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetAllVdiForVMTestCase(VMUtilsTestBase):
def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
def fake_vbd_get_rec(session, vbd_ref):
return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}
def fake_vdi_get_uuid(session, vdi_ref):
return vdi_ref
vm_get_vbd_refs.return_value = ["0", "2"]
vbd_get_rec.side_effect = fake_vbd_get_rec
vdi_get_uuid.side_effect = fake_vdi_get_uuid
def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
expected = ['vdi_ref_0', 'vdi_ref_2']
self.assertEqual(expected, list(result))
def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
min_userdevice=1)
expected = ["vdi_ref_2"]
self.assertEqual(expected, list(result))
class GetAllVdisTestCase(VMUtilsTestBase):
def test_get_all_vdis_in_sr(self):
def fake_get_rec(record_type, ref):
if ref == "2":
return "vdi_rec_2"
session = mock.Mock()
session.call_xenapi.return_value = ["1", "2"]
session.get_rec.side_effect = fake_get_rec
sr_ref = "sr_ref"
actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
self.assertEqual(actual, [('2', 'vdi_rec_2')])
session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
class VDIAttachedHere(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'destroy_vbd')
@mock.patch.object(vm_utils, '_get_this_vm_ref')
@mock.patch.object(vm_utils, 'create_vbd')
@mock.patch.object(vm_utils, '_remap_vbd_dev')
@mock.patch.object(vm_utils, '_wait_for_device')
@mock.patch.object(utils, 'execute')
def test_sync_called(self, mock_execute, mock_wait_for_device,
mock_remap_vbd_dev, mock_create_vbd,
mock_get_this_vm_ref, mock_destroy_vbd):
session = _get_fake_session()
with vm_utils.vdi_attached_here(session, 'vdi_ref'):
pass
mock_execute.assert_called_with('sync', run_as_root=True)
class SnapshotAttachedHereTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
def test_snapshot_attached_here(self, mock_impl):
def fake_impl(session, instance, vm_ref, label, userdevice,
post_snapshot_callback):
self.assertEqual("session", session)
self.assertEqual("instance", instance)
self.assertEqual("vm_ref", vm_ref)
self.assertEqual("label", label)
self.assertEqual('0', userdevice)
self.assertIsNone(post_snapshot_callback)
yield "fake"
mock_impl.side_effect = fake_impl
with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
"label") as result:
self.assertEqual("fake", result)
mock_impl.assert_called_once_with("session", "instance", "vm_ref",
"label", '0', None)
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vdi_snapshot')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
mock_vdi_snapshot, mock_vdi_get_uuid,
mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
session = "session"
instance = {"uuid": "uuid"}
mock_callback = mock.Mock()
mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
{"SR": "sr_ref",
"uuid": "vdi_uuid"})
mock_vdi_snapshot.return_value = "snap_ref"
mock_vdi_get_uuid.return_value = "snap_uuid"
mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]
try:
with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
"label", '2', mock_callback) as result:
self.assertEqual(["a", "b"], result)
raise test.TestingException()
self.assertTrue(False)
except test.TestingException:
pass
mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
'2')
mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
"sr_ref", "vdi_ref", ['a', 'b'])
mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
mock.call(session, "snap_uuid")])
mock_callback.assert_called_once_with(
task_state="image_pending_upload")
mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
instance, ['a', 'b'], "sr_ref")
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid"])
self.assertFalse(mock_sleep.called)
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
mock_count):
mock_count.return_value = 2
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertFalse(mock_sleep.called)
self.assertTrue(mock_count.called)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
self.assertRaises(exception.NovaException,
vm_utils._wait_for_vhd_coalesce, "session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertTrue(mock_count.called)
self.assertEqual(20, mock_sleep.call_count)
self.assertEqual(20, mock_scan_sr.call_count)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertEqual(1, mock_sleep.call_count)
self.assertEqual(2, mock_scan_sr.call_count)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_count_children(self, mock_get_all_vdis_in_sr):
vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
mock_get_all_vdis_in_sr.return_value = vdis
self.assertEqual(2, vm_utils._count_children('session',
'parent1', 'sr'))
class ImportMigratedDisksTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance)
expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
mock_root.assert_called_once_with(session, instance)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks_import_root_false(self, mock_root,
mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance,
import_root=False)
expected = {'root': None, 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
self.assertEqual(0, mock_root.call_count)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrated_root_disk(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = {"uuid": "uuid", "name": "name"}
result = vm_utils._import_migrated_root_disk("s", instance)
self.assertEqual("foo", result)
mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
"name")
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrate_ephemeral_disks(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = compute.Instance(id=1, uuid=uuidutils.generate_uuid())
instance.old_flavor = compute.Flavor(ephemeral_gb=4000)
result = vm_utils._import_migrate_ephemeral_disks("s", instance)
self.assertEqual({'4': 'foo', '5': 'foo'}, result)
inst_uuid = instance.uuid
inst_name = instance.name
expected_calls = [mock.call("s", instance,
"%s_ephemeral_1" % inst_uuid,
"ephemeral",
"%s ephemeral (1)" % inst_name),
mock.call("s", instance,
"%s_ephemeral_2" % inst_uuid,
"ephemeral",
"%s ephemeral (2)" % inst_name)]
self.assertEqual(expected_calls, mock_migrate.call_args_list)
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_import_migrate_ephemeral_disks_use_old_flavor(self,
mock_get_sizes):
mock_get_sizes.return_value = []
instance = compute.Instance(id=1, uuid=uuidutils.generate_uuid(),
ephemeral_gb=2000)
instance.old_flavor = compute.Flavor(ephemeral_gb=4000)
vm_utils._import_migrate_ephemeral_disks("s", instance)
mock_get_sizes.assert_called_once_with(4000)
@mock.patch.object(vm_utils, '_set_vdi_info')
@mock.patch.object(vm_utils, 'scan_default_sr')
@mock.patch.object(vm_utils, 'get_sr_path')
def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
mock_set_info):
session = mock.Mock()
instance = {"uuid": "uuid"}
session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}}
session.call_xenapi.return_value = "vdi_ref"
mock_get_sr_path.return_value = "sr_path"
result = vm_utils._import_migrated_vhds(session, instance,
'chain_label', 'disk_type', 'vdi_label')
expected = {'uuid': "a", 'ref': "vdi_ref"}
self.assertEqual(expected, result)
mock_get_sr_path.assert_called_once_with(session)
session.call_plugin_serialized.assert_called_once_with('migration',
'move_vhds_into_sr', instance_uuid='chain_label',
sr_path='sr_path', uuid_stack=mock.ANY)
mock_scan_sr.assert_called_once_with(session)
session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
'vdi_label', 'disk_type', instance)
def test_get_vhd_parent_uuid_rec_provided(self):
session = mock.Mock()
vdi_ref = 'vdi_ref'
vdi_rec = {'sm_config': {}}
self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
vdi_ref,
vdi_rec))
self.assertFalse(session.call_xenapi.called)
class MigrateVHDTestCase(VMUtilsTestBase):
def _assert_transfer_called(self, session, label):
session.call_plugin_serialized.assert_called_once_with(
'migration', 'transfer_vhd', instance_uuid=label, host="dest",
vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
def test_migrate_vhd_root(self):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2)
self._assert_transfer_called(session, "a")
def test_migrate_vhd_ephemeral(self):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2, 2)
self._assert_transfer_called(session, "a_ephemeral_2")
def test_migrate_vhd_converts_exceptions(self):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
session.call_plugin_serialized.side_effect = test.TestingException()
instance = {"uuid": "a"}
self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
session, instance, "vdi_uuid", "dest", "sr_path", 2)
self._assert_transfer_called(session, "a")
class StripBaseMirrorTestCase(VMUtilsTestBase):
def test_strip_base_mirror_from_vdi_works(self):
session = mock.Mock()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
def test_strip_base_mirror_from_vdi_hides_error(self):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
session.call_xenapi.side_effect = test.TestingException()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
@mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
def test_strip_base_mirror_from_vdis(self, mock_strip):
def call_xenapi(method, arg):
if method == "VM.get_VBDs":
return ['VBD_ref_1', 'VBD_ref_2']
if method == "VBD.get_VDI":
return 'VDI' + arg[3:]
return "Unexpected call_xenapi: %s.%s" % (method, arg)
session = mock.Mock()
session.call_xenapi.side_effect = call_xenapi
vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")
expected = [mock.call('VM.get_VBDs', "vm_ref"),
mock.call('VBD.get_VDI', "VBD_ref_1"),
mock.call('VBD.get_VDI', "VBD_ref_2")]
self.assertEqual(expected, session.call_xenapi.call_args_list)
expected = [mock.call(session, "VDI_ref_1"),
mock.call(session, "VDI_ref_2")]
self.assertEqual(expected, mock_strip.call_args_list)
class DeviceIdTestCase(VMUtilsTestBase):
def test_device_id_is_none_if_not_specified_in_meta_data(self):
image_meta = compute.ImageMeta.from_dict({})
session = mock.Mock()
session.product_version = (6, 1, 0)
self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))
def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
image_meta = compute.ImageMeta.from_dict(
{'properties': {'xenapi_device_id': '0002'}})
session = mock.Mock()
session.product_version = (6, 2, 0)
self.assertEqual(2,
vm_utils.get_vm_device_id(session, image_meta))
session.product_version = (6, 3, 1)
self.assertEqual(2,
vm_utils.get_vm_device_id(session, image_meta))
def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
image_meta = compute.ImageMeta.from_dict(
{'properties': {'xenapi_device_id': '0002'}})
session = mock.Mock()
session.product_version = (6, 0)
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 2 specified is not supported by "
"hypervisor version (6, 0)", exc.message)
session.product_version = ('6a')
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 2 specified is not supported by "
"hypervisor version 6a", exc.message)
class CreateVmRecordTestCase(VMUtilsTestBase):
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_linux(self, mock_extract_flavor):
instance = compute.Instance(uuid="uuid123",
os_type="linux")
self._test_create_vm_record(mock_extract_flavor, instance, False)
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_windows(self, mock_extract_flavor):
instance = compute.Instance(uuid="uuid123",
os_type="windows")
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = compute.Flavor._from_db_object(
None, compute.Flavor(), test_flavor.fake_flavor)
self._test_create_vm_record(mock_extract_flavor, instance, True)
def _test_create_vm_record(self, mock_extract_flavor, instance,
is_viridian):
session = _get_fake_session()
flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
mock_extract_flavor.return_value = flavor
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = compute.Flavor(memory_mb=1024,
vcpus=1,
vcpu_weight=2)
vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
device_id=2)
is_viridian_str = str(is_viridian).lower()
expected_vm_rec = {
'VCPUs_params': {'cap': '0', 'weight': '2'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '',
'tags': [],
'VCPUs_max': '1',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': 'uuid123'},
'name_label': 'name',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '1',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': is_viridian_str,
'acpi': 'true', 'device_id': '0002'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False}
session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)
def test_list_vms(self):
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='compute.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
fake.create_vm("foo1", "Halted")
vm_ref = fake.create_vm("foo2", "Running")
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.list_vms(driver._session))
# Will have 3 VMs - but one is Dom0 and one is not running on the host
self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
self.assertEqual(len(result), 1)
result_keys = [key for (key, value) in result]
self.assertIn(vm_ref, result_keys)
class ChildVHDsTestCase(test.NoDBTestCase):
all_vdis = [
("my-vdi-ref",
{"uuid": "my-uuid", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("non-parent",
{"uuid": "uuid-1", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("diff-parent",
{"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child",
{"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child-snap",
{"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": True, "other_config": {}}),
]
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_defaults(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])
self.assertJsonEqual(['uuid-child', 'uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_only_snapshots(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_chain(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref",
["my-uuid", "other-uuid"], old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
def test_is_vdi_a_snapshot_works(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {}}
self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_base_images_false(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {"image-id": "fake"}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
vdi_rec = {"is_a_snapshot": False,
"other_config": {}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
instance = {"uuid": "fake"}
mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
vm_utils.remove_old_snapshots("session", instance, "vm_ref")
mock_delete.assert_called_once_with("session", instance,
["uuid1", "uuid2"], "sr_ref")
mock_get.assert_called_once_with("session", "vm_ref")
mock_walk.assert_called_once_with("session", "vdi")
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
instance = {"uuid": "fake"}
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid"], "sr")
self.assertFalse(mock_child.called)
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
instance = {"uuid": "fake"}
mock_child.return_value = []
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with("session", "sr", ["uuid2"],
old_snapshots_only=True)
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
mock_destroy, mock_scan):
instance = {"uuid": "fake"}
mock_child.return_value = ["suuid1", "suuid2"]
session = mock.Mock()
session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]
vm_utils._delete_snapshots_in_vdi_chain(session, instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with(session, "sr", ["uuid2"],
old_snapshots_only=True)
session.VDI.get_by_uuid.assert_has_calls([
mock.call("suuid1"), mock.call("suuid2")])
mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
mock_scan.assert_called_once_with(session, "sr")
class ResizeFunctionTestCase(test.NoDBTestCase):
def _call_get_resize_func_name(self, brand, version):
session = mock.Mock()
session.product_brand = brand
session.product_version = version
return vm_utils._get_resize_func_name(session)
def _test_is_resize(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize", result)
def _test_is_resize_online(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize_online", result)
def test_xenserver_5_5(self):
self._test_is_resize_online("XenServer", (5, 5, 0))
def test_xenserver_6_0(self):
self._test_is_resize("XenServer", (6, 0, 0))
def test_xcp_1_1(self):
self._test_is_resize_online("XCP", (1, 1, 0))
def test_xcp_1_2(self):
self._test_is_resize("XCP", (1, 2, 0))
def test_xcp_2_0(self):
self._test_is_resize("XCP", (2, 0, 0))
def test_random_brand(self):
self._test_is_resize("asfd", (1, 1, 0))
def test_default(self):
self._test_is_resize(None, None)
def test_empty(self):
self._test_is_resize("", "")
def test_bad_version(self):
self._test_is_resize("XenServer", "asdf")
class VMInfoTests(VMUtilsTestBase):
def setUp(self):
super(VMInfoTests, self).setUp()
self.session = mock.Mock()
def test_get_power_state_valid(self):
# Save on test setup calls by having these simple tests in one method
self.session.call_xenapi.return_value = "Running"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.RUNNING)
self.session.call_xenapi.return_value = "Halted"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SHUTDOWN)
self.session.call_xenapi.return_value = "Paused"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.PAUSED)
self.session.call_xenapi.return_value = "Suspended"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SUSPENDED)
self.session.call_xenapi.return_value = "Crashed"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.CRASHED)
def test_get_power_state_invalid(self):
self.session.call_xenapi.return_value = "Invalid"
self.assertRaises(KeyError,
vm_utils.get_power_state, self.session, "ref")
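    # Canned VM.get_* values consumed by the fake call_xenapi in test_compile_info below.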
_XAPI_record = {'power_state': 'Running',
'memory_static_max': str(10 << 10),
'memory_dynamic_max': str(9 << 10),
'VCPUs_max': '5'}
def test_compile_info(self):
def call_xenapi(method, *args):
if method.startswith('VM.get_') and args[0] == 'dummy':
return self._XAPI_record[method[7:]]
self.session.call_xenapi.side_effect = call_xenapi
info = vm_utils.compile_info(self.session, "dummy")
self.assertEqual(hardware.InstanceInfo(state=power_state.RUNNING,
max_mem_kb=10, mem_kb=9,
num_cpu='5', cpu_time_ns=0),
info)
| {
"content_hash": "1b873a9aa6a80ebc133978702f1beccc",
"timestamp": "",
"source": "github",
"line_count": 2486,
"max_line_length": 79,
"avg_line_length": 40.43765084473049,
"alnum_prop": 0.5791620245105841,
"repo_name": "HybridF5/jacket",
"id": "c9aa2cff91c5d0eadd9165370d4a9313be1615ba",
"size": "101164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/tests/compute/unit/virt/xenapi/test_vm_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
from django.forms.fields import *
from corehq.apps.sms.forms import BackendForm
from dimagi.utils.django.fields import TrimmedCharField
class UnicelBackendForm(BackendForm):
username = TrimmedCharField()
password = TrimmedCharField()
sender = TrimmedCharField()
| {
"content_hash": "eb8368df0d3b24ca534d650b9e939b51",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 55,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.7898550724637681,
"repo_name": "gmimano/commcaretest",
"id": "390b548ad8058d8f1c2095ddfedffaf4e9a620fc",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/unicel/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
} |
"""
Adaptive thresholding
when an image has different lighting conditions in different areas, simple (global) thresholding gives poor results
Adaptive: algorithm calculates threshold for small regions of image
- different thresholds for different regions of same image
- gives better results for images with varying illumination
"""
# 3 special input params, one output param
# adaptive method - how the threshold value is calculated
# - cv2.ADAPTIVE_THRESH_MEAN_C - threshold value is mean of nbhd area
# - cv2.ADAPTIVE_THRESH_GAUSSIAN_C - threshold val is weighted sum of nbhd vals
#   weights are a gaussian window
# Block size - decides size of nbhd area
# C - constant subtracted from mean or weighted mean calculated
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('dave.jpg', 0)
img = cv2.medianBlur(img, 5)
ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
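# Both adaptive calls below use an 11x11 pixel neighbourhood and subtract a constant C of 2.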
th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \
                            cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, 11, 2)
titles = ['Original Image', 'Global Thresholding (v = 127)', 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, th1, th2, th3]
for i in range(4):
plt.subplot(2, 2, i+1), plt.imshow(images[i], 'gray')
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
| {
"content_hash": "e8e3442deb498881265d4bef2433622a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 124,
"avg_line_length": 38,
"alnum_prop": 0.7297297297297297,
"repo_name": "SSG-DRD-IOT/commercial-iot-security-system",
"id": "96d6bc336e73ebd9d80ee8c2b7971e03b8472ad6",
"size": "1406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opencv/tutorials/imageProcessing/thresholding/adaptive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "341"
},
{
"name": "Python",
"bytes": "278625"
}
],
"symlink_target": ""
} |
import os
import sys
import unittest
from importlib import reload
from shutil import copyfile
from unittest.mock import patch
from tests.utils import (CaptureOutput, assemble_for_selfie,
for_all_test_results, list_files)
import self as grader
from lib.checks import execute
from self import assignments, main
class TestRiscvInstruction(unittest.TestCase):
@classmethod
def setUpClass(self):
self.instructions = list(
map(lambda f: f[:-2], list_files('tests/instructions', extension='.s')))
def execute_mock(self, command, timeout=60):
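        # Stand-in for lib.checks.execute: materialise the instruction fixture the command refers to and always report success.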
if '.tmp.bin' in command:
for instruction in self.instructions:
if instruction in command:
assemble_for_selfie('instructions/' + instruction + '.s')
if '.tmp.s' in command:
for instruction in self.instructions:
if instruction in command:
copyfile('tests/instructions/' +
instruction + '.s', '.tmp.s')
return (0, '')
def check_encoding_results(self, result, msg):
if 'RISC-V encoding' in msg:
self.assertTrue(
result, 'following encoding test passed "' + msg + '"')
if 'assembly instruction format' in msg:
self.assertTrue(
result, 'following format test passed "' + msg + '"')
@patch('lib.checks.execute')
def test_instruction(self, mock):
mock.side_effect = self.execute_mock
with CaptureOutput() as capture:
for assignment in assignments:
grader.main([sys.argv[0], assignment.name])
for_all_test_results(capture.get_output(), self.check_encoding_results)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "f12721ba0f3ddf02c6940f681558261c",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 84,
"avg_line_length": 31.49122807017544,
"alnum_prop": 0.6022284122562674,
"repo_name": "cksystemsteaching/selfie",
"id": "650b908424d0d368b72adcc01adcd186545c0c8e",
"size": "1795",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "grader/tests/test_riscv_instruction.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "9204"
},
{
"name": "C",
"bytes": "732917"
},
{
"name": "Dockerfile",
"bytes": "6639"
},
{
"name": "Jupyter Notebook",
"bytes": "1376630"
},
{
"name": "Makefile",
"bytes": "26664"
},
{
"name": "Nix",
"bytes": "53"
},
{
"name": "OpenQASM",
"bytes": "11862"
},
{
"name": "Python",
"bytes": "353845"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.gen_gripper_pose import GenGripperPose
from flexbe_states.check_condition_state import CheckConditionState
from sara_flexbe_states.moveit_move import MoveitMove
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Fri Oct 20 2017
@author: Philippe La Madeleine
'''
class Check_reachabilitySM(Behavior):
'''
check if the object is in range
'''
def __init__(self):
super(Check_reachabilitySM, self).__init__()
self.name = 'Check_reachability'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:609 y:365, x:602 y:89
_state_machine = OperatableStateMachine(outcomes=['ok', 'too_far'], input_keys=['pose'])
_state_machine.userdata.pose = 0
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:42 y:56
OperatableStateMachine.add('gen',
                                        GenGripperPose(l=0, z=0, planar=False),
transitions={'done': 'kinematic test', 'fail': 'too_far'},
autonomy={'done': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'pose_in': 'pose', 'pose_out': 'pose_out'})
# x:195 y:347
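            # The predicate below keeps targets within 1.5 m of the point (0, 0, 1) in the base frame, presumably an approximate reach envelope for the arm.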
OperatableStateMachine.add('third check',
CheckConditionState(predicate=lambda x: (x.position.x**2+x.position.y**2+(x.position.z-1))**0.5 < 1.5),
transitions={'true': 'kinematic test', 'false': 'too_far'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'input_value': 'pose_out'})
# x:190 y:147
OperatableStateMachine.add('first check',
CheckConditionState(predicate=lambda x: x.position.x<0.8),
transitions={'true': 'second check', 'false': 'too_far'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'input_value': 'pose_out'})
# x:196 y:253
OperatableStateMachine.add('second check',
CheckConditionState(predicate=lambda x: x.position.z>0.5),
transitions={'true': 'third check', 'false': 'too_far'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'input_value': 'pose_out'})
# x:99 y:520
OperatableStateMachine.add('kinematic test',
MoveitMove(move=False, waitForExecution=True, group="RightArm", watchdog=15),
transitions={'done': 'ok', 'failed': 'too_far'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'target': 'pose_out'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| {
"content_hash": "41600c26df29cf0b4b35680645de1e7b",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 115,
"avg_line_length": 32,
"alnum_prop": 0.6518342391304348,
"repo_name": "WalkingMachine/sara_behaviors",
"id": "964acf6bcfdb818f7ae341ce8f450e261785e925",
"size": "3351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sara_flexbe_behaviors/src/sara_flexbe_behaviors/check_reachability_sm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "6456"
},
{
"name": "CMake",
"bytes": "2065"
},
{
"name": "Python",
"bytes": "905600"
},
{
"name": "Shell",
"bytes": "2661"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Template'
db.create_table('reports_template', (
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(
to=orm['core.Object'], unique=True, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')
(max_length=512)),
('details', self.gf('django.db.models.fields.TextField')
(null=True, blank=True)),
('model', self.gf('django.db.models.fields.TextField')
(null=True, blank=True)),
))
db.send_create_signal('reports', ['Template'])
# Adding model 'Report'
db.create_table('reports_report', (
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(
to=orm['core.Object'], unique=True, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')
(max_length=512)),
('template', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['reports.Template'], null=True, blank=True)),
('content', self.gf('django.db.models.fields.TextField')
(null=True, blank=True)),
))
db.send_create_signal('reports', ['Report'])
def backwards(self, orm):
# Deleting model 'Template'
db.delete_table('reports_template')
# Deleting model 'Report'
db.delete_table('reports_report')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'everybody_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'everybody_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'everybody_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']"}),
'group_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}),
'user_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User'},
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'reports.report': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Report', '_ormbases': ['core.Object']},
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reports.Template']", 'null': 'True', 'blank': 'True'})
},
'reports.template': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Template', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['reports']
| {
"content_hash": "76d32d6bd042da522920fc09c6cb5c37",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 203,
"avg_line_length": 70.8840579710145,
"alnum_prop": 0.5510120629728072,
"repo_name": "havard024/prego",
"id": "6d8f7a4012aa84dba4fbac69f18c07c8970edabd",
"size": "9913",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "reports/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2978"
},
{
"name": "CSS",
"bytes": "620190"
},
{
"name": "JavaScript",
"bytes": "2456120"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "34948766"
},
{
"name": "Shell",
"bytes": "12359"
},
{
"name": "TeX",
"bytes": "113674"
}
],
"symlink_target": ""
} |
"""
WSGI config for the pythonwetter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pythonwetter.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
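# Hedged usage note (editor's sketch, not part of the original file): a WSGI
# server is normally pointed at the ``application`` object above, for example
# with gunicorn:
#   gunicorn pythonwetter.wsgi:application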
| {
"content_hash": "5dbac514f00b1401ed718d62b2968a83",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 29.071428571428573,
"alnum_prop": 0.7493857493857494,
"repo_name": "PatteWi/pythonwetter",
"id": "f9d60e293fc9de2c01d40f7441f94a1176c79b3a",
"size": "407",
"binary": false,
"copies": "2",
"ref": "refs/heads/si-project",
"path": "pythonwetter/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "31723"
},
{
"name": "Python",
"bytes": "26535"
}
],
"symlink_target": ""
} |
import ConfigParser
import sys, os, fnmatch
import re
import copy
import gen_utils
import glob
from meta_module import MetaModule
import modules
import mutex
from random import choice
from module_base import DefaultConfigClass
import time
import types
import traceback
# some notes with regards to extra module state/logic required for scheduling
# * in general, execute_module()/transfer_output()/etc calls do exactly that
# when called, i.e. they don't automatically cache. The scheduler should
# take care of caching by making the necessary isModified() or
# shouldTransfer() calls. The reason for this is so that the module
# actions can be forced
#
# notes with regards to execute on change:
# * devide.py should register a "change" handler with the ModuleManager.
# I've indicated places with "execute on change" where I think this
# handler should be invoked. devide.py can then invoke the scheduler.
#########################################################################
class ModuleManagerException(Exception):
pass
#########################################################################
class ModuleSearch:
"""Class for doing relative fast searches through module metadata.
@author Charl P. Botha <http://cpbotha.net/>
"""
def __init__(self):
# dict of dicts of tuple, e.g.:
# {'isosurface' : {('contour', 'keywords') : 1,
# ('marching', 'help') : 1} ...
self.search_dict = {}
self.previous_partial_text = ''
self.previous_results = None
def build_search_index(self, available_modules, available_segments):
"""Build search index given a list of available modules and segments.
@param available_modules: available modules dictionary from module
manager with module metadata classes as values.
@param available_segments: simple list of segments.
"""
self.search_dict.clear()
def index_field(index_name, mi_class, field_name, split=False):
try:
field = getattr(mi_class, field_name)
except AttributeError:
pass
else:
if split:
iter_list = field.split()
else:
iter_list = field
for w in iter_list:
wl = w.lower()
if wl not in self.search_dict:
self.search_dict[wl] = {(index_name,
field_name) : 1}
else:
self.search_dict[wl][(index_name,
field_name)] = 1
for module_name in available_modules:
mc = available_modules[module_name]
index_name = 'module:%s' % (module_name,)
short_module_name = mc.__name__.lower()
if short_module_name not in self.search_dict:
self.search_dict[short_module_name] = {(index_name, 'name') :
1}
else:
# we don't have to increment, there can be only one unique
# complete module_name
self.search_dict[short_module_name][(index_name, 'name')] = 1
index_field(index_name, mc, 'keywords')
index_field(index_name, mc, 'help', True)
for segment_name in available_segments:
index_name = 'segment:%s' % (segment_name,)
# segment name's are unique by definition (complete file names)
self.search_dict[segment_name] = {(index_name, 'name') : 1}
def find_matches(self, partial_text):
"""Do partial text (containment) search through all module names,
help and keywords.
Simple caching is currently done. Each space-separated word in
partial_text is searched for and results are 'AND'ed.
@returns: a list of unique tuples consisting of (modulename,
where_found) where where_found is 'name', 'keywords' or 'help'
"""
# cache results in case the user asks for exactly the same
if partial_text == self.previous_partial_text:
return self.previous_results
partial_words = partial_text.lower().split()
# dict mapping from full.module.name -> {'where_found' : 1, 'wf2' : 1}
# think about optimising this with a bit mask rather; less flexible
# but saves space and is at least as fast.
def find_one_word(search_word):
"""Searches for all partial / containment matches with
search_word.
@returns: search_results dict mapping from module name to
dictionary with where froms as keys and 1s as values.
"""
search_results = {}
for w in self.search_dict:
if w.find(search_word) >= 0:
# we can have partial matches with more than one key
# returning the same location, so we stuff results in a
# dict too to consolidate results
for k in self.search_dict[w].keys():
# k[1] is where_found, k[0] is module_name
if k[0] not in search_results:
search_results[k[0]] = {k[1] : 1}
else:
search_results[k[0]][k[1]] = 1
return search_results
# search using each of the words in the given list
search_results_list = []
for search_word in partial_words:
search_results_list.append(find_one_word(search_word))
# if more than one word, combine the results;
# a module + where_from result is only shown if ALL words occur in
# that specific module + where_from.
sr0 = search_results_list[0]
srl_len = len(search_results_list)
if srl_len > 1:
# will create brand-new combined search_results dict
search_results = {}
# iterate through all module names in the first word's results
for module_name in sr0:
# we will only process a module_name if it occurs in the
# search results of ALL search words
all_found = True
for sr in search_results_list[1:]:
if module_name not in sr:
all_found = False
break
# now only take results where ALL search words occur
# in the same where_from of a specific module
if all_found:
temp_finds = {}
for sr in search_results_list:
# sr[module_name] is a dict with where_founds as keys
# by definition (dictionary) all where_founds are
# unique per sr[module_name]
for i in sr[module_name].keys():
if i in temp_finds:
temp_finds[i] += 1
else:
temp_finds[i] = 1
# extract where_froms for which the number of hits is
# equal to the number of words.
temp_finds2 = [wf for wf in temp_finds.keys() if
temp_finds[wf] == srl_len]
# make new dictionary from temp_finds2 list as keys,
# 1 as value
search_results[module_name] = dict.fromkeys(temp_finds2,1)
else:
# only a single word was searched for.
search_results = sr0
self.previous_partial_text = partial_text
rl = search_results
self.previous_results = rl
return rl
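# Illustrative sketch added for documentation only; it is not part of the
# original DeVIDE source and is never called by the application. It shows how
# ModuleSearch above is meant to be driven; the metadata class and module spec
# below are hypothetical stand-ins for what module_index.py files provide.
def _module_search_sketch():
    class contour:  # module_index classes are named after their module
        keywords = ['contour', 'isosurface']
        help = 'Extracts an isosurface using marching cubes.'
    ms = ModuleSearch()
    ms.build_search_index({'modules.filters.contour': contour}, [])
    # with two search words, a (module, where_found) hit is only reported
    # when BOTH words occur in that same where_found
    return ms.find_matches('marching isosurface')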
#########################################################################
class PickledModuleState:
def __init__(self):
self.module_config = DefaultConfigClass()
# e.g. modules.Viewers.histogramSegment
self.module_name = None
# this is the unique name of the module, e.g. dvm15
self.instance_name = None
#########################################################################
class PickledConnection:
def __init__(self, source_instance_name=None, output_idx=None,
target_instance_name=None, input_idx=None, connection_type=None):
self.source_instance_name = source_instance_name
self.output_idx = output_idx
self.target_instance_name = target_instance_name
self.input_idx = input_idx
self.connection_type = connection_type
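# Illustrative sketch added for documentation only; it is not part of the
# original DeVIDE source and is never called by the application. It shows how
# the two pickle containers above fit together when a network is saved; the
# module spec and instance names are hypothetical.
def _pickled_network_sketch():
    pms = PickledModuleState()
    pms.module_name = 'modules.readers.vtiRDR'  # full module spec
    pms.instance_name = 'dvm0'
    # output 0 of 'dvm0' feeds input 1 of 'dvm1'; type 1 is the normal
    # connection type used by serialise_module_instances()
    conn = PickledConnection('dvm0', 0, 'dvm1', 1, connection_type=1)
    return {pms.instance_name: pms}, [conn]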
#########################################################################
class ModuleManager:
"""This class in responsible for picking up new modules in the modules
directory and making them available to the rest of the program.
@todo: we should split this functionality into a ModuleManager and
networkManager class. One ModuleManager per processing node,
global networkManager to coordinate everything.
@todo: ideally, ALL module actions should go via the MetaModule.
@author: Charl P. Botha <http://cpbotha.net/>
"""
def __init__(self, devide_app):
"""Initialise module manager by fishing .py devide modules from
all pertinent directories.
"""
self._devide_app = devide_app
# module dictionary, keyed on instance... cool.
# values are MetaModules
self._module_dict = {}
appdir = self._devide_app.get_appdir()
self._modules_dir = os.path.join(appdir, 'modules')
#sys.path.insert(0,self._modules_dir)
self._userModules_dir = os.path.join(appdir, 'userModules')
############################################################
# initialise module Kits - Kits are collections of libraries
# that modules can depend on. The Kits also make it possible
# for us to write hooks for when these libraries are imported
import module_kits
module_kits.load(self)
# binding to module that can be used without having to do
# import module_kits
self.module_kits = module_kits
##############################################################
self.module_search = ModuleSearch()
# make first scan of available modules
self.scan_modules()
# auto_execute mode, still need to link this up with the GUI
self.auto_execute = True
# this is a list of modules that have the ability to start a network
# executing all by themselves and usually do... when we break down
# a network, we should take these out first. when we build a network
# we should put them down last
# slice3dVWRs must be connected LAST, histogramSegment second to last
# and all the rest before them
self.consumerTypeTable = {'slice3dVWR' : 5,
'histogramSegment' : 4}
# we'll use this to perform mutex-based locking on the progress
# callback... (there SHOULD only be ONE ModuleManager instance)
self._inProgressCallback = mutex.mutex()
def refresh_module_kits(self):
"""Go through list of imported module kits, reload each one, and
also call its refresh() method if available.
This means a kit author can work on a module_kit and just refresh
when she wants her changes to be available. However, the kit must
have loaded successfully at startup, else no-go.
"""
for module_kit in self.module_kits.module_kit_list:
kit = getattr(self.module_kits, module_kit)
try:
refresh_method = getattr(kit, "refresh")
except AttributeError:
pass
else:
try:
reload(kit)
refresh_method()
except Exception, e:
self._devide_app.log_error_with_exception(
'Unable to refresh module_kit %s: '
'%s. Continuing...' %
(module_kit, str(e)))
else:
self.set_progress(100, 'Refreshed %s.' % (module_kit,))
def close(self):
"""Iterates through each module and closes it.
This is only called during devide application shutdown.
"""
self.delete_all_modules()
def delete_all_modules(self):
"""Deletes all modules.
This is usually only called during the offline mode of operation. In
view mode, the GraphEditor takes care of the deletion of all networks.
"""
        # this is fine because .values() returns a copy of the module list
for mModule in self._module_dict.values():
print "Deleting %s (%s) >>>>>" % \
(mModule.instance_name,
mModule.instance.__class__.__name__)
try:
self.delete_module(mModule.instance)
except Exception, e:
# we can't allow a module to stop us
print "Error deleting %s (%s): %s" % \
(mModule.instance_name,
mModule.instance.__class__.__name__,
str(e))
print "FULL TRACE:"
traceback.print_exc()
def apply_module_view_to_logic(self, instance):
"""Interface method that can be used by clients to transfer module
view to underlying logic.
This is called by module_utils (the ECASH button handlers) and thunks
through to the relevant MetaModule call.
"""
mModule = self._module_dict[instance]
try:
# these two MetaModule wrapper calls will take care of setting
# the modified flag / time correctly
if self._devide_app.view_mode:
# only in view mode do we call this transfer
mModule.view_to_config()
mModule.config_to_logic()
# we round-trip so that view variables that are dependent on
# the effective changes to logic and/or config can update
instance.logic_to_config()
if self._devide_app.view_mode:
instance.config_to_view()
except Exception, e:
# we are directly reporting the error, as this is used by
# a utility function that is too compact to handle an
# exception by itself. Might change in the future.
self._devide_app.log_error_with_exception(str(e))
def sync_module_logic_with_config(self, instance):
"""Method that should be called during __init__ for all (view and
non-view) modules, after the config structure has been set.
In the view() method, or after having setup the view in view-modules,
also call syncModuleViewWithLogic()
"""
instance.config_to_logic()
instance.logic_to_config()
def sync_module_view_with_config(self, instance):
"""If DeVIDE is in view model, transfor config information to view
and back again. This is called AFTER sync_module_logic_with_config(),
usually in the module view() method after createViewFrame().
"""
if self._devide_app.view_mode:
# in this case we don't round trip, view shouldn't change
# things that affect the config.
instance.config_to_view()
def sync_module_view_with_logic(self, instance):
"""Interface method that can be used by clients to transfer config
information from the underlying module logic (model) to the view.
At the moment used by standard ECASH handlers.
"""
try:
instance.logic_to_config()
# we only do the view transfer if DeVIDE is in the correct mode
if self._devide_app.view_mode:
instance.config_to_view()
except Exception, e:
# we are directly reporting the error, as this is used by
# a utility function that is too compact to handle an
# exception by itself. Might change in the future.
self._devide_app.log_error_with_exception(str(e))
syncModuleViewWithLogic = sync_module_view_with_logic
def blockmodule(self, meta_module):
meta_module.blocked = True
def unblockmodule(self, meta_module):
meta_module.blocked = False
def log_error(self, message):
"""Convenience method that can be used by modules.
"""
self._devide_app.log_error(message)
def log_error_list(self, message_list):
self._devide_app.log_error_list(message_list)
def log_error_with_exception(self, message):
"""Convenience method that can be used by modules.
"""
self._devide_app.log_error_with_exception(message)
def log_info(self, message):
"""Convenience method that can be used by modules.
"""
self._devide_app.log_info(message)
def log_message(self, message):
"""Convenience method that can be used by modules.
"""
self._devide_app.log_message(message)
def log_warning(self, message):
"""Convenience method that can be used by modules.
"""
self._devide_app.log_warning(message)
def scan_modules(self):
"""(Re)Check the modules directory for *.py files and put them in
the list self.module_files.
"""
# this is a dict mapping from full module name to the classes as
# found in the module_index.py files
self._available_modules = {}
appDir = self._devide_app.get_appdir()
# module path without trailing slash
modulePath = self.get_modules_dir()
# search through modules hierarchy and pick up all module_index files
####################################################################
module_indices = []
def miwFunc(arg, dirname, fnames):
"""arg is top-level module path.
"""
module_path = arg
for fname in fnames:
mi_full_name = os.path.join(dirname, fname)
if not fnmatch.fnmatch(fname, 'module_index.py'):
continue
# e.g. /viewers/module_index
mi2 = os.path.splitext(
mi_full_name.replace(module_path, ''))[0]
# e.g. .viewers.module_index
mim = mi2.replace(os.path.sep, '.')
                # remove the leading '.'
if mim.startswith('.'):
mim = mim[1:]
# special case: modules in the central devide
# module dir should be modules.viewers.module_index
# this is mostly for backward compatibility
if module_path == modulePath:
mim = 'modules.%s' % (mim)
module_indices.append(mim)
os.path.walk(modulePath, miwFunc, arg=modulePath)
for emp in self.get_app_main_config().extra_module_paths:
# make sure there are no extra spaces at the ends, as well
# as normalize and absolutize path (haha) for this
# platform
emp = os.path.abspath(emp.strip())
if emp and os.path.exists(emp):
# make doubly sure we only process an EMP if it's
# really there
if emp not in sys.path:
sys.path.insert(0,emp)
os.path.walk(emp, miwFunc, arg=emp)
# iterate through the moduleIndices, building up the available
# modules list.
import module_kits # we'll need this to check available kits
failed_mis = {}
for mim in module_indices:
# mim is importable module_index spec, e.g.
# modules.viewers.module_index
# if this thing was imported before, we have to remove it, else
# classes that have been removed from the module_index file
# will still appear after the reload.
if mim in sys.modules:
del sys.modules[mim]
try:
# now we can import
__import__(mim, globals(), locals())
except Exception, e:
# make a list of all failed moduleIndices
failed_mis[mim] = sys.exc_info()
msgs = gen_utils.exceptionToMsgs()
                # and log them as messages
self._devide_app.log_info(
'Error loading %s: %s.' % (mim, str(e)))
for m in msgs:
self._devide_app.log_info(m.strip(), timeStamp=False)
# we don't want to throw an exception here, as that would
                # mean that a single misconfigured module_index file can
# prevent the whole scan_modules process from completing
# so we'll report on errors here and at the end
else:
# reload, as this could be a run-time rescan
m = sys.modules[mim]
reload(m)
# find all classes in the imported module
cs = [a for a in dir(m)
if type(getattr(m,a)) == types.ClassType]
# stuff these classes, keyed on the module name that they
# represent, into the modules list.
for a in cs:
# a is the name of the class
c = getattr(m,a)
module_deps = True
for kit in c.kits:
if kit not in module_kits.module_kit_list:
module_deps = False
break
if module_deps:
module_name = mim.replace('module_index', a)
self._available_modules[module_name] = c
# we should move this functionality to the graphEditor. "segments"
# are _probably_ only valid there... alternatively, we should move
# the concept here
segmentList = []
def swFunc(arg, dirname, fnames):
segmentList.extend([os.path.join(dirname, fname)
for fname in fnames
if fnmatch.fnmatch(fname, '*.dvn')])
os.path.walk(os.path.join(appDir, 'segments'), swFunc, arg=None)
# this is purely a list of segment filenames
self.availableSegmentsList = segmentList
# self._available_modules is a dict keyed on module_name with
# module description class as value
self.module_search.build_search_index(self._available_modules,
self.availableSegmentsList)
# report on accumulated errors - this is still a non-critical error
# so we don't throw an exception.
if len(failed_mis) > 0:
failed_indices = '\n'.join(failed_mis.keys())
self._devide_app.log_error(
'The following module indices failed to load '
'(see message log for details): \n%s' \
% (failed_indices,))
self._devide_app.log_info(
'%d modules and %d segments scanned.' %
(len(self._available_modules), len(self.availableSegmentsList)))
########################################################################
def get_appdir(self):
return self._devide_app.get_appdir()
def get_app_main_config(self):
return self._devide_app.main_config
def get_available_modules(self):
"""Return the available_modules, a dictionary keyed on fully qualified
module name (e.g. modules.Readers.vtiRDR) with values the classes
defined in module_index files.
"""
return self._available_modules
def get_instance(self, instance_name):
"""Given the unique instance name, return the instance itself.
If the module doesn't exist, return None.
"""
found = False
for instance, mModule in self._module_dict.items():
if mModule.instance_name == instance_name:
found = True
break
if found:
return mModule.instance
else:
return None
def get_instance_name(self, instance):
"""Given the actual instance, return its unique instance. If the
instance doesn't exist in self._module_dict, return the currently
halfborn instance.
"""
try:
return self._module_dict[instance].instance_name
except Exception:
return self._halfBornInstanceName
def get_meta_module(self, instance):
"""Given an instance, return the corresponding meta_module.
@param instance: the instance whose meta_module should be returned.
@return: meta_module corresponding to instance.
@raise KeyError: this instance doesn't exist in the module_dict.
"""
return self._module_dict[instance]
def get_modules_dir(self):
return self._modules_dir
def get_module_view_parent_window(self):
        # NOTE: this stub is shadowed by the identically named
        # get_module_view_parent_window() defined further down in this class
return self.get_module_view_parent_window()
def get_module_spec(self, module_instance):
"""Given a module instance, return the full module spec.
"""
return 'module:%s' % (module_instance.__class__.__module__,)
def get_module_view_parent_window(self):
"""Get parent window for module windows.
THIS METHOD WILL BE DEPRECATED. The ModuleManager and view-less
(back-end) modules shouldn't know ANYTHING about windows or UI
aspects.
"""
try:
return self._devide_app.get_interface().get_main_window()
except AttributeError:
# the interface has no main_window
return None
def create_module(self, fullName, instance_name=None):
"""Try and create module fullName.
@param fullName: The complete module spec below application directory,
e.g. modules.Readers.hdfRDR.
@return: module_instance if successful.
@raises ModuleManagerException: if there is a problem creating the
module.
"""
if fullName not in self._available_modules:
raise ModuleManagerException(
'%s is not available in the current Module Manager / '
'Kit configuration.' % (fullName,))
try:
# think up name for this module (we have to think this up now
# as the module might want to know about it whilst it's being
            # constructed)
instance_name = self._make_unique_instance_name(instance_name)
self._halfBornInstanceName = instance_name
# perform the conditional import/reload
self.import_reload(fullName)
# import_reload requires this afterwards for safety reasons
exec('import %s' % fullName)
# in THIS case, there is a McMillan hook which'll tell the
# installer about all the devide modules. :)
ae = self.auto_execute
self.auto_execute = False
try:
# then instantiate the requested class
module_instance = None
exec(
'module_instance = %s.%s(self)' % (fullName,
fullName.split('.')[-1]))
finally:
# do the following in all cases:
self.auto_execute = ae
# if there was an exception, it will now be re-raised
if hasattr(module_instance, 'PARTS_TO_INPUTS'):
pti = module_instance.PARTS_TO_INPUTS
else:
pti = None
if hasattr(module_instance, 'PARTS_TO_OUTPUTS'):
pto = module_instance.PARTS_TO_OUTPUTS
else:
pto = None
# and store it in our internal structures
self._module_dict[module_instance] = MetaModule(
module_instance, instance_name, fullName, pti, pto)
# it's now fully born ;)
self._halfBornInstanceName = None
except ImportError, e:
# we re-raise with the three argument form to retain full
# trace information.
es = "Unable to import module %s: %s" % (fullName, str(e))
raise ModuleManagerException, es, sys.exc_info()[2]
except Exception, e:
es = "Unable to instantiate module %s: %s" % (fullName, str(e))
raise ModuleManagerException, es, sys.exc_info()[2]
# return the instance
return module_instance
def import_reload(self, fullName):
"""This will import and reload a module if necessary. Use this only
for things in modules or userModules.
If we're NOT running installed, this will run import on the module.
If it's not the first time this module is imported, a reload will
be run straight after.
If we're running installed, reloading only makes sense for things in
userModules, so it's only done for these modules. At the moment,
the stock Installer reload() is broken. Even with my fixes, it doesn't
recover memory used by old modules, see:
http://trixie.triqs.com/pipermail/installer/2003-May/000303.html
This is one of the reasons we try to avoid unnecessary reloads.
You should use this as follows:
        ModuleManager.import_reload('full.path.to.my.module')
import full.path.to.my.module
so that the module is actually brought into the calling namespace.
import_reload used to return the modulePrefix object, but this has
been changed to force module writers to use a conventional import
afterwards so that the McMillan installer will know about dependencies.
"""
# this should yield modules or userModules
modulePrefix = fullName.split('.')[0]
# determine whether this is a new import
if not sys.modules.has_key(fullName):
newModule = True
else:
newModule = False
        # import the correct module - we have to do this in any case to
# get the thing into our local namespace
exec('import ' + fullName)
# there can only be a reload if this is not a newModule
if not newModule:
exec('reload(' + fullName + ')')
# we need to inject the import into the calling dictionary...
# importing my.module results in "my" in the dictionary, so we
# split at '.' and return the object bound to that name
# return locals()[modulePrefix]
# we DON'T do this anymore, so that module writers are forced to
# use an import statement straight after calling import_reload (or
# somewhere else in the module)
def isInstalled(self):
"""Returns True if devide is running from an Installed state.
Installed of course refers to being installed with Gordon McMillan's
Installer. This can be used by devide modules to determine whether
they should use reload or not.
"""
return hasattr(modules, '__importsub__')
def execute_module(self, meta_module, part=0, streaming=False):
"""Execute module instance.
Important: this method does not result in data being transferred
after the execution, it JUST performs the module execution. This
method is called by the scheduler during network execution. No
other call should be used to execute a single module!
        @param meta_module: meta module whose instance is to be executed.
@raise ModuleManagerException: this exception is raised with an
informative error string if a module fails to execute.
@return: Nothing.
"""
try:
# this goes via the MetaModule so that time stamps and the
# like are correctly reported
meta_module.execute_module(part, streaming)
except Exception, e:
# get details about the errored module
instance_name = meta_module.instance_name
module_name = meta_module.instance.__class__.__name__
# and raise the relevant exception
es = 'Unable to execute part %d of module %s (%s): %s' \
% (part, instance_name, module_name, str(e))
# we use the three argument form so that we can add a new
# message to the exception but we get to see the old traceback
# see: http://docs.python.org/ref/raise.html
raise ModuleManagerException, es, sys.exc_info()[2]
def execute_network(self, startingModule=None):
"""Execute local network in order, starting from startingModule.
This is a utility method used by module_utils to bind to the Execute
control found on must module UIs. We are still in the process
of formalising the concepts of networks vs. groups of modules.
Eventually, networks will be grouped by process node and whatnot.
@todo: integrate concept of startingModule.
"""
try:
self._devide_app.network_manager.execute_network(
self._module_dict.values())
except Exception, e:
# if an error occurred, but progress is not at 100% yet,
# we have to put it there, else app remains in visually
# busy state.
if self._devide_app.get_progress() < 100.0:
self._devide_app.set_progress(
100.0, 'Error during network execution.')
# we are directly reporting the error, as this is used by
# a utility function that is too compact to handle an
# exception by itself. Might change in the future.
self._devide_app.log_error_with_exception(str(e))
def view_module(self, instance):
instance.view()
def delete_module(self, instance):
"""Destroy module.
This will disconnect all module inputs and outputs and call the
close() method. This method is used by the graphEditor and by
the close() method of the ModuleManager.
@raise ModuleManagerException: if an error occurs during module
deletion.
"""
# get details about the module (we might need this later)
meta_module = self._module_dict[instance]
instance_name = meta_module.instance_name
module_name = meta_module.instance.__class__.__name__
# first disconnect all outgoing connections
inputs = self._module_dict[instance].inputs
outputs = self._module_dict[instance].outputs
# outputs is a list of lists of tuples, each tuple containing
# module_instance and input_idx of the consumer module
for output in outputs:
if output:
                # walk through all (consumer_instance, input_idx) tuples
for consumer in output:
# disconnect all consumers
self.disconnect_modules(consumer[0], consumer[1])
# inputs is a list of tuples, each tuple containing module_instance
# and output_idx of the producer/supplier module
for input_idx in range(len(inputs)):
try:
# also make sure we fully disconnect ourselves from
# our producers
self.disconnect_modules(instance, input_idx)
except Exception, e:
# we can't allow this to prevent a destruction, just log
self.log_error_with_exception(
'Module %s (%s) errored during disconnect of input %d. '
'Continuing with deletion.' % \
(instance_name, module_name, input_idx))
# set supplier to None - so we know it's nuked
inputs[input_idx] = None
# we've disconnected completely - let's reset all lists
self._module_dict[instance].reset_inputsOutputs()
# store autoexecute, then disable
ae = self.auto_execute
self.auto_execute = False
try:
try:
# now we can finally call close on the instance
instance.close()
finally:
# do the following in all cases:
# 1. remove module from our dict
del self._module_dict[instance]
# 2. reset auto_execute mode
self.auto_execute = ae
# the exception will now be re-raised if there was one
# to begin with.
except Exception, e:
# we're going to re-raise the exception: this method could be
# called by other parties that need to do alternative error
# handling
# create new exception message
es = 'Error calling close() on module %s (%s): %s' \
% (instance_name, module_name, str(e))
# we use the three argument form so that we can add a new
# message to the exception but we get to see the old traceback
# see: http://docs.python.org/ref/raise.html
raise ModuleManagerException, es, sys.exc_info()[2]
def connect_modules(self, output_module, output_idx,
input_module, input_idx):
"""Connect output_idx'th output of provider output_module to
input_idx'th input of consumer input_module. If an error occurs
during connection, an exception will be raised.
@param output_module: This is a module instance.
"""
# record connection (this will raise an exception if the input
# is already occupied)
self._module_dict[input_module].connectInput(
input_idx, output_module, output_idx)
# record connection on the output of the producer module
# this will also initialise the transfer times
self._module_dict[output_module].connectOutput(
output_idx, input_module, input_idx)
def disconnect_modules(self, input_module, input_idx):
"""Disconnect a consumer module from its provider.
This method will disconnect input_module from its provider by
disconnecting the link between the provider and input_module at
the input_idx'th input port of input_module.
All errors will be handled internally in this function, i.e. no
exceptions will be raised.
@todo: factor parts of this out into the MetaModule.
FIXME: continue here... (we can start converting some of
the modules to shallow copy their data; especially the slice3dVWR
is a problem child.)
"""
meta_module = self._module_dict[input_module]
instance_name = meta_module.instance_name
module_name = meta_module.instance.__class__.__name__
try:
input_module.set_input(input_idx, None)
except Exception, e:
# if the module errors during disconnect, we have no choice
# but to continue with deleting it from our metadata
# at least this way, no data transfers will be attempted during
# later network executions.
self._devide_app.log_error_with_exception(
'Module %s (%s) errored during disconnect of input %d. '
'Removing link anyway.' % \
(instance_name, module_name, input_idx))
# trace it back to our supplier, and tell it that it has one
# less consumer (if we even HAVE a supplier on this port)
s = self._module_dict[input_module].inputs[input_idx]
if s:
supp = s[0]
suppOutIdx = s[1]
self._module_dict[supp].disconnectOutput(
suppOutIdx, input_module, input_idx)
# indicate to the meta data that this module doesn't have an input
# anymore
self._module_dict[input_module].disconnectInput(input_idx)
def deserialise_module_instances(self, pmsDict, connectionList):
"""Given a pickled stream, this method will recreate all modules,
configure them and connect them up.
        @returns: (newModulesDict, newConnections) - newModulesDict maps from
        serialised/OLD instance name to newly created instance; newConnections
        is a list of the connections that were actually made during
        the deserialisation.
@TODO: this should go to NetworkManager and should return meta_modules
in the dictionary, not module instances.
"""
# store and deactivate auto-execute
ae = self.auto_execute
self.auto_execute = False
# newModulesDict will act as translator between pickled instance_name
# and new instance!
newModulesDict = {}
failed_modules_dict = []
for pmsTuple in pmsDict.items():
# each pmsTuple == (instance_name, pms)
            # we're only going to try to create a module if the module
            # manager says it's available!
try:
newModule = self.create_module(pmsTuple[1].module_name)
except ModuleManagerException, e:
self._devide_app.log_error_with_exception(
'Could not create module %s:\n%s.' %
(pmsTuple[1].module_name, str(e)))
                # make sure newModule stays None so it is skipped below
newModule = None
if newModule:
# set its config!
try:
# we need to DEEP COPY the config, else it could easily
# happen that objects have bindings to the same configs!
# to see this go wrong, switch off the deepcopy, create
# a network by copying/pasting a vtkPolyData, load
# two datasets into a slice viewer... now save the whole
# thing and load it: note that the two readers are now
# reading the same file!
configCopy = copy.deepcopy(pmsTuple[1].module_config)
# the API says we have to call get_config() first,
# so that the module has another place where it
# could lazy prepare the thing (slice3dVWR does
# this)
cfg = newModule.get_config()
# now we merge the stored config with the new
# module config
cfg.__dict__.update(configCopy.__dict__)
# and then we set it back with set_config
newModule.set_config(cfg)
except Exception, e:
# it could be a module with no defined config logic
self._devide_app.log_warning(
'Could not restore state/config to module %s: %s' %
(newModule.__class__.__name__, e))
# try to rename the module to the pickled unique instance name
# if this name is already taken, use the generated unique instance
# name
self.rename_module(newModule,pmsTuple[1].instance_name)
# and record that it's been recreated (once again keyed
# on the OLD unique instance name)
newModulesDict[pmsTuple[1].instance_name] = newModule
# now we're going to connect all of the successfully created
# modules together; we iterate DOWNWARDS through the different
# consumerTypes
newConnections = []
for connection_type in range(max(self.consumerTypeTable.values()) + 1):
typeConnections = [connection for connection in connectionList
if connection.connection_type == connection_type]
for connection in typeConnections:
if newModulesDict.has_key(connection.source_instance_name) and \
newModulesDict.has_key(connection.target_instance_name):
sourceM = newModulesDict[connection.source_instance_name]
targetM = newModulesDict[connection.target_instance_name]
# attempt connecting them
print "connecting %s:%d to %s:%d..." % \
(sourceM.__class__.__name__, connection.output_idx,
targetM.__class__.__name__, connection.input_idx)
try:
self.connect_modules(sourceM, connection.output_idx,
targetM, connection.input_idx)
except:
pass
else:
newConnections.append(connection)
# now do the POST connection module config!
for oldInstanceName,newModuleInstance in newModulesDict.items():
# retrieve the pickled module state
pms = pmsDict[oldInstanceName]
# take care to deep copy the config
configCopy = copy.deepcopy(pms.module_config)
# now try to call set_configPostConnect
try:
newModuleInstance.set_configPostConnect(configCopy)
except AttributeError:
pass
except Exception, e:
# it could be a module with no defined config logic
self._devide_app.log_warning(
'Could not restore post connect state/config to module '
'%s: %s' % (newModuleInstance.__class__.__name__, e))
# reset auto_execute
self.auto_execute = ae
# we return a dictionary, keyed on OLD pickled name with value
# the newly created module-instance and a list with the connections
return (newModulesDict, newConnections)
def request_auto_execute_network(self, module_instance):
"""Method that can be called by an interaction/view module to
indicate that some action by the user should result in a network
update. The execution will only be performed if the
AutoExecute mode is active.
"""
if self.auto_execute:
print "auto_execute ##### #####"
self.execute_network()
def serialise_module_instances(self, module_instances):
"""Given
"""
# dictionary of pickled module instances keyed on unique module
# instance name
pmsDict = {}
# we'll use this list internally to check later (during connection
# pickling) which modules are being written away
pickledModuleInstances = []
for module_instance in module_instances:
if self._module_dict.has_key(module_instance):
# first get the MetaModule
mModule = self._module_dict[module_instance]
# create a picklable thingy
pms = PickledModuleState()
try:
print "SERIALISE: %s - %s" % \
(str(module_instance),
str(module_instance.get_config()))
pms.module_config = module_instance.get_config()
except AttributeError, e:
self._devide_app.log_warning(
'Could not extract state (config) from module %s: %s' \
% (module_instance.__class__.__name__, str(e)))
# if we can't get a config, we pickle a default
pms.module_config = DefaultConfigClass()
#pms.module_name = module_instance.__class__.__name__
# we need to store the complete module name
# we could also get this from meta_module.module_name
pms.module_name = module_instance.__class__.__module__
# this will only be used for uniqueness purposes
pms.instance_name = mModule.instance_name
pmsDict[pms.instance_name] = pms
pickledModuleInstances.append(module_instance)
# now iterate through all the actually pickled module instances
# and store all connections in a connections list
# three different types of connections:
# 0. connections with source modules with no inputs
# 1. normal connections
# 2. connections with targets that are exceptions, e.g. sliceViewer
connectionList = []
for module_instance in pickledModuleInstances:
mModule = self._module_dict[module_instance]
# we only have to iterate through all outputs
for output_idx in range(len(mModule.outputs)):
outputConnections = mModule.outputs[output_idx]
# each output can of course have multiple outputConnections
# each outputConnection is a tuple:
# (consumerModule, consumerInputIdx)
for outputConnection in outputConnections:
if outputConnection[0] in pickledModuleInstances:
# this means the consumerModule is also one of the
# modules to be pickled and so this connection
# should be stored
# find the type of connection (1, 2, 3), work from
# the back...
moduleClassName = \
outputConnection[0].__class__.__name__
if moduleClassName in self.consumerTypeTable:
connection_type = self.consumerTypeTable[
moduleClassName]
else:
connection_type = 1
# FIXME: we still have to check for 0: iterate
# through all inputs, check that none of the
# supplier modules are in the list that we're
# going to pickle
print '%s has connection type %d' % \
(outputConnection[0].__class__.__name__,
connection_type)
connection = PickledConnection(
mModule.instance_name, output_idx,
self._module_dict[outputConnection[0]].instance_name,
outputConnection[1],
connection_type)
connectionList.append(connection)
return (pmsDict, connectionList)
def generic_progress_callback(self, progressObject,
progressObjectName, progress, progressText):
"""progress between 0.0 and 1.0.
"""
if self._inProgressCallback.testandset():
# first check if execution has been disabled
# the following bit of code is risky: the ITK to VTK bridges
# seem to be causing segfaults when we abort too soon
# if not self._executionEnabled:
# try:
# progressObject.SetAbortExecute(1)
# except Exception:
# pass
# try:
# progressObject.SetAbortGenerateData(1)
# except Exception:
# pass
# progress = 1.0
# progressText = 'Execution ABORTED.'
progressP = progress * 100.0
fullText = '%s: %s' % (progressObjectName, progressText)
if abs(progressP - 100.0) < 0.01:
# difference smaller than a hundredth
fullText += ' [DONE]'
self.setProgress(progressP, fullText)
self._inProgressCallback.unlock()
def get_consumers(self, meta_module):
"""Determine meta modules that are connected to the outputs of
meta_module.
This method is called by: scheduler, self.recreate_module_in_place.
@todo: this should be part of the MetaModule code, as soon as
the MetaModule inputs and outputs are of type MetaModule and not
instance.
        @param meta_module: meta module of which the consumers should be
        determined.
@return: list of tuples, each consisting of (this module's output
index, the consumer meta module, the consumer input index)
"""
consumers = []
# get outputs from MetaModule: this is a list of list of tuples
# outer list has number of outputs elements
# inner lists store consumer modules for that output
# tuple contains (consumerModuleInstance, consumerInputIdx)
outputs = meta_module.outputs
for output_idx in range(len(outputs)):
output = outputs[output_idx]
for consumerInstance, consumerInputIdx in output:
consumerMetaModule = self._module_dict[consumerInstance]
consumers.append(
(output_idx, consumerMetaModule, consumerInputIdx))
return consumers
def get_producers(self, meta_module):
"""Return a list of meta modules, output indices and the input
index through which they supply 'meta_module' with data.
@todo: this should be part of the MetaModule code, as soon as
the MetaModule inputs and outputs are of type MetaModule and not
instance.
@param meta_module: consumer meta module.
@return: list of tuples, each tuple consists of producer meta module
and output index as well as input index of the instance input that
they connect to.
"""
# inputs is a list of tuples, each tuple containing module_instance
# and output_idx of the producer/supplier module; if the port is
# not connected, that position in inputs contains "None"
inputs = meta_module.inputs
producers = []
for i in range(len(inputs)):
pTuple = inputs[i]
if pTuple is not None:
# unpack
pInstance, pOutputIdx = pTuple
pMetaModule = self._module_dict[pInstance]
# and store
producers.append((pMetaModule, pOutputIdx, i))
return producers
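    # Illustrative note (editor's sketch, names hypothetical): one traversal
    # step with the two helpers above looks like
    #   for prod_mm, out_idx, in_idx in mm.get_producers(meta_module):
    #       pass  # prod_mm output out_idx feeds meta_module input in_idx
    #   for out_idx, cons_mm, cons_in_idx in mm.get_consumers(meta_module):
    #       pass  # meta_module output out_idx feeds cons_mm input cons_in_idx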
def setModified_DEPRECATED(self, module_instance):
"""Changed modified ivar in MetaModule.
This ivar is used to determine whether module_instance needs to be
executed to become up to date. It should be set whenever changes
are made that dirty the module state, for example parameter changes
or topology changes.
        @param module_instance: the instance whose modified state should be
        changed. This deprecated method always sets the flag to True.
"""
self._module_dict[module_instance].modified = True
def set_progress(self, progress, message, noTime=False):
"""Progress is in percent.
"""
self._devide_app.set_progress(progress, message, noTime)
setProgress = set_progress
def _make_unique_instance_name(self, instance_name=None):
"""Ensure that instance_name is unique or create a new unique
instance_name.
If instance_name is None, a unique one will be created. An
        instance_name (whether created or passed) will be permuted until it
        is unique and then returned.
"""
# first we make sure we have a unique instance name
if not instance_name:
instance_name = "dvm%d" % (len(self._module_dict),)
# now make sure that instance_name is unique
uniqueName = False
while not uniqueName:
# first check that this doesn't exist in the module dictionary
uniqueName = True
for mmt in self._module_dict.items():
if mmt[1].instance_name == instance_name:
uniqueName = False
break
if not uniqueName:
# this means that this exists already!
# create a random 3 character string
chars = 'abcdefghijklmnopqrstuvwxyz'
tl = ""
for i in range(3):
tl += choice(chars)
instance_name = "%s%s%d" % (instance_name, tl,
len(self._module_dict))
return instance_name
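    # Illustrative note (editor's sketch): with an empty module dictionary the
    # generated names run dvm0, dvm1, ...; on a clash a random three-letter
    # suffix plus the current dictionary size is appended, so 'dvm0' could
    # become e.g. 'dvm0qkz7'.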
def rename_module(self, instance, name):
"""Rename a module in the module dictionary
"""
# if we get a duplicate name, we either add (%d) after, or replace
# the existing %d with something that makes it all unique...
        mo = re.match(r'(.*)\s+\(([1-9]+)\)', name)
if mo:
basename = mo.group(1)
i = int(mo.group(2)) + 1
else:
basename = name
i = 1
while (self.get_instance(name) != None):
# add a number (%d) until the name is unique
name = '%s (%d)' % (basename, i)
i += 1
try:
# get the MetaModule and rename it.
self._module_dict[instance].instance_name = name
except Exception:
return False
# some modules and mixins support the rename call and use it
# to change their window titles. Neat.
# this was added on 20090322, so it's not supported
# everywhere.
try:
instance.rename(name)
except AttributeError:
pass
# everything proceeded according to plan.
# so return the new name
return name
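    # Illustrative note (editor's sketch): the collision handling above turns
    # a requested name that already carries a counter, e.g. 'reader (2)', into
    # basename 'reader' with the counter resuming at 3, so repeated clashes
    # yield 'reader (3)', 'reader (4)', and so on.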
def modify_module(self, module_instance, part=0):
"""Call this whenever module state has changed in such a way that
necessitates a re-execution, for instance when parameters have been
changed or when new input data has been transferred.
"""
self._module_dict[module_instance].modify(part)
modify_module = modify_module
def recreate_module_in_place(self, meta_module):
"""Destroy, create and reconnect a module in place.
This function works but is not being used at the moment. It was
intended for graphEditor-driven module reloading, but it turned out
that implementing that in the graphEditor was easier. I'm keeping
this here for reference purposes.
@param meta_module: The module that will be destroyed.
@returns: new meta_module.
"""
# 1. store input and output connections, module name, module state
#################################################################
# prod_tuple contains a list of (prod_meta_module, output_idx,
# input_idx) tuples
prod_tuples = self.get_producers(meta_module)
# cons_tuples contains a list of (output_index, consumer_meta_module,
# consumer input index)
cons_tuples = self.get_consumers(meta_module)
# store the instance name
instance_name = meta_module.instance_name
# and the full module spec name
full_name = meta_module.module_name
# and get the module state (we make a deep copy just in case)
module_config = copy.deepcopy(meta_module.instance.get_config())
# 2. instantiate a new one and give it its old config
###############################################################
# because we instantiate the new one first, if this instantiation
# breaks, an exception will be raised and no harm will be done,
# we still have the old one lying around
# instantiate
new_instance = self.create_module(full_name, instance_name)
# and give it its old config back
new_instance.set_config(module_config)
# 3. delete the old module
#############################################################
self.delete_module(meta_module.instance)
# 4. now rename the new module
#############################################################
# find the corresponding new meta_module
meta_module = self._module_dict[new_instance]
# give it its old name back
meta_module.instance_name = instance_name
# 5. connect it up
#############################################################
for producer_meta_module, output_idx, input_idx in prod_tuples:
self.connect_modules(
producer_meta_module.instance, output_idx,
new_instance, input_idx)
for output_idx, consumer_meta_module, input_idx in cons_tuples:
self.connect_modules(
new_instance, output_idx,
consumer_meta_module.instance, input_idx)
# we should be done now
return meta_module
def should_execute_module(self, meta_module, part=0):
"""Determine whether module_instance requires execution to become
up to date.
Execution is required when the module is new or when the user has made
parameter or introspection changes. All of these conditions should be
        indicated by calling L{ModuleManager.modify_module}.
@return: True if execution required, False if not.
"""
return meta_module.shouldExecute(part)
def should_transfer_output(
self,
meta_module, output_idx, consumer_meta_module,
consumer_input_idx, streaming=False):
"""Determine whether output data has to be transferred from
module_instance via output outputIndex to module consumerInstance.
Output needs to be transferred if:
- module_instance has new or changed output (i.e. it has
executed after the previous transfer)
- consumerInstance does not have the data yet
Both of these cases are handled with a transfer timestamp per
output connection (defined by output idx, consumer module, and
consumer input idx)
This method is used by the scheduler.
@return: True if output should be transferred, False if not.
"""
return meta_module.should_transfer_output(
output_idx, consumer_meta_module, consumer_input_idx,
streaming)
def transfer_output(
self,
meta_module, output_idx, consumer_meta_module,
consumer_input_idx, streaming=False):
"""Transfer output data from module_instance to the consumer modules
connected to its specified output indexes.
This will be called by the scheduler right before execution to
transfer the given output from module_instance instance to the correct
input on the consumerInstance. In general, this is only done if
should_transfer_output is true, so the number of unnecessary transfers
should be minimised.
This method is in ModuleManager and not in MetaModule because it
involves more than one MetaModule.
        @param meta_module: producer meta module whose output data must be
        transferred.
        @param output_idx: only output data produced by this output will
        be transferred.
        @param consumer_meta_module: only data going to this module's
        instance will be transferred.
        @param consumer_input_idx: data enters the consumer instance via
        this input port.
@raise ModuleManagerException: if an error occurs getting the data
from or transferring it to a new module.
"""
#print 'transferring data %s:%d' % (module_instance.__class__.__name__,
# outputIndex)
# double check that this connection already exists
consumer_instance = consumer_meta_module.instance
if meta_module.findConsumerInOutputConnections(
output_idx, consumer_instance, consumer_input_idx) == -1:
raise Exception, 'ModuleManager.transfer_output called for ' \
'connection that does not exist.'
try:
# get data from producerModule output
od = meta_module.instance.get_output(output_idx)
except Exception, e:
# get details about the errored module
instance_name = meta_module.instance_name
module_name = meta_module.instance.__class__.__name__
# and raise the relevant exception
es = 'Faulty transfer_output (get_output on module %s (%s)): %s' \
% (instance_name, module_name, str(e))
# we use the three argument form so that we can add a new
# message to the exception but we get to see the old traceback
# see: http://docs.python.org/ref/raise.html
raise ModuleManagerException, es, sys.exc_info()[2]
# we only disconnect if we're NOT streaming!
if not streaming:
# experiment here with making shallowcopies if we're working with
# VTK data. I've double-checked (20071027): calling update on
# a shallowcopy is not able to get a VTK pipeline to execute.
# TODO: somehow this should be part of one of the moduleKits
# or some other module-related pluggable logic.
if od is not None and hasattr(od, 'GetClassName') and hasattr(od, 'ShallowCopy'):
nod = od.__class__()
nod.ShallowCopy(od)
od = nod
try:
# set on consumerInstance input
consumer_meta_module.instance.set_input(consumer_input_idx, od)
except Exception, e:
# get details about the errored module
instance_name = consumer_meta_module.instance_name
module_name = consumer_meta_module.instance.__class__.__name__
# and raise the relevant exception
es = 'Faulty transfer_output (set_input on module %s (%s)): %s' \
% (instance_name, module_name, str(e))
# we use the three argument form so that we can add a new
# message to the exception but we get to see the old traceback
# see: http://docs.python.org/ref/raise.html
raise ModuleManagerException, es, sys.exc_info()[2]
# record that the transfer has just happened
meta_module.timeStampTransferTime(
output_idx, consumer_instance, consumer_input_idx,
streaming)
# also invalidate the consumerModule: it should re-execute when
# a transfer has been made. We only invalidate the part that
# takes responsibility for that input.
part = consumer_meta_module.getPartForInput(consumer_input_idx)
consumer_meta_module.modify(part)
#print "modified", consumer_meta_module.instance.__class__.__name__
# execute on change
# we probably shouldn't automatically execute here... transfers
# mean that some sort of network execution is already running
| {
"content_hash": "d298bf7acff63bebfc0d32157266855b",
"timestamp": "",
"source": "github",
"line_count": 1690,
"max_line_length": 93,
"avg_line_length": 40.418343195266274,
"alnum_prop": 0.5656521293571669,
"repo_name": "ivoflipse/devide",
"id": "694a8d9bac6db3cd11db16cdaa4b6b7595972775",
"size": "68402",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "module_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3102319"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
} |
import json
from django.http import HttpResponse
from unittest import mock
import pytest
import requests
import settings
from olympia import amo
from olympia.amo.tests import addon_factory
from olympia.discovery.models import DiscoveryItem
from olympia.discovery.utils import (
call_recommendation_server,
get_disco_recommendations,
replace_extensions,
)
@pytest.mark.django_db
@mock.patch('olympia.discovery.utils.statsd.incr')
@mock.patch('olympia.discovery.utils.requests.get')
def test_call_recommendation_server_fails_nice(requests_get, statsd_incr):
requests_get.side_effect = requests.exceptions.RequestException()
# Check the exception in requests.get is handled okay.
assert (
call_recommendation_server(settings.RECOMMENDATION_ENGINE_URL, '123456', {})
is None
)
statsd_incr.assert_called_with('services.recommendations.fail')
@pytest.mark.django_db
@mock.patch('olympia.discovery.utils.statsd.incr')
@mock.patch('olympia.discovery.utils.requests.get')
def test_call_recommendation_server_succeeds(requests_get, statsd_incr):
requests_get.return_value = HttpResponse(json.dumps({'results': ['@lolwut']}))
assert call_recommendation_server(
settings.RECOMMENDATION_ENGINE_URL, '123456', {}
) == ['@lolwut']
statsd_incr.assert_called_with('services.recommendations.success')
@mock.patch('olympia.discovery.utils.requests.post')
@mock.patch('olympia.discovery.utils.requests.get')
def test_call_recommendation_server_no_parameters(requests_get, requests_post):
url = settings.RECOMMENDATION_ENGINE_URL
taar_timeout = settings.RECOMMENDATION_ENGINE_TIMEOUT
requests_get.return_value = HttpResponse(json.dumps({'results': ['@lolwut']}))
# No parameters
call_recommendation_server(url, '123456', {})
requests_get.assert_called_with(url + '123456/', timeout=taar_timeout)
assert not requests_post.called
@mock.patch('olympia.discovery.utils.requests.post')
@mock.patch('olympia.discovery.utils.requests.get')
def test_call_recommendation_server_some_parameters(requests_get, requests_post):
url = 'http://example.com/whatever/'
taar_timeout = settings.RECOMMENDATION_ENGINE_TIMEOUT
requests_get.return_value = HttpResponse(json.dumps({'results': ['@lolwut']}))
data = {'some': 'params', 'and': 'more'}
call_recommendation_server(url, '123456', data)
requests_get.assert_called_with(
url + '123456/?and=more&some=params', timeout=taar_timeout
)
assert not requests_post.called
@mock.patch('olympia.discovery.utils.requests.post')
@mock.patch('olympia.discovery.utils.requests.get')
def test_call_recommendation_server_post(requests_get, requests_post):
url = 'http://example.com/taar_is_awesome/'
taar_timeout = settings.RECOMMENDATION_ENGINE_TIMEOUT
requests_get.return_value = HttpResponse(json.dumps({'results': ['@lolwut']}))
data = {'some': 'params', 'and': 'more'}
call_recommendation_server(url, '4815162342', data, verb='post')
assert not requests_get.called
requests_post.assert_called_with(
url + '4815162342/', json=data, timeout=taar_timeout
)
@mock.patch('olympia.discovery.utils.requests.post')
@mock.patch('olympia.discovery.utils.requests.get')
def test_call_recommendation_server_post_no_parameters(requests_get, requests_post):
url = 'http://example.com/taar_is_awesome/'
taar_timeout = settings.RECOMMENDATION_ENGINE_TIMEOUT
requests_get.return_value = HttpResponse(json.dumps({'results': ['@lolwut']}))
call_recommendation_server(url, '4815162342', None, verb='post')
assert not requests_get.called
requests_post.assert_called_with(
url + '4815162342/', json=None, timeout=taar_timeout
)
@mock.patch('olympia.discovery.utils.requests.post')
@mock.patch('olympia.discovery.utils.requests.get')
def test_call_recommendation_server_get_parameter_is_an_url(
requests_get, requests_post
):
url = 'http://example.com/taar_is_awesome/'
requests_get.return_value = HttpResponse(json.dumps({'results': []}))
assert call_recommendation_server(url, 'http://evil.com', {}, verb='get') is None
assert not requests_get.called
assert not requests_post.called
assert call_recommendation_server(url, 'http://evil.com/', {}, verb='get') is None
assert not requests_get.called
assert not requests_post.called
assert call_recommendation_server(url, 'http://[evil.com/', {}, verb='get') is None
assert not requests_get.called
assert not requests_post.called
assert (
call_recommendation_server(url, 'http://evil.com/foo', {}, verb='get') is None
)
assert not requests_get.called
assert not requests_post.called
assert call_recommendation_server(url, '/foo', {}, verb='get') is None
assert not requests_get.called
assert not requests_post.called
assert call_recommendation_server(url, '//foo', {}, verb='get') is None
assert not requests_get.called
assert not requests_post.called
@mock.patch('olympia.discovery.utils.requests.post')
@mock.patch('olympia.discovery.utils.requests.get')
def test_call_recommendation_server_post_parameter_is_an_url(
requests_get, requests_post
):
url = 'http://example.com/taar_is_awesome/'
requests_post.return_value = HttpResponse(json.dumps({'results': []}))
assert call_recommendation_server(url, 'http://evil.com', {}, verb='post') is None
assert not requests_get.called
assert not requests_post.called
assert call_recommendation_server(url, 'http://evil.com/', {}, verb='post') is None
assert not requests_get.called
assert not requests_post.called
assert call_recommendation_server(url, 'http://[evil.com/', {}, verb='post') is None
assert not requests_get.called
assert not requests_post.called
assert (
call_recommendation_server(url, 'http://evil.com/foo', {}, verb='post') is None
)
assert not requests_get.called
assert not requests_post.called
assert call_recommendation_server(url, '/foo', {}, verb='post') is None
assert not requests_get.called
assert not requests_post.called
assert call_recommendation_server(url, '//foo', {}, verb='post') is None
assert not requests_get.called
assert not requests_post.called
@mock.patch('olympia.discovery.utils.call_recommendation_server')
@pytest.mark.django_db
def test_get_disco_recommendations(call_recommendation_server):
expected_addons = [
addon_factory(guid='101@mozilla'),
addon_factory(guid='102@mozilla'),
addon_factory(guid='103@mozilla'),
addon_factory(guid='104@mozilla'),
]
    # All four add-ons are public, so they should all be returned, in the
    # order given back by the recommendation server.
call_recommendation_server.return_value = [
'101@mozilla',
'102@mozilla',
'103@mozilla',
'104@mozilla',
]
recommendations = get_disco_recommendations('0', [])
call_recommendation_server.assert_called_with(
'https://taar.dev.mozaws.net/v1/api/recommendations/', '0', None, verb='post'
)
assert [result.addon for result in recommendations] == expected_addons
# only valid, public add-ons should match guids
incomplete_addon = expected_addons.pop()
incomplete_addon.update(status=amo.STATUS_NULL)
# Remove this one and have recommendations return a bad guid instead.
expected_addons.pop()
call_recommendation_server.return_value = [
'101@mozilla',
'102@mozilla',
'103@badbadguid',
'104@mozilla',
]
recommendations = get_disco_recommendations('0', [])
assert [result.addon for result in recommendations] == expected_addons
@mock.patch('olympia.discovery.utils.call_recommendation_server')
def test_get_disco_recommendations_empty(call_recommendation_server):
call_recommendation_server.return_value = None
recommendations = get_disco_recommendations('0', [])
assert recommendations == []
call_recommendation_server.assert_called_with(
'https://taar.dev.mozaws.net/v1/api/recommendations/', '0', None, verb='post'
)
@mock.patch('olympia.discovery.utils.call_recommendation_server')
@pytest.mark.django_db
def test_get_disco_recommendations_overrides(call_recommendation_server):
call_recommendation_server.return_value = [
'@guid1',
'@guid2',
'103@mozilla',
'104@mozilla',
]
get_disco_recommendations('xxx', ['@guid1', '@guid2', '@guid3'])
data = {
'options': {
'promoted': [
['@guid1', 100],
['@guid2', 99],
['@guid3', 98],
]
}
}
call_recommendation_server.assert_called_with(
'https://taar.dev.mozaws.net/v1/api/recommendations/', 'xxx', data, verb='post'
)
@pytest.mark.django_db
def test_replace_extensions():
source = [
DiscoveryItem(addon=addon_factory()), # replaced
DiscoveryItem(addon=addon_factory()), # also replaced
DiscoveryItem(addon=addon_factory(type=amo.ADDON_STATICTHEME)), # not
DiscoveryItem(addon=addon_factory(type=amo.ADDON_STATICTHEME)), # nope
DiscoveryItem(addon=addon_factory()), # possibly replaced
DiscoveryItem(addon=addon_factory(type=amo.ADDON_STATICTHEME)), # nope
]
# Just 2 replacements
replacements = [
DiscoveryItem(addon=addon_factory()),
DiscoveryItem(addon=addon_factory()),
]
result = replace_extensions(source, replacements)
assert result == [
replacements[0],
replacements[1], # we only had two replacements.
source[2],
source[3],
source[4],
source[5],
], result
# Add a few more so all extensions are replaced, with one spare.
replacements.append(DiscoveryItem(addon=addon_factory()))
replacements.append(DiscoveryItem(addon=addon_factory()))
result = replace_extensions(source, replacements)
assert result == [
replacements[0],
replacements[1],
source[2], # Not an extension, so not replaced.
source[3], # Not an extension, so not replaced.
replacements[2],
source[5], # Not an extension, so not replaced.
], result
| {
"content_hash": "f5b5623ad86648a4977b233d557afcca",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 88,
"avg_line_length": 37.13768115942029,
"alnum_prop": 0.6863414634146342,
"repo_name": "bqbn/addons-server",
"id": "a7ed2ef36d456173c5fd79d3faa937cd0982d602",
"size": "10274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/discovery/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "810080"
},
{
"name": "Dockerfile",
"bytes": "2868"
},
{
"name": "HTML",
"bytes": "585550"
},
{
"name": "JavaScript",
"bytes": "1071952"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5323934"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11171"
},
{
"name": "Smarty",
"bytes": "1503"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from hcat import hcat
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class HCatClient(Script):
def install(self, env):
import params
self.install_packages(env)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
hcat()
def status(self, env):
raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HCatClientDefault(HCatClient):
pass
if __name__ == "__main__":
HCatClient().execute()
| {
"content_hash": "f26201e5cd3d1f46f4dd2bc31f229ab1",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 72,
"avg_line_length": 28.617021276595743,
"alnum_prop": 0.7650557620817844,
"repo_name": "arenadata/ambari",
"id": "8ae64b9e6f3f6aae32e1aa1c31e706930d3203fa",
"size": "1367",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat_client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
import unittest
from zang.inboundxml.elements.number import Number
from zang.inboundxml.elements.base_node import BaseNode
class TestNumber(unittest.TestCase):
def setUp(self):
self.number = '(555)555-555'
def test_init_with_required_values(self):
expected = '<Number>' + self.number + '</Number>'
assert Number(self.number).xml == expected
def test_init_with_optional_attributes(self):
number = Number(self.number, sendDigits='ww12w3221')
expected = '<Number sendDigits="ww12w3221">' + self.number + \
'</Number>'
assert number.xml == expected
def test_init_with_unsupported_attributes(self):
self.assertRaises(TypeError, lambda: Number(self.number, foo='bar'))
def test_with_update_attributes(self):
number = Number(self.number)
newNumber = 123456789
number.number = newNumber
number.sendDigits = 'ww12w3221'
expected = '<Number sendDigits="ww12w3221">%s</Number>' % newNumber
assert number.xml == expected
    def test_undefined_method_with_primitive_type(self):
self.assertRaises(
AttributeError, lambda: Number(self.number).addElement('bar'))
    def test_undefined_method_with_base_node(self):
self.assertRaises(
AttributeError, lambda: Number(self.number).addElement(BaseNode()))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "cff637c353b0202442854e4cc806a51a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 33.833333333333336,
"alnum_prop": 0.657283603096411,
"repo_name": "zang-cloud/zang-python",
"id": "0458d786af025ccb1c9fb4ef6f785a2dc34251c9",
"size": "1421",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/inboundxml/test_number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "353373"
}
],
"symlink_target": ""
} |
from plenum.common.constants import CONFIG_LEDGER_ID
from plenum.common.messages.node_messages import Ordered
from plenum.test.helper import freshness, assertExp
from plenum.test.replica.conftest import *
from plenum.test.test_node import getPrimaryReplica
from stp_core.loop.eventually import eventually
FRESHNESS_TIMEOUT = 60
OLDEST_TS = 1499906903
LEDGER_IDS = [POOL_LEDGER_ID, CONFIG_LEDGER_ID, DOMAIN_LEDGER_ID]
@pytest.fixture(scope='function', params=[0])
def viewNo(tconf, request):
return request.param
@pytest.fixture(scope="module")
def tconf(tconf):
with freshness(tconf, enabled=True, timeout=FRESHNESS_TIMEOUT):
yield tconf
@pytest.fixture(scope='function')
def mock_timestamp():
return MockTimestamp(OLDEST_TS)
@pytest.fixture(scope='function')
def ledger_ids():
return LEDGER_IDS
@pytest.fixture(scope='function', params=[0])
def inst_id(request):
return request.param
@pytest.fixture(scope='function')
def replica_with_valid_requests(primary_replica):
requests = {ledger_id: sdk_random_request_objects(1, identifier="did",
protocol_version=CURRENT_PROTOCOL_VERSION)[0]
for ledger_id in LEDGER_IDS}
def patched_consume_req_queue_for_pre_prepare(ledger_id, tm, view_no, pp_seq_no):
reqs = [requests[ledger_id]] if len(primary_replica._ordering_service.requestQueues[ledger_id]) > 0 else []
return [reqs, [], []]
primary_replica._ordering_service._consume_req_queue_for_pre_prepare = patched_consume_req_queue_for_pre_prepare
return primary_replica, requests
def set_current_time(replica, ts):
replica.get_current_time.value = OLDEST_TS + ts
replica.get_time_for_3pc_batch.value = int(OLDEST_TS + ts)
def check_and_pop_ordered_pre_prepare(replica, ledger_ids):
for ledger_id in ledger_ids:
msg = replica.outBox.popleft()
assert isinstance(msg, PrePrepare)
assert msg.ledgerId == ledger_id
assert len(msg.reqIdr) > 0
for ledger_id in ledger_ids:
replica._ordering_service.requestQueues[ledger_id].clear()
def check_and_pop_freshness_pre_prepare(replica, ledger_id):
msg = replica.outBox.popleft()
assert isinstance(msg, PrePrepare)
assert msg.ledgerId == ledger_id
assert msg.reqIdr == tuple()
def test_no_freshness_pre_prepare_when_disabled(tconf, primary_replica):
with freshness(tconf, enabled=False, timeout=FRESHNESS_TIMEOUT):
assert len(primary_replica.outBox) == 0
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 0
set_current_time(primary_replica, FRESHNESS_TIMEOUT + 1)
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 0
def test_no_freshness_pre_prepare_for_non_master(tconf, primary_replica):
primary_replica.isMaster = False
primary_replica.instId = 1
assert len(primary_replica.outBox) == 0
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 0
set_current_time(primary_replica, FRESHNESS_TIMEOUT + 1)
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 0
def test_freshness_pre_prepare_initially(primary_replica):
assert len(primary_replica.outBox) == 0
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 0
@pytest.mark.parametrize('ts', [
0, 1, FRESHNESS_TIMEOUT, -1, -FRESHNESS_TIMEOUT
])
def test_freshness_pre_prepare_before_timeout(primary_replica, ts):
assert len(primary_replica.outBox) == 0
set_current_time(primary_replica, ts)
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 0
def test_freshness_pre_prepare_after_timeout(primary_replica):
assert len(primary_replica.outBox) == 0
primary_replica.send_3pc_batch()
set_current_time(primary_replica, FRESHNESS_TIMEOUT + 1)
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 3
check_and_pop_freshness_pre_prepare(primary_replica, POOL_LEDGER_ID)
check_and_pop_freshness_pre_prepare(primary_replica, DOMAIN_LEDGER_ID)
check_and_pop_freshness_pre_prepare(primary_replica, CONFIG_LEDGER_ID)
def test_freshness_pre_prepare_not_resend_before_next_timeout(primary_replica):
assert len(primary_replica.outBox) == 0
set_current_time(primary_replica, FRESHNESS_TIMEOUT + 1)
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 3
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 3
set_current_time(primary_replica, FRESHNESS_TIMEOUT + 1 + FRESHNESS_TIMEOUT)
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 3
set_current_time(primary_replica, FRESHNESS_TIMEOUT + 1 + FRESHNESS_TIMEOUT + 1)
primary_replica.send_3pc_batch()
assert len(primary_replica.outBox) == 6
@pytest.mark.parametrize('ordered, refreshed', [
([POOL_LEDGER_ID], [DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID]),
([DOMAIN_LEDGER_ID], [POOL_LEDGER_ID, CONFIG_LEDGER_ID]),
([CONFIG_LEDGER_ID], [POOL_LEDGER_ID, DOMAIN_LEDGER_ID]),
([POOL_LEDGER_ID, DOMAIN_LEDGER_ID], [CONFIG_LEDGER_ID]),
([POOL_LEDGER_ID, CONFIG_LEDGER_ID], [DOMAIN_LEDGER_ID]),
([DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID], [POOL_LEDGER_ID]),
([POOL_LEDGER_ID, DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID], [])
])
def test_freshness_pre_prepare_only_when_no_requests_for_ledger(tconf,
replica_with_valid_requests,
ordered, refreshed):
replica, requests = replica_with_valid_requests
for ordered_ledger_id in ordered:
replica._ordering_service.requestQueues[ordered_ledger_id] = OrderedSet([requests[ordered_ledger_id].key])
# send 3PC batch for requests
assert len(replica.outBox) == 0
set_current_time(replica, tconf.Max3PCBatchWait + 1)
replica.send_3pc_batch()
assert len(replica.outBox) == len(ordered)
# wait for freshness timeout
set_current_time(replica, FRESHNESS_TIMEOUT + 1)
# order requests
for i in range(len(ordered)):
replica._ordering_service._order_3pc_key((0, i + 1))
check_and_pop_ordered_pre_prepare(replica, ordered)
# refresh state for unordered
replica.send_3pc_batch()
assert len(replica.outBox) == len(refreshed)
for refreshed_ledger_id in refreshed:
check_and_pop_freshness_pre_prepare(replica, refreshed_ledger_id)
def test_order_empty_pre_prepare(looper, tconf, txnPoolNodeSet):
assert all(node.master_replica.last_ordered_3pc == (0, 0) for node in txnPoolNodeSet)
assert all(node.spylog.count(node.processOrdered) == 0 for node in txnPoolNodeSet)
replica = getPrimaryReplica([txnPoolNodeSet[0]], instId=0)
replica._ordering_service._do_send_3pc_batch(ledger_id=POOL_LEDGER_ID)
looper.run(eventually(
lambda: assertExp(
all(
node.master_replica.last_ordered_3pc == (0, 1) for node in txnPoolNodeSet
)
)
))
looper.run(eventually(
lambda: assertExp(
all(
node.spylog.count(node.processOrdered) == 1 for node in txnPoolNodeSet
)
)
))
| {
"content_hash": "836b3c136df689d0787f404e1c4fdcb9",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 116,
"avg_line_length": 35.04326923076923,
"alnum_prop": 0.6830840993277542,
"repo_name": "evernym/zeno",
"id": "7963188c3630378f25072a53111a57acbc12df36",
"size": "7289",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/freshness/test_replica_freshness.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "531061"
}
],
"symlink_target": ""
} |
'''
Datadog
www.datadoghq.com
----
Make sense of your IT Data
Licensed under Simplified BSD License (see LICENSE)
(C) Boxed Ice 2010 all rights reserved
(C) Datadog, Inc. 2010-2014 all rights reserved
'''
# set up logging before importing any other components
from config import get_version, initialize_logging # noqa
initialize_logging('collector')
# stdlib
import logging
import os
import signal
import sys
import time
# For pickle & PID files, see issue 293
os.umask(022)
# project
from checks.check_status import CollectorStatus
from checks.collector import Collector
from config import (
get_config,
get_parsed_args,
get_system_stats,
load_check_directory,
)
from daemon import AgentSupervisor, Daemon
from emitter import http_emitter
from util import (
EC2,
get_hostname,
Watchdog,
)
from utils.flare import configcheck, Flare
from utils.jmx import jmx_command
from utils.pidfile import PidFile
from utils.profile import AgentProfiler
# Constants
PID_NAME = "dd-agent"
WATCHDOG_MULTIPLIER = 10
RESTART_INTERVAL = 4 * 24 * 60 * 60 # Defaults to 4 days
START_COMMANDS = ['start', 'restart', 'foreground']
DD_AGENT_COMMANDS = ['check', 'flare', 'jmx']
DEFAULT_COLLECTOR_PROFILE_INTERVAL = 20
# Globals
log = logging.getLogger('collector')
class Agent(Daemon):
"""
The agent class is a daemon that runs the collector in a background process.
"""
def __init__(self, pidfile, autorestart, start_event=True, in_developer_mode=False):
Daemon.__init__(self, pidfile, autorestart=autorestart)
self.run_forever = True
self.collector = None
self.start_event = start_event
self.in_developer_mode = in_developer_mode
self._agentConfig = {}
self._checksd = []
self.collector_profile_interval = DEFAULT_COLLECTOR_PROFILE_INTERVAL
self.check_frequency = None
self.configs_reloaded = False
def _handle_sigterm(self, signum, frame):
"""Handles SIGTERM and SIGINT, which gracefully stops the agent."""
log.debug("Caught sigterm. Stopping run loop.")
self.run_forever = False
if self.collector:
self.collector.stop()
log.debug("Collector is stopped.")
def _handle_sigusr1(self, signum, frame):
"""Handles SIGUSR1, which signals an exit with an autorestart."""
self._handle_sigterm(signum, frame)
self._do_restart()
def _handle_sighup(self, signum, frame):
"""Handles SIGHUP, which signals a configuration reload."""
log.info("SIGHUP caught!")
self.reload_configs()
self.configs_reloaded = True
def reload_configs(self):
"""Reloads the agent configuration and checksd configurations."""
log.info("Attempting a configuration reload...")
# Reload checksd configs
hostname = get_hostname(self._agentConfig)
self._checksd = load_check_directory(self._agentConfig, hostname)
# Logging
num_checks = len(self._checksd['initialized_checks'])
if num_checks > 0:
log.info("Successfully reloaded {num_checks} checks".
format(num_checks=num_checks))
else:
log.info("No checksd configs found")
@classmethod
def info(cls, verbose=None):
logging.getLogger().setLevel(logging.ERROR)
return CollectorStatus.print_latest_status(verbose=verbose)
def run(self, config=None):
"""Main loop of the collector"""
# Gracefully exit on sigterm.
signal.signal(signal.SIGTERM, self._handle_sigterm)
# A SIGUSR1 signals an exit with an autorestart
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
# Handle Keyboard Interrupt
signal.signal(signal.SIGINT, self._handle_sigterm)
# A SIGHUP signals a configuration reload
signal.signal(signal.SIGHUP, self._handle_sighup)
# Save the agent start-up stats.
CollectorStatus().persist()
        # Initialize the collector.
if not config:
config = get_config(parse_args=True)
self._agentConfig = self._set_agent_config_hostname(config)
hostname = get_hostname(self._agentConfig)
systemStats = get_system_stats()
emitters = self._get_emitters()
# Load the checks.d checks
self._checksd = load_check_directory(self._agentConfig, hostname)
# Initialize the Collector
self.collector = Collector(self._agentConfig, emitters, systemStats, hostname)
# In developer mode, the number of runs to be included in a single collector profile
self.collector_profile_interval = self._agentConfig.get('collector_profile_interval',
DEFAULT_COLLECTOR_PROFILE_INTERVAL)
# Configure the watchdog.
self.check_frequency = int(self._agentConfig['check_freq'])
watchdog = self._get_watchdog(self.check_frequency)
# Initialize the auto-restarter
self.restart_interval = int(self._agentConfig.get('restart_interval', RESTART_INTERVAL))
self.agent_start = time.time()
profiled = False
collector_profiled_runs = 0
# Run the main loop.
while self.run_forever:
log.debug("Found {num_checks} checks".format(num_checks=len(self._checksd['initialized_checks'])))
# Setup profiling if necessary
if self.in_developer_mode and not profiled:
try:
profiler = AgentProfiler()
profiler.enable_profiling()
profiled = True
except Exception as e:
log.warn("Cannot enable profiler: %s" % str(e))
# Do the work.
self.collector.run(checksd=self._checksd,
start_event=self.start_event,
configs_reloaded=self.configs_reloaded)
if self.configs_reloaded:
self.configs_reloaded = False
if profiled:
if collector_profiled_runs >= self.collector_profile_interval:
try:
profiler.disable_profiling()
profiled = False
collector_profiled_runs = 0
except Exception as e:
log.warn("Cannot disable profiler: %s" % str(e))
# Check if we should restart.
if self.autorestart and self._should_restart():
self._do_restart()
# Only plan for next loop if we will continue, otherwise exit quickly.
if self.run_forever:
if watchdog:
watchdog.reset()
if profiled:
collector_profiled_runs += 1
log.info("Sleeping for {0} seconds".format(self.check_frequency))
time.sleep(self.check_frequency)
# Now clean-up.
try:
CollectorStatus.remove_latest_status()
except Exception:
pass
# Explicitly kill the process, because it might be running as a daemon.
log.info("Exiting. Bye bye.")
sys.exit(0)
def _get_emitters(self):
return [http_emitter]
def _get_watchdog(self, check_freq):
watchdog = None
if self._agentConfig.get("watchdog", True):
watchdog = Watchdog(check_freq * WATCHDOG_MULTIPLIER,
max_mem_mb=self._agentConfig.get('limit_memory_consumption', None))
watchdog.reset()
return watchdog
def _set_agent_config_hostname(self, agentConfig):
        # Try to fetch the instance Id from EC2 if no hostname has been set
        # in the config file.
# DEPRECATED
if agentConfig.get('hostname') is None and agentConfig.get('use_ec2_instance_id'):
instanceId = EC2.get_instance_id(agentConfig)
if instanceId is not None:
log.info("Running on EC2, instanceId: %s" % instanceId)
agentConfig['hostname'] = instanceId
else:
log.info('Not running on EC2, using hostname to identify this server')
return agentConfig
def _should_restart(self):
if time.time() - self.agent_start > self.restart_interval:
return True
return False
def _do_restart(self):
log.info("Running an auto-restart.")
if self.collector:
self.collector.stop()
sys.exit(AgentSupervisor.RESTART_EXIT_STATUS)
def main():
options, args = get_parsed_args()
agentConfig = get_config(options=options)
autorestart = agentConfig.get('autorestart', False)
hostname = get_hostname(agentConfig)
in_developer_mode = agentConfig.get('developer_mode')
COMMANDS_AGENT = [
'start',
'stop',
'restart',
'status',
'foreground',
]
COMMANDS_NO_AGENT = [
'info',
'check',
'configcheck',
'jmx',
'flare',
]
COMMANDS = COMMANDS_AGENT + COMMANDS_NO_AGENT
if len(args) < 1:
sys.stderr.write("Usage: %s %s\n" % (sys.argv[0], "|".join(COMMANDS)))
return 2
command = args[0]
if command not in COMMANDS:
sys.stderr.write("Unknown command: %s\n" % command)
return 3
# Deprecation notice
if command not in DD_AGENT_COMMANDS:
# Will become an error message and exit after deprecation period
from utils.deprecations import deprecate_old_command_line_tools
deprecate_old_command_line_tools()
if command in COMMANDS_AGENT:
agent = Agent(PidFile('dd-agent').get_path(), autorestart, in_developer_mode=in_developer_mode)
if command in START_COMMANDS:
log.info('Agent version %s' % get_version())
if 'start' == command:
log.info('Start daemon')
agent.start()
elif 'stop' == command:
log.info('Stop daemon')
agent.stop()
elif 'restart' == command:
log.info('Restart daemon')
agent.restart()
elif 'status' == command:
agent.status()
elif 'info' == command:
return Agent.info(verbose=options.verbose)
elif 'foreground' == command:
logging.info('Running in foreground')
if autorestart:
# Set-up the supervisor callbacks and fork it.
logging.info('Running Agent with auto-restart ON')
def child_func():
agent.start(foreground=True)
def parent_func():
agent.start_event = False
AgentSupervisor.start(parent_func, child_func)
else:
# Run in the standard foreground.
agent.start(foreground=True)
elif 'check' == command:
if len(args) < 2:
sys.stderr.write(
"Usage: %s check <check_name> [check_rate]\n"
"Add check_rate as last argument to compute rates\n"
% sys.argv[0]
)
return 1
check_name = args[1]
try:
import checks.collector
# Try the old-style check first
print getattr(checks.collector, check_name)(log).check(agentConfig)
except Exception:
# If not an old-style check, try checks.d
checks = load_check_directory(agentConfig, hostname)
for check in checks['initialized_checks']:
if check.name == check_name:
if in_developer_mode:
check.run = AgentProfiler.wrap_profiling(check.run)
cs = Collector.run_single_check(check, verbose=True)
print CollectorStatus.render_check_status(cs)
if len(args) == 3 and args[2] == 'check_rate':
print "Running 2nd iteration to capture rate metrics"
time.sleep(1)
cs = Collector.run_single_check(check, verbose=True)
print CollectorStatus.render_check_status(cs)
check.stop()
elif 'configcheck' == command or 'configtest' == command:
configcheck()
elif 'jmx' == command:
jmx_command(args[1:], agentConfig)
elif 'flare' == command:
Flare.check_user_rights()
case_id = int(args[1]) if len(args) > 1 else None
f = Flare(True, case_id)
f.collect()
try:
f.upload()
except Exception, e:
print 'The upload failed:\n{0}'.format(str(e))
return 0
if __name__ == '__main__':
try:
sys.exit(main())
except StandardError:
# Try our best to log the error.
try:
log.exception("Uncaught error running the Agent")
except Exception:
pass
raise
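
# Illustrative command lines, pieced together from the usage strings in main()
# above; "disk" and "12345" are arbitrary example values, not defaults:
#
#     python agent.py start
#     python agent.py check disk check_rate
#     python agent.py flare 12345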
| {
"content_hash": "eb3df21002a45fc83a8594a36d55bb22",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 110,
"avg_line_length": 32.87277353689567,
"alnum_prop": 0.5914544469386176,
"repo_name": "polynomial/dd-agent",
"id": "80c397ef6d9f1e023afdaf2fe1b131c6f0a5af3a",
"size": "12960",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "agent.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "8758"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "1663521"
},
{
"name": "Ruby",
"bytes": "97486"
},
{
"name": "Shell",
"bytes": "51526"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
} |
from pyjamas import DOM
from pyjamas import Factory
from ComplexPanel import ComplexPanel
class DeckPanel(ComplexPanel):
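    """
    A panel that displays all of its child widgets in a 'deck', of which only
    one is visible at a time. Children added with add()/insert() start out
    hidden; showWidget() hides the current widget and reveals the chosen one.
    """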
def __init__(self, **kwargs):
self.visibleWidget = None
if kwargs.has_key('Element'):
element = kwargs.pop('Element')
else:
element = DOM.createDiv()
self.setElement(element)
ComplexPanel.__init__(self, **kwargs)
def add(self, widget):
self.insert(widget, self.getWidgetCount())
def getVisibleWidget(self):
return self.getWidgetIndex(self.visibleWidget)
def insert(self, widget, beforeIndex=None):
if (beforeIndex < 0) or (beforeIndex > self.getWidgetCount()):
# throw new IndexOutOfBoundsException();
return
ComplexPanel.insert(self, widget, self.getElement(), beforeIndex)
child = widget.getElement()
DOM.setStyleAttribute(child, "width", "100%")
DOM.setStyleAttribute(child, "height", "100%")
widget.setVisible(False)
def remove(self, widget):
if isinstance(widget, int):
widget = self.getWidget(widget)
if not ComplexPanel.remove(self, widget):
return False
if self.visibleWidget == widget:
self.visibleWidget = None
return True
def showWidget(self, index):
self.checkIndex(index)
if self.visibleWidget is not None:
self.visibleWidget.setVisible(False)
self.visibleWidget = self.getWidget(index)
self.visibleWidget.setVisible(True)
def checkIndex(self, index):
if (index < 0) or (index >= self.getWidgetCount()):
# throw new IndexOutOfBoundsException();
pass
Factory.registerClass('pyjamas.ui.DeckPanel', DeckPanel)
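
# Minimal usage sketch (commented out; assumes the usual pyjamas bootstrap and
# the stock Label and RootPanel widgets):
#
#     from pyjamas.ui.Label import Label
#     from pyjamas.ui.RootPanel import RootPanel
#
#     deck = DeckPanel()
#     deck.add(Label("first card"))
#     deck.add(Label("second card"))
#     deck.showWidget(0)   # only the first card is visible
#     RootPanel().add(deck)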
| {
"content_hash": "c885850c451f31148b89879956782267",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 29.21311475409836,
"alnum_prop": 0.6290684624017957,
"repo_name": "jaredly/pyjamas",
"id": "f54cb73936651a0ccc5da70421934728dbd046bf",
"size": "2442",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "library/pyjamas/ui/DeckPanel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "411613"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "4243623"
},
{
"name": "Shell",
"bytes": "14781"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
try:
from django.contrib.sites.shortcuts import get_current_site
except ImportError:
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import get_model
from cms.utils.compat.dj import python_2_unicode_compatible
@python_2_unicode_compatible
class ImagePropertyMixin(object):
"""
    A mixin class to convert a CascadeElement into a proxy model giving easy
    access to the image object it references.
"""
def __str__(self):
return self.plugin_class.get_identifier(self)
@property
def image(self):
if not hasattr(self, '_image_model'):
try:
Model = get_model(*self.glossary['image']['model'].split('.'))
self._image_model = Model.objects.get(pk=self.glossary['image']['pk'])
except (KeyError, ObjectDoesNotExist):
self._image_model = None
return self._image_model
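
# Sketch of the glossary payload that the ``image`` property above resolves
# (``element`` stands for any instance using this mixin; 'filer.Image' and 42
# are hypothetical example values -- any installed model reachable through
# get_model() with a matching primary key would work):
#
#     element.glossary = {
#         'image': {'model': 'filer.Image', 'pk': 42},
#     }
#     element.image  # -> the filer.Image instance with pk=42, or None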
| {
"content_hash": "2a6b5256c288d8424b02000ad11459a7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 99,
"avg_line_length": 36.7037037037037,
"alnum_prop": 0.669021190716448,
"repo_name": "aldryn/djangocms-cascade",
"id": "df2603db18b87e99886238ea21f6254dce7f7c75",
"size": "1015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_cascade/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3472"
},
{
"name": "JavaScript",
"bytes": "86926"
},
{
"name": "Python",
"bytes": "212332"
},
{
"name": "Shell",
"bytes": "5131"
}
],
"symlink_target": ""
} |
import sys
import time
from django.conf import settings
from django.utils.datastructures import DictWrapper
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
def __init__(self, connection):
self.connection = connection
def _digest(self, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
return '%x' % (abs(hash(args)) % 4294967296L) # 2**32
def db_type(self, field):
return self._db_type(field, field.get_internal_type())
def related_db_type(self, field):
return self._db_type(field, field.get_related_internal_type())
def _db_type(self, field, internal_type):
data = DictWrapper(field.__dict__, self.connection.ops.quote_name, "qn_")
try:
return self.connection.creation.data_types[internal_type] % data
except KeyError:
return None
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
opts = model._meta
if not opts.managed or opts.proxy:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
if not f.null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
if pending:
pr = pending_references.setdefault(f.rel.to, []).append((model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"Return the SQL snippet defining the foreign key reference for a field"
qn = self.connection.ops.quote_name
if field.rel.to in known_models:
output = [style.SQL_KEYWORD('REFERENCES') + ' ' + \
style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' + \
style.SQL_FIELD(qn(field.rel.to._meta.get_field(field.rel.field_name).column)) + ')' +
self.connection.ops.deferrable_sql()
]
pending = False
else:
# We haven't yet created the table to which this field
# is related, so save it for later.
output = []
pending = True
return output, pending
def sql_for_pending_references(self, model, style, pending_references):
"Returns any ALTER TABLE statements to add constraints after the fact."
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
qn = self.connection.ops.quote_name
final_output = []
opts = model._meta
if model in pending_references:
for rel_class, f in pending_references[model]:
rel_opts = rel_class._meta
r_table = rel_opts.db_table
r_col = f.column
table = opts.db_table
col = opts.get_field(f.rel.field_name).column
# For MySQL, r_name must be unique in the first 64 characters.
# So we are careful with character usage here.
r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
final_output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % \
(qn(r_table), qn(truncate_name(r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
del pending_references[model]
return final_output
def sql_for_many_to_many(self, model, style):
"Return the CREATE TABLE statments for all the many-to-many tables defined on a model"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
output = []
for f in model._meta.local_many_to_many:
if model._meta.managed or f.rel.to._meta.managed:
output.extend(self.sql_for_many_to_many_field(model, f, style))
return output
def sql_for_many_to_many_field(self, model, f, style):
"Return the CREATE TABLE statements for a single m2m field"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
from django.db import models
from django.db.backends.util import truncate_name
output = []
if f.auto_created:
opts = model._meta
qn = self.connection.ops.quote_name
tablespace = f.db_tablespace or opts.db_tablespace
if tablespace:
sql = self.connection.ops.tablespace_sql(tablespace, inline=True)
if sql:
tablespace_sql = ' ' + sql
else:
tablespace_sql = ''
else:
tablespace_sql = ''
table_output = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + \
style.SQL_TABLE(qn(f.m2m_db_table())) + ' (']
table_output.append(' %s %s %s%s,' %
(style.SQL_FIELD(qn('id')),
style.SQL_COLTYPE(models.AutoField(primary_key=True).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL PRIMARY KEY'),
tablespace_sql))
deferred = []
inline_output, deferred = self.sql_for_inline_many_to_many_references(model, f, style)
table_output.extend(inline_output)
table_output.append(' %s (%s, %s)%s' %
(style.SQL_KEYWORD('UNIQUE'),
style.SQL_FIELD(qn(f.m2m_column_name())),
style.SQL_FIELD(qn(f.m2m_reverse_name())),
tablespace_sql))
table_output.append(')')
if opts.db_tablespace:
# f.db_tablespace is only for indices, so ignore its value here.
table_output.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
table_output.append(';')
output.append('\n'.join(table_output))
for r_table, r_col, table, col in deferred:
r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
(qn(r_table),
qn(truncate_name(r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
# Add any extra SQL needed to support auto-incrementing PKs
autoinc_sql = self.connection.ops.autoinc_sql(f.m2m_db_table(), 'id')
if autoinc_sql:
for stmt in autoinc_sql:
output.append(stmt)
return output
def sql_for_inline_many_to_many_references(self, model, field, style):
"Create the references to other tables required by a many-to-many table"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s %s (%s)%s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL REFERENCES'),
style.SQL_TABLE(qn(opts.db_table)),
style.SQL_FIELD(qn(opts.pk.column)),
self.connection.ops.deferrable_sql()),
' %s %s %s %s (%s)%s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL REFERENCES'),
style.SQL_TABLE(qn(field.rel.to._meta.db_table)),
style.SQL_FIELD(qn(field.rel.to._meta.pk.column)),
self.connection.ops.deferrable_sql())
]
deferred = []
return table_output, deferred
def sql_indexes_for_model(self, model, style):
"Returns the CREATE INDEX SQL statements for a single model"
if not model._meta.managed or model._meta.proxy:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_indexes_for_field(model, f, style))
return output
def sql_indexes_for_field(self, model, f, style):
"Return the CREATE INDEX SQL statements for a single model field"
from django.db.backends.util import truncate_name
if f.db_index and not f.unique:
qn = self.connection.ops.quote_name
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
sql = self.connection.ops.tablespace_sql(tablespace)
if sql:
tablespace_sql = ' ' + sql
else:
tablespace_sql = ''
else:
tablespace_sql = ''
i_name = '%s_%s' % (model._meta.db_table, self._digest(f.column))
output = [style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(i_name, self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(model._meta.db_table)) + ' ' +
"(%s)" % style.SQL_FIELD(qn(f.column)) +
"%s;" % tablespace_sql]
else:
output = []
return output
def sql_destroy_model(self, model, references_to_delete, style):
"Return the DROP TABLE and restraint dropping statements for a single model"
if not model._meta.managed or model._meta.proxy:
return []
# Drop the table now
qn = self.connection.ops.quote_name
output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(model._meta.db_table)))]
if model in references_to_delete:
output.extend(self.sql_remove_table_constraints(model, references_to_delete, style))
if model._meta.has_auto_field:
ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
if ds:
output.append(ds)
return output
def sql_remove_table_constraints(self, model, references_to_delete, style):
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
output = []
qn = self.connection.ops.quote_name
for rel_class, f in references_to_delete[model]:
table = rel_class._meta.db_table
col = f.column
r_table = model._meta.db_table
r_col = model._meta.get_field(f.rel.field_name).column
r_name = '%s_refs_%s_%s' % (col, r_col, self._digest(table, r_table))
output.append('%s %s %s %s;' % \
(style.SQL_KEYWORD('ALTER TABLE'),
style.SQL_TABLE(qn(table)),
style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
style.SQL_FIELD(qn(truncate_name(r_name, self.connection.ops.max_name_length())))))
del references_to_delete[model]
return output
def sql_destroy_many_to_many(self, model, f, style):
"Returns the DROP TABLE statements for a single m2m field"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
qn = self.connection.ops.quote_name
output = []
if f.auto_created:
output.append("%s %s;" % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(f.m2m_db_table()))))
ds = self.connection.ops.drop_sequence_sql("%s_%s" % (model._meta.db_table, f.column))
if ds:
output.append(ds)
return output
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print "Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr)
self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
# Confirm the feature set of the test database
self.connection.features.confirm()
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
from django.db import router
if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
call_command('createcachetable', cache._table, database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = self.connection.cursor()
return test_database_name
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
def _create_test_db(self, verbosity, autoclobber):
"Internal implementation - creates the test db tables."
suffix = self.sql_table_creation_suffix()
test_database_name = self._get_test_db_name()
qn = self.connection.ops.quote_name
# Create the test database and connect to it. We need to autocommit
# if the database supports it because PostgreSQL doesn't allow
# CREATE/DROP DATABASE statements within transactions.
cursor = self.connection.cursor()
self.set_autocommit()
try:
cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception, e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database '%s'..." % self.connection.alias
cursor.execute("DROP DATABASE %s" % qn(test_database_name))
cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception, e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
return test_database_name
def destroy_test_db(self, old_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
self.connection.close()
test_database_name = self.connection.settings_dict['NAME']
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print "Destroying test database for alias '%s'%s..." % (self.connection.alias, test_db_repr)
self.connection.settings_dict['NAME'] = old_database_name
self._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
"Internal implementation - remove the test db tables."
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
cursor = self.connection.cursor()
self.set_autocommit()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
cursor.execute("DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name))
self.connection.close()
def set_autocommit(self):
"Make sure a connection is in autocommit mode."
if hasattr(self.connection.connection, "autocommit"):
if callable(self.connection.connection.autocommit):
self.connection.connection.autocommit(True)
else:
self.connection.connection.autocommit = True
elif hasattr(self.connection.connection, "set_isolation_level"):
self.connection.connection.set_isolation_level(0)
def sql_table_creation_suffix(self):
"SQL to append to the end of the test table creation statements"
return ''
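
# Rough sketch of how a concrete backend builds on BaseDatabaseCreation; the
# names and SQL below are hypothetical, and real backends under
# django.db.backends.* differ in detail:
#
#     class DatabaseCreation(BaseDatabaseCreation):
#         # Column types used when rendering Django fields to DDL; the format
#         # strings are filled from the field's attributes via DictWrapper.
#         data_types = {
#             'AutoField': 'integer AUTO_INCREMENT',
#             'CharField': 'varchar(%(max_length)s)',
#             'IntegerField': 'integer',
#         }
#
#         def sql_table_creation_suffix(self):
#             # Appended to the CREATE DATABASE statement for the test database.
#             return 'CHARACTER SET utf8'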
| {
"content_hash": "187e7920de6e58cfa4ba3e2aeea8af72",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 148,
"avg_line_length": 45.89837398373984,
"alnum_prop": 0.572491364803826,
"repo_name": "bernardokyotoku/skillplant",
"id": "3932a1740e228256fca1b81f2b6cba96e7858650",
"size": "22582",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/db/backends/creation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "103281"
},
{
"name": "Python",
"bytes": "4219238"
},
{
"name": "Shell",
"bytes": "500"
}
],
"symlink_target": ""
} |
'''OpenGL extension EXT.texture_snorm
This module customises the behaviour of the
OpenGL.raw.GL.EXT.texture_snorm to provide a more
Python-friendly API
Overview (from the spec)
Fixed-point textures in unextended OpenGL have integer components,
but those values are taken to represent floating-point values in
the range [0.0,1.0]. These integer components are considered
"unsigned normalized" integers. When such a texture is accessed by
a shader or by fixed-function fragment processing, floating-point
values are returned in the range [0.0,1.0].
This extension provides a set of new "signed normalized" integer
texture formats. These are taken to represent a floating-point
value in the range [-1.0,1.0] with an exact 0.0.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/texture_snorm.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.texture_snorm import *
from OpenGL.raw.GL.EXT.texture_snorm import _EXTENSION_NAME
def glInitTextureSnormEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
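
# Usage sketch (not executed here): requires a current GL context, and assumes
# GL_RGBA8_SNORM is among the tokens re-exported from the raw module above;
# width, height and signed_byte_data are placeholders.
#
#     from OpenGL.GL import glTexImage2D, GL_TEXTURE_2D, GL_RGBA, GL_BYTE
#     if glInitTextureSnormEXT():
#         # Components are signed bytes, read back in the range [-1.0, 1.0].
#         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8_SNORM, width, height, 0,
#                      GL_RGBA, GL_BYTE, signed_byte_data)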
### END AUTOGENERATED SECTION
| {
"content_hash": "6a4f183046cb04eb685550273e55d3dd",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 37.111111111111114,
"alnum_prop": 0.7829341317365269,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "f424b40c409ff2e044cc8b3613eccc3e9b2ad026",
"size": "1336",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GL/EXT/texture_snorm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
"""The tests for the Scene component."""
import io
import unittest
from homeassistant.setup import setup_component
from homeassistant.components import light, scene
from homeassistant.util.yaml import loader as yaml_loader
from tests.common import get_test_home_assistant
from tests.components.light import common as common_light
from tests.components.scene import common
class TestScene(unittest.TestCase):
"""Test the scene component."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
test_light = getattr(self.hass.components, 'test.light')
test_light.init()
assert setup_component(self.hass, light.DOMAIN, {
light.DOMAIN: {'platform': 'test'}
})
self.light_1, self.light_2 = test_light.DEVICES[0:2]
common_light.turn_off(
self.hass, [self.light_1.entity_id, self.light_2.entity_id])
self.hass.block_till_done()
assert not self.light_1.is_on
assert not self.light_2.is_on
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_config_yaml_alias_anchor(self):
"""Test the usage of YAML aliases and anchors.
The following test scene configuration is equivalent to:
scene:
- name: test
entities:
light_1: &light_1_state
state: 'on'
brightness: 100
light_2: *light_1_state
When encountering a YAML alias/anchor, the PyYAML parser will use a
reference to the original dictionary, instead of creating a copy, so
care needs to be taken to not modify the original.
"""
entity_state = {
'state': 'on',
'brightness': 100,
}
assert setup_component(self.hass, scene.DOMAIN, {
'scene': [{
'name': 'test',
'entities': {
self.light_1.entity_id: entity_state,
self.light_2.entity_id: entity_state,
}
}]
})
common.activate(self.hass, 'scene.test')
self.hass.block_till_done()
assert self.light_1.is_on
assert self.light_2.is_on
assert 100 == self.light_1.last_call('turn_on')[1].get('brightness')
assert 100 == self.light_2.last_call('turn_on')[1].get('brightness')
def test_config_yaml_bool(self):
"""Test parsing of booleans in yaml config."""
config = (
'scene:\n'
' - name: test\n'
' entities:\n'
' {0}: on\n'
' {1}:\n'
' state: on\n'
' brightness: 100\n').format(
self.light_1.entity_id, self.light_2.entity_id)
with io.StringIO(config) as file:
doc = yaml_loader.yaml.load(file)
assert setup_component(self.hass, scene.DOMAIN, doc)
common.activate(self.hass, 'scene.test')
self.hass.block_till_done()
assert self.light_1.is_on
assert self.light_2.is_on
assert 100 == self.light_2.last_call('turn_on')[1].get('brightness')
def test_activate_scene(self):
"""Test active scene."""
assert setup_component(self.hass, scene.DOMAIN, {
'scene': [{
'name': 'test',
'entities': {
self.light_1.entity_id: 'on',
self.light_2.entity_id: {
'state': 'on',
'brightness': 100,
}
}
}]
})
common.activate(self.hass, 'scene.test')
self.hass.block_till_done()
assert self.light_1.is_on
assert self.light_2.is_on
assert 100 == self.light_2.last_call('turn_on')[1].get('brightness')
| {
"content_hash": "b505a0231582cacd0593f038804304c8",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 76,
"avg_line_length": 32.479674796747965,
"alnum_prop": 0.5476846057571965,
"repo_name": "aequitas/home-assistant",
"id": "99364d51e6c5ef0a6a42665671c8a75df58d6975",
"size": "3995",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/scene/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15601734"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
__author__ = 'fahadadeel'
import jpype
import os.path
from programmingwithdocuments import ExtractContent
asposeapispath = os.path.join(os.path.abspath("../../../../"), "lib")
print "You need to put your Aspose.Words for Java APIs .jars in this folder:\n"+asposeapispath
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % asposeapispath)
testObject = ExtractContent('./data/')
testObject.main()
| {
"content_hash": "a70e45752aa360117b3c18d5b3779827",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 94,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.7367149758454107,
"repo_name": "aspose-words/Aspose.Words-for-Java",
"id": "86f85d73230c02b8179ecb5d45ea538ed0eca91c",
"size": "414",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Plugins/Aspose_Words_Java_for_Python/tests/programmingwithdocuments/workingwithdocument/extractcontent/ExtractContent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "43601"
},
{
"name": "Java",
"bytes": "599231"
},
{
"name": "PHP",
"bytes": "136694"
},
{
"name": "Python",
"bytes": "150390"
},
{
"name": "Rich Text Format",
"bytes": "98703"
},
{
"name": "Ruby",
"bytes": "81832"
}
],
"symlink_target": ""
} |
from functools import partial
from typing import Any, Callable, Dict
import numpy as np
import pandas as pd
from flask_babel import gettext as _
from pandas import DataFrame, NamedAgg
from superset.exceptions import InvalidPostProcessingError
NUMPY_FUNCTIONS = {
"average": np.average,
"argmin": np.argmin,
"argmax": np.argmax,
"count": np.ma.count,
"count_nonzero": np.count_nonzero,
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"max": np.max,
"mean": np.mean,
"median": np.median,
"nansum": np.nansum,
"nanmin": np.nanmin,
"nanmax": np.nanmax,
"nanmean": np.nanmean,
"nanmedian": np.nanmedian,
"nanpercentile": np.nanpercentile,
"min": np.min,
"percentile": np.percentile,
"prod": np.prod,
"product": np.product,
"std": np.std,
"sum": np.sum,
"var": np.var,
}
DENYLIST_ROLLING_FUNCTIONS = (
"count",
"corr",
"cov",
"kurt",
"max",
"mean",
"median",
"min",
"std",
"skew",
"sum",
"var",
"quantile",
)
ALLOWLIST_CUMULATIVE_FUNCTIONS = (
"cummax",
"cummin",
"cumprod",
"cumsum",
)
PROPHET_TIME_GRAIN_MAP = {
"PT1S": "S",
"PT1M": "min",
"PT5M": "5min",
"PT10M": "10min",
"PT15M": "15min",
"PT30M": "30min",
"PT1H": "H",
"P1D": "D",
"P1W": "W",
"P1M": "M",
"P3M": "Q",
"P1Y": "A",
"1969-12-28T00:00:00Z/P1W": "W-SUN",
"1969-12-29T00:00:00Z/P1W": "W-MON",
"P1W/1970-01-03T00:00:00Z": "W-SAT",
"P1W/1970-01-04T00:00:00Z": "W-SUN",
}
RESAMPLE_METHOD = ("asfreq", "bfill", "ffill", "linear", "median", "mean", "sum")
FLAT_COLUMN_SEPARATOR = ", "
def _is_multi_index_on_columns(df: DataFrame) -> bool:
return isinstance(df.columns, pd.MultiIndex)
def validate_column_args(*argnames: str) -> Callable[..., Any]:
def wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
def wrapped(df: DataFrame, **options: Any) -> Any:
if _is_multi_index_on_columns(df):
                # MultiIndex columns: validate against the first level
columns = df.columns.get_level_values(0)
else:
columns = df.columns.tolist()
for name in argnames:
if name in options and not all(
elem in columns for elem in options.get(name) or []
):
raise InvalidPostProcessingError(
_("Referenced columns not available in DataFrame.")
)
return func(df, **options)
return wrapped
return wrapper
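# Illustrative usage of the decorator above (hypothetical operation name):
#   @validate_column_args("columns", "rename_columns")
#   def my_operation(df: DataFrame, columns=None, rename_columns=None): ...
# Any referenced column missing from `df` raises InvalidPostProcessingError
# before the wrapped operation runs.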
def _get_aggregate_funcs(
df: DataFrame,
aggregates: Dict[str, Dict[str, Any]],
) -> Dict[str, NamedAgg]:
"""
Converts a set of aggregate config objects into functions that pandas can use as
aggregators. Currently only numpy aggregators are supported.
:param df: DataFrame on which to perform aggregate operation.
:param aggregates: Mapping from column name to aggregate config.
:return: Mapping from metric name to function that takes a single input argument.
"""
agg_funcs: Dict[str, NamedAgg] = {}
for name, agg_obj in aggregates.items():
column = agg_obj.get("column", name)
if column not in df:
raise InvalidPostProcessingError(
_(
"Column referenced by aggregate is undefined: %(column)s",
column=column,
)
)
if "operator" not in agg_obj:
raise InvalidPostProcessingError(
_(
"Operator undefined for aggregator: %(name)s",
name=name,
)
)
operator = agg_obj["operator"]
if callable(operator):
aggfunc = operator
else:
func = NUMPY_FUNCTIONS.get(operator)
if not func:
raise InvalidPostProcessingError(
_(
"Invalid numpy function: %(operator)s",
operator=operator,
)
)
options = agg_obj.get("options", {})
aggfunc = partial(func, **options)
agg_funcs[name] = NamedAgg(column=column, aggfunc=aggfunc)
return agg_funcs
def _append_columns(
base_df: DataFrame, append_df: DataFrame, columns: Dict[str, str]
) -> DataFrame:
"""
Function for adding columns from one DataFrame to another DataFrame. Calls the
assign method, which overwrites the original column in `base_df` if the column
already exists, and appends the column if the name is not defined.
    Note that this is a memory-intensive operation.
:param base_df: DataFrame which to use as the base
:param append_df: DataFrame from which to select data.
:param columns: columns on which to append, mapping source column to
target column. For instance, `{'y': 'y'}` will replace the values in
column `y` in `base_df` with the values in `y` in `append_df`,
while `{'y': 'y2'}` will add a column `y2` to `base_df` based
on values in column `y` in `append_df`, leaving the original column `y`
in `base_df` unchanged.
:return: new DataFrame with combined data from `base_df` and `append_df`
"""
if all(key == value for key, value in columns.items()):
# make sure to return a new DataFrame instead of changing the `base_df`.
_base_df = base_df.copy()
_base_df.loc[:, columns.keys()] = append_df
return _base_df
append_df = append_df.rename(columns=columns)
return pd.concat([base_df, append_df], axis="columns")
| {
"content_hash": "5da76ddc144b5c279ce7a4333e5dfcbc",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 85,
"avg_line_length": 30.891304347826086,
"alnum_prop": 0.5730119634060521,
"repo_name": "zhouyao1994/incubator-superset",
"id": "3d14f643c54b56f196528356d1457227e01b2084",
"size": "6469",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "superset/utils/pandas_postprocessing/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4776"
},
{
"name": "Dockerfile",
"bytes": "6940"
},
{
"name": "HTML",
"bytes": "1243911"
},
{
"name": "JavaScript",
"bytes": "2445349"
},
{
"name": "Jinja",
"bytes": "5542"
},
{
"name": "Jupyter Notebook",
"bytes": "1925627"
},
{
"name": "Less",
"bytes": "106438"
},
{
"name": "Makefile",
"bytes": "3946"
},
{
"name": "Mako",
"bytes": "1197"
},
{
"name": "Pug",
"bytes": "2969"
},
{
"name": "Python",
"bytes": "6296253"
},
{
"name": "Shell",
"bytes": "56211"
},
{
"name": "Smarty",
"bytes": "4298"
},
{
"name": "TypeScript",
"bytes": "6909337"
}
],
"symlink_target": ""
} |
'''
Created on May 20, 2014
@author: PengAn
'''
from app.lib import pdfMinerHelper, pyPdfHelper
from flask import render_template
import urllib2
from flask import Response
def parseRequest(request,fileStream):
parserChoice = request.form['parser']
if parserChoice == 'html':
resultHtml = pdfMinerHelper.toHTML(fileStream)
return resultHtml
if parserChoice == 'text':
resultText = pdfMinerHelper.toText(fileStream)
return render_template("result.html",result=resultText)
if parserChoice == 'minerxml':
resultXML = pdfMinerHelper.toXML(fileStream)
return Response(resultXML, mimetype='text/xml')
if parserChoice == 'xpdf':
resultXML = requestPDFX(fileStream)
return Response(resultXML, mimetype='text/xml')
if parserChoice == 'pypdf2text':
resultText = pyPdfHelper.getPDFContent(fileStream)
return render_template("result.html",result=resultText)
def requestPDFX(fileStream):
pdfdata = fileStream.read()
request = urllib2.Request('http://pdfx.cs.man.ac.uk', pdfdata, headers={'Content-Type' : 'application/pdf'})
resultXML = urllib2.urlopen(request).read()
return resultXML
def parseRequestWithLocalPath(request,filepath):
fileStream = file(filepath, 'rb')
return parseRequest(request, fileStream)
| {
"content_hash": "035a3682f7520890a974d53bde9ed97c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 112,
"avg_line_length": 35.05263157894737,
"alnum_prop": 0.7102102102102102,
"repo_name": "pengan1987/pythonDev",
"id": "2f1bb13f9efdcabb68797a54691373109c7cb5b1",
"size": "1332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/lib/requestProcessor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "116391"
},
{
"name": "Shell",
"bytes": "57"
}
],
"symlink_target": ""
} |
"""Simple tokenizer for Javanese sentences.
"""
from __future__ import unicode_literals
import re
from utils import utf8
SPLIT = re.compile(r'[/\s]+')
TOKEN = re.compile(r"[-'a-zâåèéêìòù]+$")
def main(unused_argv):
for line in utf8.stdin:
for token in SPLIT.split(line.strip().rstrip('?').lower()):
if TOKEN.match(token):
writer = utf8.stdout
else:
writer = utf8.stderr
writer.write('%s\n' % token)
return
if __name__ == '__main__':
main(None)
| {
"content_hash": "fcb5f6c2bdb2a942216e578df3c263f0",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 63,
"avg_line_length": 19.076923076923077,
"alnum_prop": 0.6129032258064516,
"repo_name": "googlei18n/language-resources",
"id": "bc685301d48d9b49a60d78aa8103c27311e15fb6",
"size": "1133",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jv/tokenize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5561"
},
{
"name": "C++",
"bytes": "358454"
},
{
"name": "Dockerfile",
"bytes": "6989"
},
{
"name": "Java",
"bytes": "77129"
},
{
"name": "Makefile",
"bytes": "1621"
},
{
"name": "Python",
"bytes": "396406"
},
{
"name": "Ruby",
"bytes": "25"
},
{
"name": "Shell",
"bytes": "40740"
}
],
"symlink_target": ""
} |
from src.base.solution import Solution
from src.tests.part2.q074_test_search_2d_matrix import Search2dMatrixTestCases
class Search2dMatrix(Solution):
def gen_test_cases(self):
return Search2dMatrixTestCases()
def run_test(self, input):
return self.searchMatrix(input[0], input[1])
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0]:
return False
m, n= len(matrix), len(matrix[0])
sr, er = 0, m-1
# print(sr, er)
if target >= matrix[m-1][0]:
sr = m-1
else:
while sr < er - 1:
mid = (sr + er) / 2
if target == matrix[mid][0]:
return True
if target < matrix[mid][0]:
er = mid
else:
sr = mid
# print(sr, er)
sc, ec = 0, n-1
while sc <= ec:
# print("col", sc, ec)
mid = (sc + ec) / 2
if target == matrix[sr][mid]:
return True
if target < matrix[sr][mid]:
ec = mid - 1
else:
sc = mid + 1
return False
if __name__ == '__main__':
sol = Search2dMatrix()
sol.run_tests() | {
"content_hash": "194f28848cb0b35019c32e1d1c78c9a0",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 78,
"avg_line_length": 23.913793103448278,
"alnum_prop": 0.4578226387887527,
"repo_name": "hychrisli/PyAlgorithms",
"id": "864f908f33339859670ffed368a2266da1e2d607",
"size": "1387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/solutions/part2/q074_search_2d_matrix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "201747"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 12); | {
"content_hash": "9741520d2b4a35c3ed5e010245e003f5",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 162,
"avg_line_length": 37.42857142857143,
"alnum_prop": 0.7022900763358778,
"repo_name": "antoinecarme/pyaf",
"id": "844bf2453e28bb4cfcdccf8a0e25a7d114be109e",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Anscombe/trend_Lag1Trend/cycle_0/ar_12/test_artificial_32_Anscombe_Lag1Trend_0_12_0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
'''
static typed pystone
'''
from time import clock
TRUE = 1
FALSE = 0
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Ident1 = 1
Ident2 = 2
Ident3 = 3
Ident4 = 4
Ident5 = 5
let PtrGlb : Record = None
let PtrGlbNext : Record = None
class Record:
def __init__(self, PtrComp:Record = None, Discr = 0, EnumComp = 0, IntComp = 0, StringComp = '\0'):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self) ->Record:
r = Record(
PtrComp=self.PtrComp,
Discr=self.Discr,
EnumComp=self.EnumComp,
IntComp=self.IntComp,
StringComp=self.StringComp
)
return r
def create_array1glob(n:int) -> []int:
comp = []int( 0 for i in range(51) )
return comp
def create_array2glob(n:int) -> [][]int:
comp = [][]int(
create_array1glob(n) for i in range(n)
)
return comp
Array1Glob = create_array1glob(51)
Array2Glob = create_array2glob(51)
def Func1(CharPar1:string, CharPar2:string) ->int:
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1:string, StrParI2:string) -> int:
IntLoc = 1
CharLoc = '\0' ## c++ scope style
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn:int) ->int:
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
def Proc2(IntParIO:int) ->int:
IntLoc = IntParIO + 10
EnumLoc = -1 ## c++ scope style
while True:
if Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc4():
global Char2Glob
BoolLoc = Char1Glob == 'A'
BoolLoc = BoolLoc or BoolGlob
Char2Glob = 'B'
def Proc5():
global Char1Glob
global BoolGlob
Char1Glob = 'A'
BoolGlob = FALSE
def Proc6(EnumParIn:int) ->int:
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def Proc7(IntParI1:int, IntParI2:int) ->int:
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par:[]int, Array2Par:[][]int, IntParI1:int, IntParI2:int):
global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
IntGlob = 5
def Proc3(PtrParOut:Record) ->Record:
global IntGlob
if PtrGlb is not None:
PtrParOut = PtrGlb.PtrComp
else:
IntGlob = 100
PtrGlb.IntComp = Proc7(10, IntGlob)
return PtrParOut
def Proc1(PtrParIn:Record ) ->Record:
NextRecord = PtrGlb.copy()
PtrParIn.PtrComp = NextRecord
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
NextRecord.PtrComp = PtrGlb.PtrComp
NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc0(loops:int):
global IntGlob
global BoolGlob
global Char1Glob
global Char2Glob
global Array1Glob
global Array2Glob
global PtrGlb
global PtrGlbNext
PtrGlbNext = Record( PtrComp=None, Discr=0, EnumComp=0, IntComp=0, StringComp='\0' )
PtrGlb = Record(
PtrComp=PtrGlbNext,
Discr=Ident1,
EnumComp=Ident3,
IntComp=40,
StringComp="DHRYSTONE PROGRAM, SOME STRING"
)
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
Array2Glob[8][7] = 10
## c++ has different variable scope rules that are safer (and better)
## than regular Python, where IntLoc3 is created in while loop below `while IntLoc1 < IntLoc2:`
    ## IntLoc3 then bleeds into the outer scope, which is bad: if `IntLoc1 > IntLoc2` the loop never runs and IntLoc3 would be undefined.
IntLoc3 = -1 ## c++ scope hack
for i in range(loops):
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = Proc7(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
PtrGlb = Proc1(PtrGlb)
CharIndex = 'A'
while CharIndex <= Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = Proc6(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
def pystones(loops:int):
starttime = clock()
Proc0(loops)
benchtime = clock() - starttime
#print(benchtime)
loopsPerBenchtime = ( double(loops) / benchtime)
print(loopsPerBenchtime)
#print("#Pystone(%s) time for %s passes = %s" % (__version__, LOOPS, benchtime))
#print("#This machine benchmarks at pystones/second: %s" %stones)
def main():
LOOPS = 100000
pystones( LOOPS )
| {
"content_hash": "aa17201bc25a940d1148c2afc50d78bf",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 108,
"avg_line_length": 23.297071129707113,
"alnum_prop": 0.6979166666666666,
"repo_name": "tempbottle/Rusthon",
"id": "00e57b1ed03f3591d8e8a7f72aede49a11f1ecfe",
"size": "5568",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "regtests/bench/pystone-typed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "23667"
},
{
"name": "HTML",
"bytes": "44433"
},
{
"name": "JavaScript",
"bytes": "29887"
},
{
"name": "Perl",
"bytes": "66040"
},
{
"name": "Python",
"bytes": "1024366"
}
],
"symlink_target": ""
} |
import unittest
import error_handling as er
class FileLike(object):
def __init__(self, fail_something=True):
self.is_open = False
self.was_open = False
self.did_something = False
self.fail_something = fail_something
def open(self):
self.was_open = False
self.is_open = True
def close(self):
self.is_open = False
self.was_open = True
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def do_something(self):
self.did_something = True
if self.fail_something:
raise Exception("Failed while doing something")
class ErrorHandlingTest(unittest.TestCase):
def test_throw_exception(self):
with self.assertRaisesWithMessage(Exception):
er.handle_error_by_throwing_exception()
def test_return_none(self):
self.assertEqual(er.handle_error_by_returning_none('1'), 1,
'Result of valid input should not be None')
self.assertIsNone(er.handle_error_by_returning_none('a'),
'Result of invalid input should be None')
def test_return_tuple(self):
successful_result, result = er.handle_error_by_returning_tuple('1')
self.assertIs(successful_result, True,
'Valid input should be successful')
self.assertEqual(result, 1, 'Result of valid input should not be None')
failure_result, result = er.handle_error_by_returning_tuple('a')
self.assertIs(failure_result, False,
'Invalid input should not be successful')
def test_filelike_objects_are_closed_on_exception(self):
filelike_object = FileLike(fail_something=True)
with self.assertRaisesWithMessage(Exception):
er.filelike_objects_are_closed_on_exception(filelike_object)
self.assertIs(filelike_object.is_open, False,
'filelike_object should be closed')
self.assertIs(filelike_object.was_open, True,
'filelike_object should have been opened')
self.assertIs(filelike_object.did_something, True,
'filelike_object should call do_something()')
def test_filelike_objects_are_closed_without_exception(self):
filelike_object = FileLike(fail_something=False)
er.filelike_objects_are_closed_on_exception(filelike_object)
self.assertIs(filelike_object.is_open, False,
'filelike_object should be closed')
self.assertIs(filelike_object.was_open, True,
'filelike_object should have been opened')
self.assertIs(filelike_object.did_something, True,
'filelike_object should call do_something()')
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex = self.assertRaisesRegexp
except AttributeError:
pass
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d575532f6ce2558defa3562eed2617cf",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 35.63636363636363,
"alnum_prop": 0.6221301020408163,
"repo_name": "pheanex/xpython",
"id": "a7a76f7fa79a29def0f914118b419c012c6c6ad6",
"size": "3136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/error-handling/error_handling_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "410838"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
} |
import logging
from typing import Dict, Optional, TYPE_CHECKING
from ray.rllib.env.base_env import _DUMMY_AGENT_ID
from ray.rllib.evaluation.episode import MultiAgentEpisode
from ray.rllib.evaluation.per_policy_sample_collector import \
_PerPolicySampleCollector
from ray.rllib.evaluation.sample_collector import _SampleCollector
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import MultiAgentBatch
from ray.rllib.utils import force_list
from ray.rllib.utils.annotations import override
from ray.rllib.utils.debug import summarize
from ray.rllib.utils.typing import AgentID, EnvID, EpisodeID, PolicyID, \
TensorType
from ray.util.debug import log_once
if TYPE_CHECKING:
from ray.rllib.agents.callbacks import DefaultCallbacks
logger = logging.getLogger(__name__)
class _MultiAgentSampleCollector(_SampleCollector):
"""Builds SampleBatches for each policy (and agent) in a multi-agent env.
Note: This is an experimental class only used when
`config._use_trajectory_view_api` = True.
Once `_use_trajectory_view_api` becomes the default in configs:
This class will deprecate the `SampleBatchBuilder` class.
Input data is collected in central per-policy buffers, which
efficiently pre-allocate memory (over n timesteps) and re-use the same
memory even for succeeding agents and episodes.
Input_dicts for action computations, SampleBatches for postprocessing, and
train_batch dicts are - if possible - created from the central per-policy
    buffers via views (to avoid copying of data).
"""
def __init__(
self,
policy_map: Dict[PolicyID, Policy],
callbacks: "DefaultCallbacks",
# TODO: (sven) make `num_agents` flexibly grow in size.
num_agents: int = 100,
num_timesteps=None,
time_major: Optional[bool] = False):
"""Initializes a _MultiAgentSampleCollector object.
Args:
policy_map (Dict[PolicyID,Policy]): Maps policy ids to policy
instances.
callbacks (DefaultCallbacks): RLlib callbacks (configured in the
Trainer config dict). Used for trajectory postprocessing event.
num_agents (int): The max number of agent slots to pre-allocate
in the buffer.
num_timesteps (int): The max number of timesteps to pre-allocate
in the buffer.
time_major (Optional[bool]): Whether to preallocate buffers and
collect samples in time-major fashion (TxBx...).
"""
self.policy_map = policy_map
self.callbacks = callbacks
if num_agents == float("inf") or num_agents is None:
num_agents = 1000
self.num_agents = int(num_agents)
# Collect SampleBatches per-policy in _PerPolicySampleCollectors.
self.policy_sample_collectors = {}
for pid, policy in policy_map.items():
# Figure out max-shifts (before and after).
view_reqs = policy.training_view_requirements
max_shift_before = 0
max_shift_after = 0
for vr in view_reqs.values():
shift = force_list(vr.shift)
if max_shift_before > shift[0]:
max_shift_before = shift[0]
if max_shift_after < shift[-1]:
max_shift_after = shift[-1]
# Figure out num_timesteps and num_agents.
kwargs = {"time_major": time_major}
if policy.is_recurrent():
kwargs["num_timesteps"] = \
policy.config["model"]["max_seq_len"]
kwargs["time_major"] = True
elif num_timesteps is not None:
kwargs["num_timesteps"] = num_timesteps
self.policy_sample_collectors[pid] = _PerPolicySampleCollector(
num_agents=self.num_agents,
shift_before=-max_shift_before,
shift_after=max_shift_after,
**kwargs)
# Internal agent-to-policy map.
self.agent_to_policy = {}
# Number of "inference" steps taken in the environment.
# Regardless of the number of agents involved in each of these steps.
self.count = 0
@override(_SampleCollector)
def add_init_obs(self, episode_id: EpisodeID, agent_id: AgentID,
env_id: EnvID, policy_id: PolicyID,
obs: TensorType) -> None:
# Make sure our mappings are up to date.
if agent_id not in self.agent_to_policy:
self.agent_to_policy[agent_id] = policy_id
else:
assert self.agent_to_policy[agent_id] == policy_id
# Add initial obs to Trajectory.
self.policy_sample_collectors[policy_id].add_init_obs(
episode_id, agent_id, env_id, chunk_num=0, init_obs=obs)
@override(_SampleCollector)
def add_action_reward_next_obs(self, episode_id: EpisodeID,
agent_id: AgentID, env_id: EnvID,
policy_id: PolicyID, agent_done: bool,
values: Dict[str, TensorType]) -> None:
assert policy_id in self.policy_sample_collectors
# Make sure our mappings are up to date.
if agent_id not in self.agent_to_policy:
self.agent_to_policy[agent_id] = policy_id
else:
assert self.agent_to_policy[agent_id] == policy_id
# Include the current agent id for multi-agent algorithms.
if agent_id != _DUMMY_AGENT_ID:
values["agent_id"] = agent_id
# Add action/reward/next-obs (and other data) to Trajectory.
self.policy_sample_collectors[policy_id].add_action_reward_next_obs(
episode_id, agent_id, env_id, agent_done, values)
@override(_SampleCollector)
def total_env_steps(self) -> int:
return sum(a.timesteps_since_last_reset
for a in self.policy_sample_collectors.values())
def total(self):
# TODO: (sven) deprecate; use `self.total_env_steps`, instead.
# Sampler is currently still using `total()`.
return self.total_env_steps()
@override(_SampleCollector)
def get_inference_input_dict(self, policy_id: PolicyID) -> \
Dict[str, TensorType]:
policy = self.policy_map[policy_id]
view_reqs = policy.model.inference_view_requirements
return self.policy_sample_collectors[
policy_id].get_inference_input_dict(view_reqs)
@override(_SampleCollector)
def has_non_postprocessed_data(self) -> bool:
return self.total_env_steps() > 0
@override(_SampleCollector)
def postprocess_trajectories_so_far(
self, episode: Optional[MultiAgentEpisode] = None) -> None:
# Loop through each per-policy collector and create a view (for each
# agent as SampleBatch) from its buffers for post-processing
all_agent_batches = {}
for pid, rc in self.policy_sample_collectors.items():
policy = self.policy_map[pid]
view_reqs = policy.training_view_requirements
agent_batches = rc.get_postprocessing_sample_batches(
episode, view_reqs)
for agent_key, batch in agent_batches.items():
other_batches = None
if len(agent_batches) > 1:
other_batches = agent_batches.copy()
del other_batches[agent_key]
agent_batches[agent_key] = policy.postprocess_trajectory(
batch, other_batches, episode)
# Call the Policy's Exploration's postprocess method.
if getattr(policy, "exploration", None) is not None:
agent_batches[
agent_key] = policy.exploration.postprocess_trajectory(
policy, agent_batches[agent_key],
getattr(policy, "_sess", None))
# Add new columns' data to buffers.
for col in agent_batches[agent_key].new_columns:
data = agent_batches[agent_key].data[col]
rc._build_buffers({col: data[0]})
timesteps = data.shape[0]
rc.buffers[col][rc.shift_before:rc.shift_before +
timesteps, rc.agent_key_to_slot[
agent_key]] = data
all_agent_batches.update(agent_batches)
if log_once("after_post"):
logger.info("Trajectory fragment after postprocess_trajectory():"
"\n\n{}\n".format(summarize(all_agent_batches)))
# Append into policy batches and reset
from ray.rllib.evaluation.rollout_worker import get_global_worker
for agent_key, batch in sorted(all_agent_batches.items()):
self.callbacks.on_postprocess_trajectory(
worker=get_global_worker(),
episode=episode,
agent_id=agent_key[0],
policy_id=self.agent_to_policy[agent_key[0]],
policies=self.policy_map,
postprocessed_batch=batch,
original_batches=None) # TODO: (sven) do we really need this?
@override(_SampleCollector)
def check_missing_dones(self, episode_id: EpisodeID) -> None:
for pid, rc in self.policy_sample_collectors.items():
for agent_key in rc.agent_key_to_slot.keys():
# Only check for given episode and only for last chunk
# (all previous chunks for that agent in the episode are
# non-terminal).
if (agent_key[1] == episode_id
and rc.agent_key_to_chunk_num[agent_key[:2]] ==
agent_key[2]):
t = rc.agent_key_to_timestep[agent_key] - 1
b = rc.agent_key_to_slot[agent_key]
if not rc.buffers["dones"][t][b]:
raise ValueError(
"Episode {} terminated for all agents, but we "
"still don't have a last observation for "
"agent {} (policy {}). ".format(agent_key[0], pid)
+ "Please ensure that you include the last "
"observations of all live agents when setting "
"'__all__' done to True. Alternatively, set "
"no_done_at_end=True to allow this.")
@override(_SampleCollector)
def get_multi_agent_batch_and_reset(self):
self.postprocess_trajectories_so_far()
policy_batches = {}
for pid, rc in self.policy_sample_collectors.items():
policy = self.policy_map[pid]
view_reqs = policy.training_view_requirements
policy_batches[pid] = rc.get_train_sample_batch_and_reset(
view_reqs)
ma_batch = MultiAgentBatch.wrap_as_needed(policy_batches, self.count)
# Reset our across-all-agents env step count.
self.count = 0
return ma_batch
| {
"content_hash": "89f9ba4a0dd45616fa107769a9e5177f",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 79,
"avg_line_length": 45.06827309236948,
"alnum_prop": 0.5891106754589199,
"repo_name": "robertnishihara/ray",
"id": "7c21b0bec22f9c746e172362c36623703a0a83bd",
"size": "11222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/evaluation/multi_agent_sample_collector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "82909"
},
{
"name": "C++",
"bytes": "3971373"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Cython",
"bytes": "179979"
},
{
"name": "Dockerfile",
"bytes": "6468"
},
{
"name": "Go",
"bytes": "23139"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1248954"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "2205"
},
{
"name": "Python",
"bytes": "6567694"
},
{
"name": "Shell",
"bytes": "102477"
},
{
"name": "Starlark",
"bytes": "231513"
},
{
"name": "TypeScript",
"bytes": "147793"
}
],
"symlink_target": ""
} |
import onirim.agent
import onirim.action
import onirim.card
import onirim.data
import onirim.tool
import itertools
import logging
import operator
def max_index(iterator):
return max(enumerate(iterator), key=operator.itemgetter(1))[0]
def _can_obtain_door(content):
"""
Check if the explored cards can obtain a door.
"""
last_card = content.explored[-1]
same_count = 0
for card in reversed(content.explored):
if last_card.color == card.color:
same_count += 1
else:
break
return same_count % 3 == 0
def _is_openable(door_card, card):
"""Check if the door can be opened by another card."""
return card.kind == onirim.card.LocationKind.key and door_card.color == card.color
def back_idxes(idx):
return list(range(idx)) + list(range(idx + 1, 5))
class Evaluator(onirim.agent.Actor):
phase1_available_actions = list(itertools.chain(
((onirim.action.Phase1.play, idx) for idx in range(5)),
((onirim.action.Phase1.discard, idx) for idx in range(5))))
available_key_discard_react = [(idx, back_idxes(idx)) for idx in range(5)]
available_open_door = [False, True]
available_nightmare_actions = list(itertools.chain(
((onirim.action.Nightmare.by_key, {"idx": idx}) for idx in range(5)),
((onirim.action.Nightmare.by_door, {"idx": idx}) for idx in range(8)),
[
(onirim.action.Nightmare.by_hand, {}),
(onirim.action.Nightmare.by_deck, {})]))
def __init__(self, evaluation_func):
self._evaluate = evaluation_func
def _after_phase_1_action(self, content, action):
new_content = content.copy()
phase_1_action, idx = action
card = new_content.hand[idx]
if phase_1_action == onirim.action.Phase1.play:
if new_content.explored and card.kind == new_content.explored[-1].kind:
# consecutive same kind
return None
new_content.explored.append(card)
new_content.hand.remove(card)
if _can_obtain_door(new_content):
color = new_content.explored[-1].color
door_card = new_content.piles.pull_door(color)
if door_card is not None:
new_content.opened.append(door_card)
elif phase_1_action == onirim.action.Phase1.discard:
new_content.hand.remove(card)
new_content.piles.put_discard(card)
return new_content
def _phase_1_action_scores(self, content):
for action in self.phase1_available_actions:
yield self._evaluate(self._after_phase_1_action(content, action))
def phase_1_action(self, content):
idx = max_index(self._phase_1_action_scores(content))
return self.phase1_available_actions[idx]
def _after_key_discard_react(self, content, cards, react):
new_content = content.copy()
discarded_idx, back_idxes = react
new_content.piles.put_discard(cards[discarded_idx])
new_content.piles.put_undrawn_iter(cards[idx] for idx in back_idxes)
return new_content
def _key_discard_react_scores(self, content, cards):
for react in self.available_key_discard_react:
yield self._evaluate(self._after_key_discard_react(content, cards, react))
def key_discard_react(self, content, cards):
idx = max_index(self._key_discard_react_scores(content, cards))
return self.available_key_discard_react[idx]
def _after_open_door(self, content, door_card, do_open):
new_content = content.copy()
if not do_open:
new_content.piles.put_limbo(door_card)
return new_content
new_content.opened.append(door_card)
for card in new_content.hand:
if _is_openable(door_card, card):
new_content.hand.remove(card)
new_content.piles.put_discard(card)
break
return new_content
def _open_door_scores(self, content, door_card):
for do_open in self.available_open_door:
yield self._evaluate(self._after_open_door(content, door_card, do_open))
def open_door(self, content, door_card):
idx = max_index(self._open_door_scores(content, door_card))
return self.available_open_door[idx]
def _nightmare_action_by_key(self, content, **additional):
try:
idx = additional["idx"]
card = content.hand[idx]
if card.kind != onirim.card.LocationKind.key:
return False
content.hand.remove(card)
content.piles.put_discard(card)
except IndexError:
return False
return True
def _nightmare_action_by_door(self, content, **additional):
try:
idx = additional["idx"]
card = content.opened[idx]
content.opened.remove(card)
content.piles.put_limbo(card)
except IndexError:
return False
return True
def _nightmare_action_by_hand(self, content, **additional):
for card in content.hand:
content.piles.put_discard(card)
content.hand.clear()
# do not replenish hand or it may trigger the second nightmare
return True
def _nightmare_action_by_deck(self, content, **additional):
return False
# XXX know the future
#try:
# for card in content.piles.draw(5):
# if card.kind is None:
# content.piles.put_limbo(card)
# else:
# content.piles.put_discard(card)
# # TODO card not enough is okay??
#except:
# return False
#return True
_resolve = {
onirim.action.Nightmare.by_key: _nightmare_action_by_key,
onirim.action.Nightmare.by_door: _nightmare_action_by_door,
onirim.action.Nightmare.by_hand: _nightmare_action_by_hand,
onirim.action.Nightmare.by_deck: _nightmare_action_by_deck,
}
def _after_nightmare_action(self, content, action):
new_content = content.copy()
nightmare_action, additional = action
if not Evaluator._resolve[nightmare_action](self, new_content, **additional):
return None
new_content.piles.put_discard(onirim.card.nightmare())
return new_content
def _nightmare_action_scores(self, content):
for action in self.available_nightmare_actions:
yield self._evaluate(self._after_nightmare_action(content, action))
def nightmare_action(self, content):
idx = max_index(self._nightmare_action_scores(content))
return self.available_nightmare_actions[idx]
def evaluate(content):
if content is None:
return -100000000
nondiscard = content.piles.undrawn + content.piles.limbo + content.hand + content.opened
# TODO implement evaluation function here
return 0
def __main__():
logging.basicConfig(
filename="ai_dev_demo.log",
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.WARNING)
actor = Evaluator(evaluate)
observer = onirim.agent.ProfiledObserver()
content_fac = onirim.data.starting_content
onirim.tool.progressed_run(1000, actor, observer, content_fac)
print("{}/{}".format(observer.win, observer.total))
print("Opened door: {}".format(observer.opened_door))
print("Opened by keys: {}".format(observer.opened_door_by_key))
print("Keys discarded: {}".format(observer.key_discarded))
print(str(observer.opened_distribution))
if __name__ == "__main__":
__main__()
| {
"content_hash": "cb01f1fdebdfe57c3ae7974c73c084bf",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 92,
"avg_line_length": 35.486111111111114,
"alnum_prop": 0.6211350293542074,
"repo_name": "cwahbong/onirim-py",
"id": "806ca0e75f367d1ac6283da08607df7b176e433e",
"size": "7665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ai_dev_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67594"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("agenda", "0007_backfill_edims_id"),
]
operations = [
migrations.CreateModel(
name="ScrapeLog",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created", models.DateTimeField(auto_now_add=True)),
(
"num_documents_found",
models.PositiveSmallIntegerField(blank=True, null=True),
),
(
"errors",
models.TextField(
blank=True,
help_text="Errors that occured while scraping",
null=True,
),
),
(
"duration",
models.PositiveSmallIntegerField(
blank=True, help_text="How long the scrape took", null=True
),
),
("bandcs_scraped", models.ManyToManyField(to="agenda.BandC")),
("documents_scraped", models.ManyToManyField(to="agenda.Document")),
],
),
]
| {
"content_hash": "092dd0f56ec330d56b0d31c32e125584",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 84,
"avg_line_length": 32.08695652173913,
"alnum_prop": 0.4010840108401084,
"repo_name": "crccheck/atx-bandc",
"id": "a56d94d98ddbc2039aa68aa177ec5edc08819a5c",
"size": "1525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bandc/apps/agenda/migrations/0008_scrapelog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "556"
},
{
"name": "HTML",
"bytes": "190542"
},
{
"name": "Makefile",
"bytes": "2074"
},
{
"name": "Python",
"bytes": "44644"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingAverage'] , ['NoCycle'] , ['LSTM'] ); | {
"content_hash": "c46ac71320c4dedccfb19eca2fa39f13",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 78,
"avg_line_length": 37.75,
"alnum_prop": 0.7019867549668874,
"repo_name": "antoinecarme/pyaf",
"id": "a5775c68e5c5a8552bb80efa749edf3f56865b0c",
"size": "151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingAverage_NoCycle_LSTM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import logging
import sys
import t411, re
from termcolor import colored, cprint
class search_t411(object):
"""
Main class: search torrents from t411 server
"""
def __init__(self,
username, password, title="", season=None, episode=None, seeders_min=0):
self.username = username
self.password = password
self.title = title
self.season = season
self.episode = episode
self.seeders_min = seeders_min
self.source = self.__readsource__()
self.regexp = self.search_regexp()
logging.debug("Search regexp: %s" % self.regexp)
#get the list of torrent from t411 source
self.list = self.buildlist()
#sort list of torrents
self.list.sort(key=lambda torrent: int(torrent['seeders']), reverse=True)
logging.info("%s torrent(s) found" % len(self.list))
def __readsource__(self):
"""
Connect to the t411 server
"""
try:
src = t411.T411(self.username, self.password)
except Exception as e:
logging.error("Error while trying connection to t411... %s" % e.message)
sys.exit(1)
else:
print("Connected to the t411 server")
return src
def search_regexp(self):
"""
Define the regexp used for the search
"""
if ((self.season == None) and (self.episode == None)):
regexp = '^%s.*' % self.title.lower()
elif (self.episode == None):
regexp = '^%s.*(s[0]*%s|season[\s\_\-\.]*%s).*' % (self.title.lower(), self.season, self.season)
else:
regexp = '^%s.*((s[0]*%s.*e[0]*%s)|[0]*%sx[0]*%s).*' % (self.title.lower(), self.season, self.episode, self.season, self.episode)
return regexp
def buildlist(self, limit=1000):
"""
Build the torrent list
Return list of list sorted by seeders count
Id can be used to retrieve torrent associate with this id
[[<title>, <Seeders>, <id>] ...]
"""
try:
s = self.source.search(self.title.lower(), limit)
except Exception as e:
logging.error("Can not send search request to the t411 server")
logging.error(e.message)
sys.exit(1)
try:
for t in s.items():
pass
except:
logging.error("t411 server returned an invalid result")
sys.exit(1)
torrentlist = []
for torrent in s['torrents']:
if isinstance(torrent, dict):
#logging.debug("Compare regex to: %s" % t.title.lower())
if (re.search(self.regexp, torrent['name'].lower()) and (int(torrent['seeders']) >= self.seeders_min)):
# logging.debug("Matched")
torrentlist.append( {
'name': torrent['name'],
'seeders': torrent['seeders'],
'id': torrent['id']})
logging.debug("Found %d matching items " % (len(torrentlist)))
# Return the list
return torrentlist
def getTorrentList(self):
return self.list
def printTorrentList(self):
if self.list is None or len(self.list) == 0:
print('No torrent found')
else:
line = colored('id \t\tseeders \tname', 'white', attrs=['bold'])
print(line)
even = True
for torrent in self.list:
if even :
attrs = ['reverse', 'blink']
even = False
else:
attrs = None
even = True
line = colored(torrent['id']+ '\t\t' + torrent['seeders'] + '\t\t' + torrent['name'], 'white', attrs=attrs)
print(line)
| {
"content_hash": "daae538ad8f29cce9412d9a2b44785ce",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 141,
"avg_line_length": 31.3739837398374,
"alnum_prop": 0.5167141746566468,
"repo_name": "Bilb/t411todeluge",
"id": "4ba4558bf6a982ef25261429ab0207c27c26e977",
"size": "3907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search_t411.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16181"
}
],
"symlink_target": ""
} |
import warnings
import logging
import unittest
import ssl
from itertools import chain
from mock import patch, Mock
from urllib3 import add_stderr_logger, disable_warnings
from urllib3.util.request import make_headers
from urllib3.util.timeout import Timeout
from urllib3.util.url import (
get_host,
parse_url,
split_first,
Url,
)
from urllib3.util.ssl_ import (
resolve_cert_reqs,
ssl_wrap_socket,
)
from urllib3.exceptions import (
LocationParseError,
TimeoutStateError,
InsecureRequestWarning,
SSLError,
)
from urllib3.util import is_fp_closed, ssl_
from . import clear_warnings
# This number represents a time in seconds, it doesn't mean anything in
# isolation. Setting to a high-ish value to avoid conflicts with the smaller
# numbers used for timeouts
TIMEOUT_EPOCH = 1000
class TestUtil(unittest.TestCase):
def test_get_host(self):
url_host_map = {
# Hosts
'http://google.com/mail': ('http', 'google.com', None),
'http://google.com/mail/': ('http', 'google.com', None),
'google.com/mail': ('http', 'google.com', None),
'http://google.com/': ('http', 'google.com', None),
'http://google.com': ('http', 'google.com', None),
'http://www.google.com': ('http', 'www.google.com', None),
'http://mail.google.com': ('http', 'mail.google.com', None),
'http://google.com:8000/mail/': ('http', 'google.com', 8000),
'http://google.com:8000': ('http', 'google.com', 8000),
'https://google.com': ('https', 'google.com', None),
'https://google.com:8000': ('https', 'google.com', 8000),
            'http://user:password@127.0.0.1:1234': ('http', '127.0.0.1', 1234),
'http://google.com/foo=http://bar:42/baz': ('http', 'google.com', None),
'http://google.com?foo=http://bar:42/baz': ('http', 'google.com', None),
'http://google.com#foo=http://bar:42/baz': ('http', 'google.com', None),
# IPv4
'173.194.35.7': ('http', '173.194.35.7', None),
'http://173.194.35.7': ('http', '173.194.35.7', None),
'http://173.194.35.7/test': ('http', '173.194.35.7', None),
'http://173.194.35.7:80': ('http', '173.194.35.7', 80),
'http://173.194.35.7:80/test': ('http', '173.194.35.7', 80),
# IPv6
'[2a00:1450:4001:c01::67]': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]/test': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]:80': ('http', '[2a00:1450:4001:c01::67]', 80),
'http://[2a00:1450:4001:c01::67]:80/test': ('http', '[2a00:1450:4001:c01::67]', 80),
# More IPv6 from http://www.ietf.org/rfc/rfc2732.txt
'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:8000/index.html': ('http', '[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]', 8000),
'http://[1080:0:0:0:8:800:200C:417A]/index.html': ('http', '[1080:0:0:0:8:800:200C:417A]', None),
'http://[3ffe:2a00:100:7031::1]': ('http', '[3ffe:2a00:100:7031::1]', None),
'http://[1080::8:800:200C:417A]/foo': ('http', '[1080::8:800:200C:417A]', None),
'http://[::192.9.5.5]/ipng': ('http', '[::192.9.5.5]', None),
'http://[::FFFF:129.144.52.38]:42/index.html': ('http', '[::FFFF:129.144.52.38]', 42),
'http://[2010:836B:4179::836B:4179]': ('http', '[2010:836B:4179::836B:4179]', None),
}
for url, expected_host in url_host_map.items():
returned_host = get_host(url)
self.assertEqual(returned_host, expected_host)
def test_invalid_host(self):
# TODO: Add more tests
invalid_host = [
'http://google.com:foo',
'http://::1/',
'http://::1:80/',
]
for location in invalid_host:
self.assertRaises(LocationParseError, get_host, location)
parse_url_host_map = {
'http://google.com/mail': Url('http', host='google.com', path='/mail'),
'http://google.com/mail/': Url('http', host='google.com', path='/mail/'),
'http://google.com/mail': Url('http', host='google.com', path='mail'),
'google.com/mail': Url(host='google.com', path='/mail'),
'http://google.com/': Url('http', host='google.com', path='/'),
'http://google.com': Url('http', host='google.com'),
'http://google.com?foo': Url('http', host='google.com', path='', query='foo'),
# Path/query/fragment
'': Url(),
'/': Url(path='/'),
'#?/!google.com/?foo#bar': Url(path='', fragment='?/!google.com/?foo#bar'),
'/foo': Url(path='/foo'),
'/foo?bar=baz': Url(path='/foo', query='bar=baz'),
'/foo?bar=baz#banana?apple/orange': Url(path='/foo', query='bar=baz', fragment='banana?apple/orange'),
# Port
'http://google.com/': Url('http', host='google.com', path='/'),
'http://google.com:80/': Url('http', host='google.com', port=80, path='/'),
'http://google.com:80': Url('http', host='google.com', port=80),
# Auth
'http://foo:bar@localhost/': Url('http', auth='foo:bar', host='localhost', path='/'),
'http://foo@localhost/': Url('http', auth='foo', host='localhost', path='/'),
'http://foo:bar@baz@localhost/': Url('http', auth='foo:bar@baz', host='localhost', path='/'),
'http://@': Url('http', host=None, auth='')
}
non_round_tripping_parse_url_host_map = {
# Path/query/fragment
'?': Url(path='', query=''),
'#': Url(path='', fragment=''),
# Empty Port
'http://google.com:': Url('http', host='google.com'),
'http://google.com:/': Url('http', host='google.com', path='/'),
}
def test_parse_url(self):
for url, expected_Url in chain(self.parse_url_host_map.items(), self.non_round_tripping_parse_url_host_map.items()):
returned_Url = parse_url(url)
self.assertEqual(returned_Url, expected_Url)
def test_unparse_url(self):
for url, expected_Url in self.parse_url_host_map.items():
self.assertEqual(url, expected_Url.url)
def test_parse_url_invalid_IPv6(self):
self.assertRaises(ValueError, parse_url, '[::1')
def test_Url_str(self):
U = Url('http', host='google.com')
self.assertEqual(str(U), U.url)
def test_request_uri(self):
url_host_map = {
'http://google.com/mail': '/mail',
'http://google.com/mail/': '/mail/',
'http://google.com/': '/',
'http://google.com': '/',
'': '/',
'/': '/',
'?': '/?',
'#': '/',
'/foo?bar=baz': '/foo?bar=baz',
}
for url, expected_request_uri in url_host_map.items():
returned_url = parse_url(url)
self.assertEqual(returned_url.request_uri, expected_request_uri)
def test_netloc(self):
url_netloc_map = {
'http://google.com/mail': 'google.com',
'http://google.com:80/mail': 'google.com:80',
'google.com/foobar': 'google.com',
'google.com:12345': 'google.com:12345',
}
for url, expected_netloc in url_netloc_map.items():
self.assertEqual(parse_url(url).netloc, expected_netloc)
def test_make_headers(self):
self.assertEqual(
make_headers(accept_encoding=True),
{'accept-encoding': 'gzip,deflate'})
self.assertEqual(
make_headers(accept_encoding='foo,bar'),
{'accept-encoding': 'foo,bar'})
self.assertEqual(
make_headers(accept_encoding=['foo', 'bar']),
{'accept-encoding': 'foo,bar'})
self.assertEqual(
make_headers(accept_encoding=True, user_agent='banana'),
{'accept-encoding': 'gzip,deflate', 'user-agent': 'banana'})
self.assertEqual(
make_headers(user_agent='banana'),
{'user-agent': 'banana'})
self.assertEqual(
make_headers(keep_alive=True),
{'connection': 'keep-alive'})
self.assertEqual(
make_headers(basic_auth='foo:bar'),
{'authorization': 'Basic Zm9vOmJhcg=='})
self.assertEqual(
make_headers(proxy_basic_auth='foo:bar'),
{'proxy-authorization': 'Basic Zm9vOmJhcg=='})
self.assertEqual(
make_headers(disable_cache=True),
{'cache-control': 'no-cache'})
def test_split_first(self):
test_cases = {
('abcd', 'b'): ('a', 'cd', 'b'),
('abcd', 'cb'): ('a', 'cd', 'b'),
('abcd', ''): ('abcd', '', None),
('abcd', 'a'): ('', 'bcd', 'a'),
('abcd', 'ab'): ('', 'bcd', 'a'),
}
for input, expected in test_cases.items():
output = split_first(*input)
self.assertEqual(output, expected)
def test_add_stderr_logger(self):
handler = add_stderr_logger(level=logging.INFO) # Don't actually print debug
logger = logging.getLogger('urllib3')
self.assertTrue(handler in logger.handlers)
logger.debug('Testing add_stderr_logger')
logger.removeHandler(handler)
def test_disable_warnings(self):
with warnings.catch_warnings(record=True) as w:
clear_warnings()
warnings.warn('This is a test.', InsecureRequestWarning)
self.assertEqual(len(w), 1)
disable_warnings()
warnings.warn('This is a test.', InsecureRequestWarning)
self.assertEqual(len(w), 1)
def _make_time_pass(self, seconds, timeout, time_mock):
""" Make some time pass for the timeout object """
time_mock.return_value = TIMEOUT_EPOCH
timeout.start_connect()
time_mock.return_value = TIMEOUT_EPOCH + seconds
return timeout
def test_invalid_timeouts(self):
try:
Timeout(total=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
try:
Timeout(connect=2, total=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
try:
Timeout(read=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
# Booleans are allowed also by socket.settimeout and converted to the
# equivalent float (1.0 for True, 0.0 for False)
Timeout(connect=False, read=True)
try:
Timeout(read="foo")
self.fail("string value should not be allowed")
except ValueError as e:
self.assertTrue('int or float' in str(e))
@patch('urllib3.util.timeout.current_time')
def test_timeout(self, current_time):
timeout = Timeout(total=3)
# make 'no time' elapse
timeout = self._make_time_pass(seconds=0, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 3)
self.assertEqual(timeout.connect_timeout, 3)
timeout = Timeout(total=3, connect=2)
self.assertEqual(timeout.connect_timeout, 2)
timeout = Timeout()
self.assertEqual(timeout.connect_timeout, Timeout.DEFAULT_TIMEOUT)
# Connect takes 5 seconds, leaving 5 seconds for read
timeout = Timeout(total=10, read=7)
timeout = self._make_time_pass(seconds=5, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 5)
# Connect takes 2 seconds, read timeout still 7 seconds
timeout = Timeout(total=10, read=7)
timeout = self._make_time_pass(seconds=2, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 7)
timeout = Timeout(total=10, read=7)
self.assertEqual(timeout.read_timeout, 7)
timeout = Timeout(total=None, read=None, connect=None)
self.assertEqual(timeout.connect_timeout, None)
self.assertEqual(timeout.read_timeout, None)
self.assertEqual(timeout.total, None)
timeout = Timeout(5)
self.assertEqual(timeout.total, 5)
def test_timeout_str(self):
timeout = Timeout(connect=1, read=2, total=3)
self.assertEqual(str(timeout), "Timeout(connect=1, read=2, total=3)")
timeout = Timeout(connect=1, read=None, total=3)
self.assertEqual(str(timeout), "Timeout(connect=1, read=None, total=3)")
@patch('urllib3.util.timeout.current_time')
def test_timeout_elapsed(self, current_time):
current_time.return_value = TIMEOUT_EPOCH
timeout = Timeout(total=3)
self.assertRaises(TimeoutStateError, timeout.get_connect_duration)
timeout.start_connect()
self.assertRaises(TimeoutStateError, timeout.start_connect)
current_time.return_value = TIMEOUT_EPOCH + 2
self.assertEqual(timeout.get_connect_duration(), 2)
current_time.return_value = TIMEOUT_EPOCH + 37
self.assertEqual(timeout.get_connect_duration(), 37)
def test_resolve_cert_reqs(self):
self.assertEqual(resolve_cert_reqs(None), ssl.CERT_NONE)
self.assertEqual(resolve_cert_reqs(ssl.CERT_NONE), ssl.CERT_NONE)
self.assertEqual(resolve_cert_reqs(ssl.CERT_REQUIRED), ssl.CERT_REQUIRED)
self.assertEqual(resolve_cert_reqs('REQUIRED'), ssl.CERT_REQUIRED)
self.assertEqual(resolve_cert_reqs('CERT_REQUIRED'), ssl.CERT_REQUIRED)
def test_is_fp_closed_object_supports_closed(self):
class ClosedFile(object):
@property
def closed(self):
return True
self.assertTrue(is_fp_closed(ClosedFile()))
def test_is_fp_closed_object_has_none_fp(self):
class NoneFpFile(object):
@property
def fp(self):
return None
self.assertTrue(is_fp_closed(NoneFpFile()))
def test_is_fp_closed_object_has_fp(self):
class FpFile(object):
@property
def fp(self):
return True
self.assertTrue(not is_fp_closed(FpFile()))
def test_is_fp_closed_object_has_neither_fp_nor_closed(self):
class NotReallyAFile(object):
pass
self.assertRaises(ValueError, is_fp_closed, NotReallyAFile())
def test_ssl_wrap_socket_loads_the_cert_chain(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, sock=socket,
certfile='/path/to/certfile')
mock_context.load_cert_chain.assert_called_once_with(
'/path/to/certfile', None)
def test_ssl_wrap_socket_loads_verify_locations(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, ca_certs='/path/to/pem',
sock=socket)
mock_context.load_verify_locations.assert_called_once_with(
'/path/to/pem')
def test_ssl_wrap_socket_with_no_sni(self):
socket = object()
mock_context = Mock()
# Ugly preservation of original value
HAS_SNI = ssl_.HAS_SNI
ssl_.HAS_SNI = False
ssl_wrap_socket(ssl_context=mock_context, sock=socket)
mock_context.wrap_socket.assert_called_once_with(socket)
ssl_.HAS_SNI = HAS_SNI
| {
"content_hash": "f983340c5630220d64ba5fb9826f84e7",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 140,
"avg_line_length": 38.97290640394089,
"alnum_prop": 0.5697402515325791,
"repo_name": "gardner/urllib3",
"id": "19ba57e3a682bfba0201ac6c7c36f69002cb184a",
"size": "15823",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "906"
},
{
"name": "Python",
"bytes": "416169"
},
{
"name": "Shell",
"bytes": "1036"
}
],
"symlink_target": ""
} |
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'funannotate'
DESCRIPTION = 'funannotate: eukaryotic genome annotation pipeline'
URL = 'https://github.com/nextgenusfs/funannotate'
EMAIL = '[email protected]'
AUTHOR = 'Jon Palmer'
REQUIRES_PYTHON = '>=3.6.0, <3.10'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
'biopython', 'goatools',
    'seaborn', 'psutil',
    'pandas', 'matplotlib',
'natsort', 'numpy',
'requests', 'scikit-learn',
'scipy', 'distro'
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print(('\033[1m{0}\033[0m'.format(s)))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
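# Usage sketch (not part of the original file): because UploadCommand is registered
# under cmdclass below, running `python setup.py upload` rebuilds dist/, uploads the
# artifacts via twine, and pushes a git tag named after the package version.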
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
entry_points={
'console_scripts':
['funannotate=funannotate.funannotate:main'],
},
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='BSD-2',
#scripts=['scripts/funannotate'],
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Operating System :: Unix',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
cmdclass={
'upload': UploadCommand,
},
)
| {
"content_hash": "792df635d90236ec26be5172707b5dc0",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 86,
"avg_line_length": 28.023622047244096,
"alnum_prop": 0.6285473447597639,
"repo_name": "nextgenusfs/funannotate",
"id": "984abb21647625e816310bfd544e7dad1faf139e",
"size": "3705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "291"
},
{
"name": "Dockerfile",
"bytes": "2669"
},
{
"name": "JavaScript",
"bytes": "2771"
},
{
"name": "Perl",
"bytes": "138330"
},
{
"name": "Python",
"bytes": "1542730"
},
{
"name": "Scala",
"bytes": "1523"
},
{
"name": "Shell",
"bytes": "2930"
},
{
"name": "Singularity",
"bytes": "100"
}
],
"symlink_target": ""
} |
"""A helper class for reading in and dealing with tests expectations
for layout tests.
"""
from collections import defaultdict
import logging
import re
from webkitpy.layout_tests.models.test_configuration import TestConfigurationConverter
_log = logging.getLogger(__name__)
# Test expectation and specifier constants.
#
# FIXME: range() starts with 0, which makes `if expectation:` checks harder,
# as PASS is 0.
(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, LEAK, SKIP, WONTFIX,
SLOW, REBASELINE, NEEDS_REBASELINE, NEEDS_MANUAL_REBASELINE, MISSING, FLAKY, NOW, NONE) = range(19)
# FIXME: Perhaps these two routines should be part of the Port instead?
BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')
WEBKIT_BUG_PREFIX = 'webkit.org/b/'
CHROMIUM_BUG_PREFIX = 'crbug.com/'
V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
NAMED_BUG_PREFIX = 'Bug('
MISSING_KEYWORD = 'Missing'
NEEDS_REBASELINE_KEYWORD = 'NeedsRebaseline'
NEEDS_MANUAL_REBASELINE_KEYWORD = 'NeedsManualRebaseline'
class ParseError(Exception):
def __init__(self, warnings):
super(ParseError, self).__init__()
self.warnings = warnings
def __str__(self):
return '\n'.join(map(str, self.warnings))
def __repr__(self):
return 'ParseError(warnings=%s)' % self.warnings
class TestExpectationParser(object):
"""Provides parsing facilities for lines in the test_expectation.txt file."""
# FIXME: Rename these to *_KEYWORD as in MISSING_KEYWORD above, but make
    # the case studly-caps to match the actual file contents.
REBASELINE_MODIFIER = 'rebaseline'
NEEDS_REBASELINE_MODIFIER = 'needsrebaseline'
NEEDS_MANUAL_REBASELINE_MODIFIER = 'needsmanualrebaseline'
PASS_EXPECTATION = 'pass'
SKIP_MODIFIER = 'skip'
SLOW_MODIFIER = 'slow'
WONTFIX_MODIFIER = 'wontfix'
TIMEOUT_EXPECTATION = 'timeout'
MISSING_BUG_WARNING = 'Test lacks BUG specifier.'
def __init__(self, port, all_tests, is_lint_mode):
self._port = port
self._test_configuration_converter = TestConfigurationConverter(
set(port.all_test_configurations()), port.configuration_specifier_macros())
if all_tests:
self._all_tests = set(all_tests)
else:
self._all_tests = set()
self._is_lint_mode = is_lint_mode
def parse(self, filename, expectations_string):
expectation_lines = []
line_number = 0
for line in expectations_string.split("\n"):
line_number += 1
test_expectation = self._tokenize_line(filename, line, line_number)
self._parse_line(test_expectation)
expectation_lines.append(test_expectation)
return expectation_lines
def _create_expectation_line(self, test_name, expectations, file_name):
expectation_line = TestExpectationLine()
expectation_line.original_string = test_name
expectation_line.name = test_name
expectation_line.filename = file_name
expectation_line.expectations = expectations
return expectation_line
def expectation_line_for_test(self, test_name, expectations):
expectation_line = self._create_expectation_line(test_name, expectations, '<Bot TestExpectations>')
self._parse_line(expectation_line)
return expectation_line
def expectation_for_skipped_test(self, test_name):
if not self._port.test_exists(test_name):
_log.warning('The following test %s from the Skipped list doesn\'t exist' % test_name)
expectation_line = self._create_expectation_line(test_name, [TestExpectationParser.PASS_EXPECTATION], '<Skipped file>')
expectation_line.expectations = [TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER]
expectation_line.is_skipped_outside_expectations_file = True
self._parse_line(expectation_line)
return expectation_line
def _parse_line(self, expectation_line):
if not expectation_line.name:
return
if not self._check_test_exists(expectation_line):
return
expectation_line.is_file = self._port.test_isfile(expectation_line.name)
if expectation_line.is_file:
expectation_line.path = expectation_line.name
else:
expectation_line.path = self._port.normalize_test_name(expectation_line.name)
self._collect_matching_tests(expectation_line)
self._parse_specifiers(expectation_line)
self._parse_expectations(expectation_line)
def _parse_specifier(self, specifier):
return specifier.lower()
def _parse_specifiers(self, expectation_line):
if self._is_lint_mode:
self._lint_line(expectation_line)
parsed_specifiers = set([self._parse_specifier(specifier) for specifier in expectation_line.specifiers])
expectation_line.matching_configurations = self._test_configuration_converter.to_config_set(
parsed_specifiers, expectation_line.warnings)
def _lint_line(self, expectation_line):
expectations = [expectation.lower() for expectation in expectation_line.expectations]
if not expectation_line.bugs and self.WONTFIX_MODIFIER not in expectations:
expectation_line.warnings.append(self.MISSING_BUG_WARNING)
if self.REBASELINE_MODIFIER in expectations:
expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
if self.NEEDS_REBASELINE_MODIFIER in expectations or self.NEEDS_MANUAL_REBASELINE_MODIFIER in expectations:
for test in expectation_line.matching_tests:
if self._port.reference_files(test):
expectation_line.warnings.append('A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline')
specifiers = [specifier.lower() for specifier in expectation_line.specifiers]
if (self.REBASELINE_MODIFIER in expectations or self.NEEDS_REBASELINE_MODIFIER in expectations) and ('debug' in specifiers or 'release' in specifiers):
expectation_line.warnings.append('A test cannot be rebaselined for Debug/Release.')
def _parse_expectations(self, expectation_line):
result = set()
for part in expectation_line.expectations:
expectation = TestExpectations.expectation_from_string(part)
if expectation is None: # Careful, PASS is currently 0.
expectation_line.warnings.append('Unsupported expectation: %s' % part)
continue
result.add(expectation)
expectation_line.parsed_expectations = result
def _check_test_exists(self, expectation_line):
# WebKit's way of skipping tests is to add a -disabled suffix.
# So we should consider the path existing if the path or the
# -disabled version exists.
if not self._port.test_exists(expectation_line.name) and not self._port.test_exists(expectation_line.name + '-disabled'):
# Log a warning here since you hit this case any
# time you update TestExpectations without syncing
# the LayoutTests directory
expectation_line.warnings.append('Path does not exist.')
return False
return True
def _collect_matching_tests(self, expectation_line):
"""Convert the test specification to an absolute, normalized
path and make sure directories end with the OS path separator."""
if not self._all_tests:
expectation_line.matching_tests = [expectation_line.path]
return
if not expectation_line.is_file:
# this is a test category, return all the tests of the category.
expectation_line.matching_tests = [test for test in self._all_tests if test.startswith(expectation_line.path)]
return
# this is a test file, do a quick check if it's in the
# full test suite.
if expectation_line.path in self._all_tests:
expectation_line.matching_tests.append(expectation_line.path)
# FIXME: Update the original specifiers and remove this once the old syntax is gone.
_configuration_tokens_list = [
'Mac', 'Mac10.9', 'Mac10.10', 'Mac10.11', 'Retina',
'Win', 'Win7', 'Win10',
'Linux', 'Precise', 'Trusty',
'Android',
'Release',
'Debug',
]
_configuration_tokens = dict((token, token.upper()) for token in _configuration_tokens_list)
_inverted_configuration_tokens = dict((value, name) for name, value in _configuration_tokens.iteritems())
# FIXME: Update the original specifiers list and remove this once the old syntax is gone.
_expectation_tokens = {
'Crash': 'CRASH',
'Leak': 'LEAK',
'Failure': 'FAIL',
MISSING_KEYWORD: 'MISSING',
'Pass': 'PASS',
'Rebaseline': 'REBASELINE',
NEEDS_REBASELINE_KEYWORD: 'NEEDSREBASELINE',
NEEDS_MANUAL_REBASELINE_KEYWORD: 'NEEDSMANUALREBASELINE',
'Skip': 'SKIP',
'Slow': 'SLOW',
'Timeout': 'TIMEOUT',
'WontFix': 'WONTFIX',
}
_inverted_expectation_tokens = dict([(value, name) for name, value in _expectation_tokens.iteritems()] +
[('TEXT', 'Failure'), ('IMAGE', 'Failure'), ('IMAGE+TEXT', 'Failure'), ('AUDIO', 'Failure')])
# FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
@classmethod
def _tokenize_line(cls, filename, expectation_string, line_number):
"""Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance using the old format.
The new format for a test expectation line is:
[[bugs] [ "[" <configuration specifiers> "]" <name> [ "[" <expectations> "]" ["#" <comment>]
Any errant whitespace is not preserved.
"""
expectation_line = TestExpectationLine()
expectation_line.original_string = expectation_string
expectation_line.filename = filename
expectation_line.line_numbers = str(line_number)
comment_index = expectation_string.find("#")
if comment_index == -1:
comment_index = len(expectation_string)
else:
expectation_line.comment = expectation_string[comment_index + 1:]
remaining_string = re.sub(r"\s+", " ", expectation_string[:comment_index].strip())
if len(remaining_string) == 0:
return expectation_line
# special-case parsing this so that we fail immediately instead of treating this as a test name
if remaining_string.startswith('//'):
expectation_line.warnings = ['use "#" instead of "//" for comments']
return expectation_line
bugs = []
specifiers = []
name = None
expectations = []
warnings = []
has_unrecognized_expectation = False
tokens = remaining_string.split()
state = 'start'
for token in tokens:
if (token.startswith(WEBKIT_BUG_PREFIX) or
token.startswith(CHROMIUM_BUG_PREFIX) or
token.startswith(V8_BUG_PREFIX) or
token.startswith(NAMED_BUG_PREFIX)):
if state != 'start':
warnings.append('"%s" is not at the start of the line.' % token)
break
if token.startswith(WEBKIT_BUG_PREFIX):
bugs.append(token)
elif token.startswith(CHROMIUM_BUG_PREFIX):
bugs.append(token)
elif token.startswith(V8_BUG_PREFIX):
bugs.append(token)
else:
match = re.match('Bug\((\w+)\)$', token)
if not match:
warnings.append('unrecognized bug identifier "%s"' % token)
break
else:
bugs.append(token)
elif token == '[':
if state == 'start':
state = 'configuration'
elif state == 'name_found':
state = 'expectations'
else:
warnings.append('unexpected "["')
break
elif token == ']':
if state == 'configuration':
state = 'name'
elif state == 'expectations':
state = 'done'
else:
warnings.append('unexpected "]"')
break
elif token in ('//', ':', '='):
warnings.append('"%s" is not legal in the new TestExpectations syntax.' % token)
break
elif state == 'configuration':
specifiers.append(cls._configuration_tokens.get(token, token))
elif state == 'expectations':
if token not in cls._expectation_tokens:
has_unrecognized_expectation = True
warnings.append('Unrecognized expectation "%s"' % token)
else:
expectations.append(cls._expectation_tokens.get(token, token))
elif state == 'name_found':
warnings.append('expecting "[", "#", or end of line instead of "%s"' % token)
break
else:
name = token
state = 'name_found'
if not warnings:
if not name:
warnings.append('Did not find a test name.')
elif state not in ('name_found', 'done'):
warnings.append('Missing a "]"')
if 'WONTFIX' in expectations and 'SKIP' not in expectations:
expectations.append('SKIP')
if ('SKIP' in expectations or 'WONTFIX' in expectations) and len(set(expectations) - set(['SKIP', 'WONTFIX'])):
warnings.append('A test marked Skip or WontFix must not have other expectations.')
if 'SLOW' in expectations and 'SlowTests' not in filename:
            warnings.append('SLOW tests should only be added to SlowTests and not to TestExpectations.')
if 'WONTFIX' in expectations and ('NeverFixTests' not in filename and 'StaleTestExpectations' not in filename):
            warnings.append('WONTFIX tests should only be added to NeverFixTests or StaleTestExpectations and not to TestExpectations.')
if 'NeverFixTests' in filename and expectations != ['WONTFIX', 'SKIP']:
warnings.append('Only WONTFIX expectations are allowed in NeverFixTests')
if 'SlowTests' in filename and expectations != ['SLOW']:
warnings.append('Only SLOW expectations are allowed in SlowTests')
if not expectations and not has_unrecognized_expectation:
warnings.append('Missing expectations.')
expectation_line.bugs = bugs
expectation_line.specifiers = specifiers
expectation_line.expectations = expectations
expectation_line.name = name
expectation_line.warnings = warnings
return expectation_line
@classmethod
def _split_space_separated(cls, space_separated_string):
"""Splits a space-separated string into an array."""
return [part.strip() for part in space_separated_string.strip().split(' ')]
class TestExpectationLine(object):
"""Represents a line in test expectations file."""
def __init__(self):
"""Initializes a blank-line equivalent of an expectation."""
self.original_string = None
self.filename = None # this is the path to the expectations file for this line
self.line_numbers = "0"
self.name = None # this is the path in the line itself
self.path = None # this is the normpath of self.name
self.bugs = []
self.specifiers = []
self.parsed_specifiers = []
self.matching_configurations = set()
self.expectations = []
self.parsed_expectations = set()
self.comment = None
self.matching_tests = []
self.warnings = []
self.is_skipped_outside_expectations_file = False
def __str__(self):
return "TestExpectationLine{name=%s, matching_configurations=%s, original_string=%s}" % (self.name, self.matching_configurations, self.original_string)
def __eq__(self, other):
return (self.original_string == other.original_string
and self.filename == other.filename
and self.line_numbers == other.line_numbers
and self.name == other.name
and self.path == other.path
and self.bugs == other.bugs
and self.specifiers == other.specifiers
and self.parsed_specifiers == other.parsed_specifiers
and self.matching_configurations == other.matching_configurations
and self.expectations == other.expectations
and self.parsed_expectations == other.parsed_expectations
and self.comment == other.comment
and self.matching_tests == other.matching_tests
and self.warnings == other.warnings
and self.is_skipped_outside_expectations_file == other.is_skipped_outside_expectations_file)
def is_invalid(self):
return bool(self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING])
def is_flaky(self):
return len(self.parsed_expectations) > 1
def is_whitespace_or_comment(self):
return bool(re.match("^\s*$", self.original_string.split('#')[0]))
@staticmethod
def create_passing_expectation(test):
expectation_line = TestExpectationLine()
expectation_line.name = test
expectation_line.path = test
expectation_line.parsed_expectations = set([PASS])
expectation_line.expectations = set(['PASS'])
expectation_line.matching_tests = [test]
return expectation_line
@staticmethod
def merge_expectation_lines(line1, line2, model_all_expectations):
"""Merges the expectations of line2 into line1 and returns a fresh object."""
if line1 is None:
return line2
if line2 is None:
return line1
if model_all_expectations and line1.filename != line2.filename:
return line2
# Don't merge original_string or comment.
result = TestExpectationLine()
# We only care about filenames when we're linting, in which case the filenames are the same.
# Not clear that there's anything better to do when not linting and the filenames are different.
if model_all_expectations:
result.filename = line2.filename
result.line_numbers = line1.line_numbers + "," + line2.line_numbers
result.name = line1.name
result.path = line1.path
result.parsed_expectations = set(line1.parsed_expectations) | set(line2.parsed_expectations)
result.expectations = list(set(line1.expectations) | set(line2.expectations))
result.bugs = list(set(line1.bugs) | set(line2.bugs))
result.specifiers = list(set(line1.specifiers) | set(line2.specifiers))
result.parsed_specifiers = list(set(line1.parsed_specifiers) | set(line2.parsed_specifiers))
result.matching_configurations = set(line1.matching_configurations) | set(line2.matching_configurations)
result.matching_tests = list(list(set(line1.matching_tests) | set(line2.matching_tests)))
result.warnings = list(set(line1.warnings) | set(line2.warnings))
result.is_skipped_outside_expectations_file = line1.is_skipped_outside_expectations_file or line2.is_skipped_outside_expectations_file
return result
def to_string(self, test_configuration_converter, include_specifiers=True, include_expectations=True, include_comment=True):
parsed_expectation_to_string = dict([[parsed_expectation, expectation_string]
for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
if self.is_invalid():
return self.original_string or ''
if self.name is None:
return '' if self.comment is None else "#%s" % self.comment
if test_configuration_converter and self.bugs:
specifiers_list = test_configuration_converter.to_specifiers_list(self.matching_configurations)
result = []
for specifiers in specifiers_list:
# FIXME: this is silly that we join the specifiers and then immediately split them.
specifiers = self._serialize_parsed_specifiers(test_configuration_converter, specifiers).split()
expectations = self._serialize_parsed_expectations(parsed_expectation_to_string).split()
result.append(self._format_line(self.bugs, specifiers, self.name, expectations, self.comment))
return "\n".join(result) if result else None
return self._format_line(self.bugs, self.specifiers, self.name, self.expectations, self.comment,
include_specifiers, include_expectations, include_comment)
def to_csv(self):
# Note that this doesn't include the comments.
return '%s,%s,%s,%s' % (self.name, ' '.join(self.bugs), ' '.join(self.specifiers), ' '.join(self.expectations))
def _serialize_parsed_expectations(self, parsed_expectation_to_string):
result = []
for index in TestExpectations.EXPECTATIONS.values():
if index in self.parsed_expectations:
result.append(parsed_expectation_to_string[index])
return ' '.join(result)
def _serialize_parsed_specifiers(self, test_configuration_converter, specifiers):
result = []
result.extend(sorted(self.parsed_specifiers))
result.extend(test_configuration_converter.specifier_sorter().sort_specifiers(specifiers))
return ' '.join(result)
@staticmethod
def _filter_redundant_expectations(expectations):
if set(expectations) == set(['Pass', 'Skip']):
return ['Skip']
if set(expectations) == set(['Pass', 'Slow']):
return ['Slow']
return expectations
@staticmethod
def _format_line(bugs, specifiers, name, expectations, comment, include_specifiers=True, include_expectations=True, include_comment=True):
new_specifiers = []
new_expectations = []
for specifier in specifiers:
# FIXME: Make this all work with the mixed-cased specifiers (e.g. WontFix, Slow, etc).
specifier = specifier.upper()
new_specifiers.append(TestExpectationParser._inverted_configuration_tokens.get(specifier, specifier))
for expectation in expectations:
expectation = expectation.upper()
new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(expectation, expectation))
result = ''
if include_specifiers and (bugs or new_specifiers):
if bugs:
result += ' '.join(bugs) + ' '
if new_specifiers:
result += '[ %s ] ' % ' '.join(new_specifiers)
result += name
if include_expectations and new_expectations:
new_expectations = TestExpectationLine._filter_redundant_expectations(new_expectations)
result += ' [ %s ]' % ' '.join(sorted(set(new_expectations)))
if include_comment and comment is not None:
result += " #%s" % comment
return result
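# Illustrative sketch (hypothetical values, not part of the original file):
#   TestExpectationLine._format_line(['crbug.com/123'], ['MAC'], 'fast/js/test.html',
#                                    ['FAIL'], None)
# would return 'crbug.com/123 [ Mac ] fast/js/test.html [ Failure ]', because the
# inverted token tables in TestExpectationParser map the internal uppercase forms
# back to the mixed-case TestExpectations syntax.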
# FIXME: Refactor API to be a proper CRUD.
class TestExpectationsModel(object):
"""Represents relational store of all expectations and provides CRUD semantics to manage it."""
def __init__(self, shorten_filename=None):
# Maps a test to its list of expectations.
self._test_to_expectations = {}
# Maps a test to list of its specifiers (string values)
self._test_to_specifiers = {}
# Maps a test to a TestExpectationLine instance.
self._test_to_expectation_line = {}
self._expectation_to_tests = self._dict_of_sets(TestExpectations.EXPECTATIONS)
self._timeline_to_tests = self._dict_of_sets(TestExpectations.TIMELINES)
self._result_type_to_tests = self._dict_of_sets(TestExpectations.RESULT_TYPES)
self._shorten_filename = shorten_filename or (lambda x: x)
def _merge_test_map(self, self_map, other_map):
for test in other_map:
new_expectations = set(other_map[test])
if test in self_map:
new_expectations |= set(self_map[test])
self_map[test] = list(new_expectations) if isinstance(other_map[test], list) else new_expectations
def _merge_dict_of_sets(self, self_dict, other_dict):
for key in other_dict:
self_dict[key] |= other_dict[key]
def merge_model(self, other):
self._merge_test_map(self._test_to_expectations, other._test_to_expectations)
# merge_expectation_lines is O(tests per line). Therefore, this loop
# is O((tests per line)^2) which is really expensive when a line
# contains a lot of tests. Cache the output of merge_expectation_lines
# so that we only call that n^2 in the number of *lines*.
merge_lines_cache = defaultdict(dict)
for test, other_line in other._test_to_expectation_line.items():
merged_line = None
if test in self._test_to_expectation_line:
self_line = self._test_to_expectation_line[test]
if other_line not in merge_lines_cache[self_line]:
merge_lines_cache[self_line][other_line] = TestExpectationLine.merge_expectation_lines(
self_line, other_line, model_all_expectations=False)
merged_line = merge_lines_cache[self_line][other_line]
else:
merged_line = other_line
self._test_to_expectation_line[test] = merged_line
self._merge_dict_of_sets(self._expectation_to_tests, other._expectation_to_tests)
self._merge_dict_of_sets(self._timeline_to_tests, other._timeline_to_tests)
self._merge_dict_of_sets(self._result_type_to_tests, other._result_type_to_tests)
def _dict_of_sets(self, strings_to_constants):
"""Takes a dict of strings->constants and returns a dict mapping
each constant to an empty set."""
d = {}
for c in strings_to_constants.values():
d[c] = set()
return d
def get_test_set(self, expectation, include_skips=True):
tests = self._expectation_to_tests[expectation]
if not include_skips:
tests = tests - self.get_test_set(SKIP)
return tests
def get_test_set_for_keyword(self, keyword):
expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
if expectation_enum is not None:
return self._expectation_to_tests[expectation_enum]
matching_tests = set()
for test, specifiers in self._test_to_specifiers.iteritems():
if keyword.lower() in specifiers:
matching_tests.add(test)
return matching_tests
def get_tests_with_result_type(self, result_type):
return self._result_type_to_tests[result_type]
def get_tests_with_timeline(self, timeline):
return self._timeline_to_tests[timeline]
def has_test(self, test):
return test in self._test_to_expectation_line
def get_expectation_line(self, test):
return self._test_to_expectation_line.get(test)
def get_expectations(self, test):
return self._test_to_expectations[test]
def get_expectations_string(self, test):
"""Returns the expectatons for the given test as an uppercase string.
If there are no expectations for the test, then "PASS" is returned."""
if self.get_expectation_line(test).is_skipped_outside_expectations_file:
return 'NOTRUN'
expectations = self.get_expectations(test)
retval = []
# FIXME: WontFix should cause the test to get skipped without artificially adding SKIP to the expectations list.
if WONTFIX in expectations and SKIP in expectations:
expectations.remove(SKIP)
for expectation in expectations:
retval.append(self.expectation_to_string(expectation))
return " ".join(retval)
def expectation_to_string(self, expectation):
"""Return the uppercased string equivalent of a given expectation."""
for item in TestExpectations.EXPECTATIONS.items():
if item[1] == expectation:
return item[0].upper()
raise ValueError(expectation)
def remove_expectation_line(self, test):
if not self.has_test(test):
return
self._clear_expectations_for_test(test)
del self._test_to_expectation_line[test]
def add_expectation_line(self, expectation_line,
model_all_expectations=False):
"""Returns a list of warnings encountered while matching specifiers."""
if expectation_line.is_invalid():
return
for test in expectation_line.matching_tests:
if self._already_seen_better_match(test, expectation_line):
continue
if model_all_expectations:
expectation_line = TestExpectationLine.merge_expectation_lines(
self.get_expectation_line(test), expectation_line, model_all_expectations)
self._clear_expectations_for_test(test)
self._test_to_expectation_line[test] = expectation_line
self._add_test(test, expectation_line)
def _add_test(self, test, expectation_line):
"""Sets the expected state for a given test.
This routine assumes the test has not been added before. If it has,
use _clear_expectations_for_test() to reset the state prior to
calling this."""
self._test_to_expectations[test] = expectation_line.parsed_expectations
for expectation in expectation_line.parsed_expectations:
self._expectation_to_tests[expectation].add(test)
self._test_to_specifiers[test] = expectation_line.specifiers
if WONTFIX in expectation_line.parsed_expectations:
self._timeline_to_tests[WONTFIX].add(test)
else:
self._timeline_to_tests[NOW].add(test)
if SKIP in expectation_line.parsed_expectations:
self._result_type_to_tests[SKIP].add(test)
elif expectation_line.parsed_expectations == set([PASS]):
self._result_type_to_tests[PASS].add(test)
elif expectation_line.is_flaky():
self._result_type_to_tests[FLAKY].add(test)
else:
# FIXME: What is this?
self._result_type_to_tests[FAIL].add(test)
def _clear_expectations_for_test(self, test):
"""Remove prexisting expectations for this test.
This happens if we are seeing a more precise path
than a previous listing.
"""
if self.has_test(test):
self._test_to_expectations.pop(test, '')
self._remove_from_sets(test, self._expectation_to_tests)
self._remove_from_sets(test, self._timeline_to_tests)
self._remove_from_sets(test, self._result_type_to_tests)
def _remove_from_sets(self, test, dict_of_sets_of_tests):
"""Removes the given test from the sets in the dictionary.
Args:
test: test to look for
dict: dict of sets of files"""
for set_of_tests in dict_of_sets_of_tests.itervalues():
if test in set_of_tests:
set_of_tests.remove(test)
def _already_seen_better_match(self, test, expectation_line):
"""Returns whether we've seen a better match already in the file.
        Returns True if we've already seen an expectation_line.name that matches more of the test
        than this path does.
"""
# FIXME: See comment below about matching test configs and specificity.
if not self.has_test(test):
# We've never seen this test before.
return False
prev_expectation_line = self._test_to_expectation_line[test]
if prev_expectation_line.filename != expectation_line.filename:
# We've moved on to a new expectation file, which overrides older ones.
return False
if len(prev_expectation_line.path) > len(expectation_line.path):
# The previous path matched more of the test.
return True
if len(prev_expectation_line.path) < len(expectation_line.path):
# This path matches more of the test.
return False
# At this point we know we have seen a previous exact match on this
# base path, so we need to check the two sets of specifiers.
# FIXME: This code was originally designed to allow lines that matched
# more specifiers to override lines that matched fewer specifiers.
# However, we currently view these as errors.
#
# To use the "more specifiers wins" policy, change the errors for overrides
# to be warnings and return False".
if prev_expectation_line.matching_configurations == expectation_line.matching_configurations:
expectation_line.warnings.append('Duplicate or ambiguous entry lines %s:%s and %s:%s.' % (
self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
return True
if prev_expectation_line.matching_configurations >= expectation_line.matching_configurations:
expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
self._shorten_filename(
prev_expectation_line.filename), prev_expectation_line.line_numbers,
self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
# FIXME: return False if we want more specific to win.
return True
if prev_expectation_line.matching_configurations <= expectation_line.matching_configurations:
expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
self._shorten_filename(
expectation_line.filename), expectation_line.line_numbers,
self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers))
return True
if prev_expectation_line.matching_configurations & expectation_line.matching_configurations:
expectation_line.warnings.append('Entries for %s on lines %s:%s and %s:%s match overlapping sets of configurations.' % (expectation_line.name,
self._shorten_filename(
prev_expectation_line.filename), prev_expectation_line.line_numbers,
self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
return True
# Configuration sets are disjoint, then.
return False
class TestExpectations(object):
"""Test expectations consist of lines with specifications of what
to expect from layout test cases. The test cases can be directories
in which case the expectations apply to all test cases in that
directory and any subdirectory. The format is along the lines of:
LayoutTests/fast/js/fixme.js [ Failure ]
LayoutTests/fast/js/flaky.js [ Failure Pass ]
LayoutTests/fast/js/crash.js [ Crash Failure Pass Timeout ]
...
To add specifiers:
LayoutTests/fast/js/no-good.js
[ Debug ] LayoutTests/fast/js/no-good.js [ Pass Timeout ]
[ Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
[ Linux Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
[ Linux Win ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
Skip: Doesn't run the test.
Slow: The test takes a long time to run, but does not timeout indefinitely.
WontFix: For tests that we never intend to pass on a given platform (treated like Skip).
Notes:
-A test cannot be both SLOW and TIMEOUT
-A test can be included twice, but not via the same path.
-If a test is included twice, then the more precise path wins.
-CRASH tests cannot be WONTFIX
"""
# FIXME: Update to new syntax once the old format is no longer supported.
EXPECTATIONS = {'pass': PASS,
'audio': AUDIO,
'fail': FAIL,
'image': IMAGE,
'image+text': IMAGE_PLUS_TEXT,
'text': TEXT,
'timeout': TIMEOUT,
'crash': CRASH,
'leak': LEAK,
'missing': MISSING,
TestExpectationParser.SKIP_MODIFIER: SKIP,
TestExpectationParser.NEEDS_REBASELINE_MODIFIER: NEEDS_REBASELINE,
TestExpectationParser.NEEDS_MANUAL_REBASELINE_MODIFIER: NEEDS_MANUAL_REBASELINE,
TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
TestExpectationParser.SLOW_MODIFIER: SLOW,
TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
}
EXPECTATIONS_TO_STRING = dict((k, v) for (v, k) in EXPECTATIONS.iteritems())
# (aggregated by category, pass/fail/skip, type)
EXPECTATION_DESCRIPTIONS = {SKIP: 'skipped',
PASS: 'passes',
FAIL: 'failures',
IMAGE: 'image-only failures',
TEXT: 'text-only failures',
IMAGE_PLUS_TEXT: 'image and text failures',
AUDIO: 'audio failures',
CRASH: 'crashes',
LEAK: 'leaks',
TIMEOUT: 'timeouts',
MISSING: 'missing results'}
NON_TEST_OUTCOME_EXPECTATIONS = (REBASELINE, SKIP, SLOW, WONTFIX)
BUILD_TYPES = ('debug', 'release')
TIMELINES = {TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
'now': NOW}
RESULT_TYPES = {'skip': SKIP,
'pass': PASS,
'fail': FAIL,
'flaky': FLAKY}
@classmethod
def expectation_from_string(cls, string):
assert(' ' not in string) # This only handles one expectation at a time.
return cls.EXPECTATIONS.get(string.lower())
@staticmethod
def result_was_expected(result, expected_results, test_needs_rebaselining):
"""Returns whether we got a result we were expecting.
Args:
result: actual result of a test execution
expected_results: set of results listed in test_expectations
test_needs_rebaselining: whether test was marked as REBASELINE"""
if not (set(expected_results) - (set(TestExpectations.NON_TEST_OUTCOME_EXPECTATIONS))):
expected_results = set([PASS])
if result in expected_results:
return True
if result in (PASS, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, MISSING) and (NEEDS_REBASELINE in expected_results or NEEDS_MANUAL_REBASELINE in expected_results):
return True
if result in (TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
return True
if result == MISSING and test_needs_rebaselining:
return True
if result == SKIP:
return True
return False
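    # Illustrative sketch (hypothetical values): with expected_results={FAIL} and
    # test_needs_rebaselining=False, an actual TEXT or IMAGE result counts as
    # expected, CRASH does not, and a SKIP result is always treated as expected.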
@staticmethod
def remove_pixel_failures(expected_results):
"""Returns a copy of the expected results for a test, except that we
drop any pixel failures and return the remaining expectations. For example,
if we're not running pixel tests, then tests expected to fail as IMAGE
will PASS."""
expected_results = expected_results.copy()
if IMAGE in expected_results:
expected_results.remove(IMAGE)
expected_results.add(PASS)
return expected_results
@staticmethod
def remove_non_sanitizer_failures(expected_results):
"""Returns a copy of the expected results for a test, except that we
drop any failures that the sanitizers don't care about."""
expected_results = expected_results.copy()
for result in (IMAGE, FAIL, IMAGE_PLUS_TEXT):
if result in expected_results:
expected_results.remove(result)
expected_results.add(PASS)
return expected_results
@staticmethod
def has_pixel_failures(actual_results):
return IMAGE in actual_results or FAIL in actual_results
@staticmethod
def suffixes_for_expectations(expectations):
suffixes = set()
if IMAGE in expectations:
suffixes.add('png')
if FAIL in expectations:
suffixes.add('txt')
suffixes.add('png')
suffixes.add('wav')
return set(suffixes)
@staticmethod
def suffixes_for_actual_expectations_string(expectations):
suffixes = set()
if 'TEXT' in expectations:
suffixes.add('txt')
if 'IMAGE' in expectations:
suffixes.add('png')
if 'AUDIO' in expectations:
suffixes.add('wav')
if 'MISSING' in expectations:
suffixes.add('txt')
suffixes.add('png')
suffixes.add('wav')
return suffixes
# FIXME: This constructor does too much work. We should move the actual parsing of
# the expectations into separate routines so that linting and handling overrides
# can be controlled separately, and the constructor can be more of a no-op.
def __init__(self, port, tests=None, include_overrides=True, expectations_dict=None, model_all_expectations=False, is_lint_mode=False):
self._full_test_list = tests
self._test_config = port.test_configuration()
self._is_lint_mode = is_lint_mode
self._model_all_expectations = self._is_lint_mode or model_all_expectations
self._model = TestExpectationsModel(self._shorten_filename)
self._parser = TestExpectationParser(port, tests, self._is_lint_mode)
self._port = port
self._skipped_tests_warnings = []
self._expectations = []
if not expectations_dict:
expectations_dict = port.expectations_dict()
# Always parse the generic expectations (the generic file is required
# to be the first one in the expectations_dict, which must be an OrderedDict).
generic_path, generic_exps = expectations_dict.items()[0]
expectations = self._parser.parse(generic_path, generic_exps)
self._add_expectations(expectations, self._model)
self._expectations += expectations
# Now add the overrides if so requested.
if include_overrides:
for path, contents in expectations_dict.items()[1:]:
expectations = self._parser.parse(path, contents)
model = TestExpectationsModel(self._shorten_filename)
self._add_expectations(expectations, model)
self._expectations += expectations
self._model.merge_model(model)
# FIXME: move ignore_tests into port.skipped_layout_tests()
self.add_extra_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
self.add_expectations_from_bot()
self._has_warnings = False
self._report_warnings()
self._process_tests_without_expectations()
# TODO(ojan): Allow for removing skipped tests when getting the list of
# tests to run, but not when getting metrics.
def model(self):
return self._model
def get_needs_rebaseline_failures(self):
return self._model.get_test_set(NEEDS_REBASELINE)
def get_rebaselining_failures(self):
return self._model.get_test_set(REBASELINE)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_expectations(self, test):
return self._model.get_expectations(test)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_tests_with_result_type(self, result_type):
return self._model.get_tests_with_result_type(result_type)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_test_set(self, expectation, include_skips=True):
return self._model.get_test_set(expectation, include_skips)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_tests_with_timeline(self, timeline):
return self._model.get_tests_with_timeline(timeline)
def get_expectations_string(self, test):
return self._model.get_expectations_string(test)
def expectation_to_string(self, expectation):
return self._model.expectation_to_string(expectation)
def matches_an_expected_result(self, test, result, pixel_tests_are_enabled, sanitizer_is_enabled):
expected_results = self._model.get_expectations(test)
if sanitizer_is_enabled:
expected_results = self.remove_non_sanitizer_failures(expected_results)
elif not pixel_tests_are_enabled:
expected_results = self.remove_pixel_failures(expected_results)
return self.result_was_expected(result, expected_results, self.is_rebaselining(test))
def is_rebaselining(self, test):
return REBASELINE in self._model.get_expectations(test)
def _shorten_filename(self, filename):
if filename.startswith(self._port.path_from_webkit_base()):
return self._port.host.filesystem.relpath(filename, self._port.path_from_webkit_base())
return filename
def _report_warnings(self):
warnings = []
for expectation in self._expectations:
for warning in expectation.warnings:
warnings.append('%s:%s %s %s' % (self._shorten_filename(expectation.filename), expectation.line_numbers,
warning, expectation.name if expectation.expectations else expectation.original_string))
if warnings:
self._has_warnings = True
if self._is_lint_mode:
raise ParseError(warnings)
_log.warning('--lint-test-files warnings:')
for warning in warnings:
_log.warning(warning)
_log.warning('')
def _process_tests_without_expectations(self):
if self._full_test_list:
for test in self._full_test_list:
if not self._model.has_test(test):
self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test))
def has_warnings(self):
return self._has_warnings
def remove_configurations(self, removals):
expectations_to_remove = []
modified_expectations = []
for test, test_configuration in removals:
for expectation in self._expectations:
if expectation.name != test or not expectation.parsed_expectations:
continue
if test_configuration not in expectation.matching_configurations:
continue
expectation.matching_configurations.remove(test_configuration)
if expectation.matching_configurations:
modified_expectations.append(expectation)
else:
expectations_to_remove.append(expectation)
for expectation in expectations_to_remove:
index = self._expectations.index(expectation)
self._expectations.remove(expectation)
if index == len(self._expectations) or self._expectations[index].is_whitespace_or_comment():
while index and self._expectations[index - 1].is_whitespace_or_comment():
index = index - 1
self._expectations.pop(index)
return self.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)
def _add_expectations(self, expectation_list, model):
for expectation_line in expectation_list:
if not expectation_line.expectations:
continue
if self._model_all_expectations or self._test_config in expectation_line.matching_configurations:
model.add_expectation_line(expectation_line, model_all_expectations=self._model_all_expectations)
def add_extra_skipped_tests(self, tests_to_skip):
if not tests_to_skip:
return
for test in self._expectations:
if test.name and test.name in tests_to_skip:
test.warnings.append('%s:%s %s is also in a Skipped file.' % (test.filename, test.line_numbers, test.name))
model = TestExpectationsModel(self._shorten_filename)
for test_name in tests_to_skip:
expectation_line = self._parser.expectation_for_skipped_test(test_name)
model.add_expectation_line(expectation_line)
self._model.merge_model(model)
def add_expectations_from_bot(self):
# FIXME: With mode 'very-flaky' and 'maybe-flaky', this will show the expectations entry in the flakiness
# dashboard rows for each test to be whatever the bot thinks they should be. Is this a good thing?
bot_expectations = self._port.bot_expectations()
model = TestExpectationsModel(self._shorten_filename)
for test_name in bot_expectations:
expectation_line = self._parser.expectation_line_for_test(test_name, bot_expectations[test_name])
# Unexpected results are merged into existing expectations.
merge = self._port.get_option('ignore_flaky_tests') == 'unexpected'
model.add_expectation_line(expectation_line)
self._model.merge_model(model)
def add_expectation_line(self, expectation_line):
self._model.add_expectation_line(expectation_line)
self._expectations += [expectation_line]
def remove_expectation_line(self, test):
if not self._model.has_test(test):
return
self._expectations.remove(self._model.get_expectation_line(test))
self._model.remove_expectation_line(test)
@staticmethod
def list_to_string(expectation_lines, test_configuration_converter=None, reconstitute_only_these=None):
def serialize(expectation_line):
# If reconstitute_only_these is an empty list, we want to return original_string.
# So we need to compare reconstitute_only_these to None, not just check if it's falsey.
if reconstitute_only_these is None or expectation_line in reconstitute_only_these:
return expectation_line.to_string(test_configuration_converter)
return expectation_line.original_string
def nones_out(expectation_line):
return expectation_line is not None
return "\n".join(filter(nones_out, map(serialize, expectation_lines)))
| {
"content_hash": "908cadd17b2ed6916c84e797f9db5ad0",
"timestamp": "",
"source": "github",
"line_count": 1145,
"max_line_length": 214,
"avg_line_length": 45.83318777292576,
"alnum_prop": 0.6225918939004174,
"repo_name": "was4444/chromium.src",
"id": "df9e5ec35b06e1a5ea170e62dfb3517e34b3024f",
"size": "54009",
"binary": false,
"copies": "1",
"ref": "refs/heads/nw15",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from .core import Clusteror
from .plot import scatter_plot_two_dim_group_data
from .plot import hist_plot_one_dim_group_data
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| {
"content_hash": "a12e65a3128d545649e20ff4aa178c31",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 49,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.7695852534562212,
"repo_name": "enfeizhan/clusteror",
"id": "39add29349aace9f5dbd1e0fdd3f1c097e528295",
"size": "217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clusteror/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "489531"
},
{
"name": "Python",
"bytes": "276459"
}
],
"symlink_target": ""
} |
import pytest
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def antibody_data(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'description': 'Test Antibody',
'antibody_name': 'test-Ab',
'antibody_product_no': '123'
}
@pytest.fixture
def post_antibody_vendor(testapp, lab, award):
item = {'lab': lab['@id'],
'award': award['@id'],
'title': 'Vendor Biolabs'}
return testapp.post_json('/vendor', item).json['@graph'][0]
@pytest.fixture
def ab_w_name(testapp, antibody_data):
return testapp.post_json('/antibody', antibody_data).json['@graph'][0]
def test_antibody_update_antibody_id(ab_w_name):
assert ab_w_name['antibody_id'] == 'test-Ab-123'
def test_antibody_display_title(testapp, ab_w_name, post_antibody_vendor):
assert ab_w_name['display_title'] == 'test-Ab (123)'
res = testapp.patch_json(
ab_w_name['@id'],
{'antibody_vendor': post_antibody_vendor['@id']}
).json['@graph'][0]
assert res['display_title'] == 'test-Ab (Vendor Biolabs, 123)'
| {
"content_hash": "e992d21dfec6e9f5532d7b56a2f46826",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 27.75609756097561,
"alnum_prop": 0.6124780316344464,
"repo_name": "4dn-dcic/fourfront",
"id": "0a44f8d7495d0e430795beab06168a8091e17415",
"size": "1138",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/encoded/tests/test_types_antibody.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Workflow Language",
"bytes": "15818"
},
{
"name": "Dockerfile",
"bytes": "6312"
},
{
"name": "HTML",
"bytes": "11048"
},
{
"name": "JavaScript",
"bytes": "2106661"
},
{
"name": "Makefile",
"bytes": "9079"
},
{
"name": "PLpgSQL",
"bytes": "12067"
},
{
"name": "Python",
"bytes": "1758496"
},
{
"name": "SCSS",
"bytes": "224522"
},
{
"name": "Shell",
"bytes": "19014"
}
],
"symlink_target": ""
} |
import testtools
from neutron.hacking import checks
from neutron.tests import base
class HackingTestCase(base.BaseTestCase):
def assertLinePasses(self, func, line):
with testtools.ExpectedException(StopIteration):
next(func(line))
def assertLineFails(self, func, line):
self.assertIsInstance(next(func(line)), tuple)
def test_log_translations(self):
expected_marks = {
'error': '_LE',
'info': '_LI',
'warn': '_LW',
'warning': '_LW',
'critical': '_LC',
'exception': '_LE',
}
logs = expected_marks.keys()
debug = "LOG.debug('OK')"
self.assertEqual(
0, len(list(checks.validate_log_translations(debug, debug, 'f'))))
for log in logs:
bad = 'LOG.%s("Bad")' % log
self.assertEqual(
1, len(list(checks.validate_log_translations(bad, bad, 'f'))))
ok = "LOG.%s('OK') # noqa" % log
self.assertEqual(
0, len(list(checks.validate_log_translations(ok, ok, 'f'))))
ok = "LOG.%s(variable)" % log
self.assertEqual(
0, len(list(checks.validate_log_translations(ok, ok, 'f'))))
for mark in checks._all_hints:
stmt = "LOG.%s(%s('test'))" % (log, mark)
self.assertEqual(
0 if expected_marks[log] == mark else 1,
len(list(checks.validate_log_translations(stmt, stmt,
'f'))))
def test_no_translate_debug_logs(self):
for hint in checks._all_hints:
bad = "LOG.debug(%s('bad'))" % hint
self.assertEqual(
1, len(list(checks.no_translate_debug_logs(bad, 'f'))))
def test_use_jsonutils(self):
def __get_msg(fun):
msg = ("N321: jsonutils.%(fun)s must be used instead of "
"json.%(fun)s" % {'fun': fun})
return [(0, msg)]
for method in ('dump', 'dumps', 'load', 'loads'):
self.assertEqual(
__get_msg(method),
list(checks.use_jsonutils("json.%s(" % method,
"./neutron/common/rpc.py")))
self.assertEqual(0,
len(list(checks.use_jsonutils("jsonx.%s(" % method,
"./neutron/common/rpc.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils("json.%sx(" % method,
"./neutron/common/rpc.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils(
"json.%s" % method,
"./neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/"
"plugins/netwrap"))))
def test_assert_called_once_with(self):
fail_code1 = """
mock = Mock()
mock.method(1, 2, 3, test='wow')
mock.method.assert_called_once()
"""
fail_code2 = """
mock = Mock()
mock.method(1, 2, 3, test='wow')
mock.method.assertCalledOnceWith()
"""
fail_code3 = """
mock = Mock()
mock.method(1, 2, 3, test='wow')
mock.method.assert_has_called()
"""
pass_code = """
mock = Mock()
mock.method(1, 2, 3, test='wow')
mock.method.assert_called_once_with()
"""
pass_code2 = """
mock = Mock()
mock.method(1, 2, 3, test='wow')
mock.method.assert_has_calls()
"""
self.assertEqual(
1, len(list(checks.check_assert_called_once_with(fail_code1,
"neutron/tests/test_assert.py"))))
self.assertEqual(
1, len(list(checks.check_assert_called_once_with(fail_code2,
"neutron/tests/test_assert.py"))))
self.assertEqual(
0, len(list(checks.check_assert_called_once_with(pass_code,
"neutron/tests/test_assert.py"))))
self.assertEqual(
1, len(list(checks.check_assert_called_once_with(fail_code3,
"neutron/tests/test_assert.py"))))
self.assertEqual(
0, len(list(checks.check_assert_called_once_with(pass_code2,
"neutron/tests/test_assert.py"))))
def test_check_oslo_namespace_imports(self):
f = checks.check_oslo_namespace_imports
self.assertLinePasses(f, 'from oslo_utils import importutils')
self.assertLinePasses(f, 'import oslo_messaging')
self.assertLineFails(f, 'from oslo.utils import importutils')
self.assertLineFails(f, 'from oslo import messaging')
self.assertLineFails(f, 'import oslo.messaging')
def test_check_python3_xrange(self):
f = checks.check_python3_xrange
self.assertLineFails(f, 'a = xrange(1000)')
self.assertLineFails(f, 'b =xrange ( 42 )')
self.assertLineFails(f, 'c = xrange(1, 10, 2)')
self.assertLinePasses(f, 'd = range(1000)')
self.assertLinePasses(f, 'e = six.moves.range(1337)')
def test_no_basestring(self):
self.assertEqual(1,
len(list(checks.check_no_basestring("isinstance(x, basestring)"))))
def test_check_python3_iteritems(self):
f = checks.check_python3_no_iteritems
self.assertLineFails(f, "d.iteritems()")
self.assertLinePasses(f, "six.iteritems(d)")
| {
"content_hash": "7e17370be876a38cbc3c276d79ee4c9d",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 40.24305555555556,
"alnum_prop": 0.4999137187230371,
"repo_name": "NeCTAR-RC/neutron",
"id": "7e2d81c0c130b3993c67b662c3d5f42e2cba3995",
"size": "6368",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/hacking/test_checks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7228162"
},
{
"name": "Shell",
"bytes": "12807"
}
],
"symlink_target": ""
} |
"""Test state helpers."""
import asyncio
from datetime import timedelta
from unittest.mock import patch
import pytest
from homeassistant.components.sun import STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON
from homeassistant.const import (
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_CLOSED,
STATE_HOME,
STATE_LOCKED,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
STATE_OPEN,
STATE_UNLOCKED,
)
import homeassistant.core as ha
from homeassistant.helpers import state
from homeassistant.util import dt as dt_util
from tests.common import async_mock_service
async def test_async_track_states(hass):
"""Test AsyncTrackStates context manager."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=5)
point3 = point2 + timedelta(seconds=5)
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = point2
with state.AsyncTrackStates(hass) as states:
mock_utcnow.return_value = point1
hass.states.async_set("light.test", "on")
mock_utcnow.return_value = point2
hass.states.async_set("light.test2", "on")
state2 = hass.states.get("light.test2")
mock_utcnow.return_value = point3
hass.states.async_set("light.test3", "on")
state3 = hass.states.get("light.test3")
assert [state2, state3] == sorted(states, key=lambda state: state.entity_id)
async def test_call_to_component(hass):
"""Test calls to components state reproduction functions."""
with patch(
"homeassistant.components.media_player.reproduce_state.async_reproduce_states"
) as media_player_fun:
media_player_fun.return_value = asyncio.Future()
media_player_fun.return_value.set_result(None)
with patch(
"homeassistant.components.climate.reproduce_state.async_reproduce_states"
) as climate_fun:
climate_fun.return_value = asyncio.Future()
climate_fun.return_value.set_result(None)
state_media_player = ha.State("media_player.test", "bad")
state_climate = ha.State("climate.test", "bad")
context = "dummy_context"
await state.async_reproduce_state(
hass,
[state_media_player, state_climate],
context=context,
)
media_player_fun.assert_called_once_with(
hass, [state_media_player], context=context, reproduce_options=None
)
climate_fun.assert_called_once_with(
hass, [state_climate], context=context, reproduce_options=None
)
async def test_get_changed_since(hass):
"""Test get_changed_since."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=5)
point3 = point2 + timedelta(seconds=5)
with patch("homeassistant.core.dt_util.utcnow", return_value=point1):
hass.states.async_set("light.test", "on")
state1 = hass.states.get("light.test")
with patch("homeassistant.core.dt_util.utcnow", return_value=point2):
hass.states.async_set("light.test2", "on")
state2 = hass.states.get("light.test2")
with patch("homeassistant.core.dt_util.utcnow", return_value=point3):
hass.states.async_set("light.test3", "on")
state3 = hass.states.get("light.test3")
assert [state2, state3] == state.get_changed_since([state1, state2, state3], point2)
async def test_reproduce_with_no_entity(hass):
"""Test reproduce_state with no entity."""
calls = async_mock_service(hass, "light", SERVICE_TURN_ON)
await state.async_reproduce_state(hass, ha.State("light.test", "on"))
await hass.async_block_till_done()
assert len(calls) == 0
assert hass.states.get("light.test") is None
async def test_reproduce_turn_on(hass):
"""Test reproduce_state with SERVICE_TURN_ON."""
calls = async_mock_service(hass, "light", SERVICE_TURN_ON)
hass.states.async_set("light.test", "off")
await state.async_reproduce_state(hass, ha.State("light.test", "on"))
await hass.async_block_till_done()
assert len(calls) > 0
last_call = calls[-1]
assert last_call.domain == "light"
assert last_call.service == SERVICE_TURN_ON
assert last_call.data.get("entity_id") == "light.test"
async def test_reproduce_turn_off(hass):
"""Test reproduce_state with SERVICE_TURN_OFF."""
calls = async_mock_service(hass, "light", SERVICE_TURN_OFF)
hass.states.async_set("light.test", "on")
await state.async_reproduce_state(hass, ha.State("light.test", "off"))
await hass.async_block_till_done()
assert len(calls) > 0
last_call = calls[-1]
assert last_call.domain == "light"
assert last_call.service == SERVICE_TURN_OFF
assert last_call.data.get("entity_id") == "light.test"
async def test_reproduce_complex_data(hass):
"""Test reproduce_state with complex service data."""
calls = async_mock_service(hass, "light", SERVICE_TURN_ON)
hass.states.async_set("light.test", "off")
complex_data = [255, 100, 100]
await state.async_reproduce_state(
hass, ha.State("light.test", "on", {"rgb_color": complex_data})
)
await hass.async_block_till_done()
assert len(calls) > 0
last_call = calls[-1]
assert last_call.domain == "light"
assert last_call.service == SERVICE_TURN_ON
assert last_call.data.get("rgb_color") == complex_data
async def test_reproduce_bad_state(hass):
"""Test reproduce_state with bad state."""
calls = async_mock_service(hass, "light", SERVICE_TURN_ON)
hass.states.async_set("light.test", "off")
await state.async_reproduce_state(hass, ha.State("light.test", "bad"))
await hass.async_block_till_done()
assert len(calls) == 0
assert hass.states.get("light.test").state == "off"
async def test_as_number_states(hass):
"""Test state_as_number with states."""
zero_states = (
STATE_OFF,
STATE_CLOSED,
STATE_UNLOCKED,
STATE_BELOW_HORIZON,
STATE_NOT_HOME,
)
one_states = (STATE_ON, STATE_OPEN, STATE_LOCKED, STATE_ABOVE_HORIZON, STATE_HOME)
for _state in zero_states:
assert state.state_as_number(ha.State("domain.test", _state, {})) == 0
for _state in one_states:
assert state.state_as_number(ha.State("domain.test", _state, {})) == 1
async def test_as_number_coercion(hass):
"""Test state_as_number with number."""
for _state in ("0", "0.0", 0, 0.0):
assert state.state_as_number(ha.State("domain.test", _state, {})) == 0.0
for _state in ("1", "1.0", 1, 1.0):
assert state.state_as_number(ha.State("domain.test", _state, {})) == 1.0
async def test_as_number_invalid_cases(hass):
"""Test state_as_number with invalid cases."""
for _state in ("", "foo", "foo.bar", None, False, True, object, object()):
with pytest.raises(ValueError):
state.state_as_number(ha.State("domain.test", _state, {}))
| {
"content_hash": "53eac05653d21c01aee7c49af22daaf7",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 88,
"avg_line_length": 32.70232558139535,
"alnum_prop": 0.6424406201109373,
"repo_name": "turbokongen/home-assistant",
"id": "89b0f3c685088f423cdd8c4b007ccfb1f36fe0ef",
"size": "7031",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/helpers/test_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "30405146"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: f67a9b8459d
Revises: 10d66bb5392e
Create Date: 2015-01-06 23:38:59.524326
"""
# revision identifiers, used by Alembic.
revision = 'f67a9b8459d'
down_revision = '10d66bb5392e'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('monkeys', sa.Column('friends_count', sa.Integer(), nullable=True))
op.create_index('ix_monkey_fCount_id', 'monkeys', ['friends_count', 'id'], unique=True)
op.create_index('ix_monkey_name_id', 'monkeys', ['name', 'id'], unique=True)
op.create_index(op.f('ix_monkeys_best_friend_id'), 'monkeys', ['best_friend_id'], unique=False)
op.drop_index('ix_monkeys_name', table_name='monkeys')
op.execute("""UPDATE monkeys SET friends_count =
(SELECT COUNT(*) FROM friends WHERE monkey_id = monkeys.id)""")
op.execute(sa.DDL("""
CREATE OR REPLACE FUNCTION process_change_monkey_friends_count()
RETURNS TRIGGER AS $change_monkey_friends_count$
BEGIN
IF (TG_OP = 'DELETE') THEN
UPDATE monkeys SET friends_count = friends_count - 1
WHERE id = OLD.monkey_id;
RETURN OLD;
ELSIF (TG_OP = 'INSERT') THEN
UPDATE monkeys SET friends_count = friends_count + 1
WHERE id = NEW.monkey_id;
RETURN NEW;
END IF;
RETURN NULL;
END;
$change_monkey_friends_count$ LANGUAGE plpgsql;
CREATE TRIGGER change_monkey_friends_count
AFTER INSERT OR DELETE ON friends
FOR EACH ROW EXECUTE PROCEDURE process_change_monkey_friends_count();
"""))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute(sa.DDL("""
DROP TRIGGER change_monkey_friends_count ON friends;
DROP FUNCTION process_change_monkey_friends_count();
"""))
op.create_index('ix_monkeys_name', 'monkeys', ['name'], unique=False)
op.drop_index(op.f('ix_monkeys_best_friend_id'), table_name='monkeys')
op.drop_index('ix_monkey_name_id', table_name='monkeys')
op.drop_index('ix_monkey_fCount_id', table_name='monkeys')
op.drop_column('monkeys', 'friends_count')
### end Alembic commands ###
| {
"content_hash": "65045573de47c25b67d9e19217641c11",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 99,
"avg_line_length": 37.90163934426229,
"alnum_prop": 0.6371107266435986,
"repo_name": "qqalexqq/monkeys",
"id": "86af94f5deaa0dcf0bb04457d8fcdde645359ecc",
"size": "2312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/f67a9b8459d_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1629"
},
{
"name": "HTML",
"bytes": "12441"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "35074"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from demoapp import tasks
def foo(request):
r = tasks.add.delay(2, 2)
return HttpResponse(r.task_id)
| {
"content_hash": "1c398ebfafb67f16c8ff4b4a8620e395",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 36,
"avg_line_length": 18.625,
"alnum_prop": 0.7248322147651006,
"repo_name": "planorama/django-celery",
"id": "a1aab48cd7e69584feda2709460bc8c256f14466",
"size": "176",
"binary": false,
"copies": "3",
"ref": "refs/heads/3.0",
"path": "examples/demoproject/demoapp/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "210848"
},
{
"name": "Shell",
"bytes": "2065"
}
],
"symlink_target": ""
} |
from panda3d.core import *
class DNAStorage():
def __init__(self):
self.nodes = {}
self.catalogCodes = {}
self.fonts = []
self.textures = []
self.hoodNodes = {}
self.placeNodes = {}
self.visGroups = []
self.visGroupsAI = []
self.dnaGroups = []
def storeNode(self, node, p):
self.nodes[node] = p
def storeCatalogCode(self, root, code):
self.catalogCodes[root] = code
def storeFont(self, font, dir=None):
font = font.capitalize()
        if font == 'Mickey':
            self.fonts.append(loader.loadFont('phase_3/models/fonts/' + font + 'Font' + '.bam'))
        elif font == 'Minnie':
            self.fonts.append(loader.loadFont('phase_3/models/fonts/' + font + 'Font' + '.bam'))
        elif font == 'Suit': #TODO
            pass
        elif font == 'Tt_comedy':
            self.fonts.append(loader.loadFont('phase_3/models/fonts/Comedy.bam'))
        else:
            self.fonts.append(loader.loadFont('phase_3/models/fonts/' + font + '.bam'))
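    # Illustrative mapping for the branches above (an informal sketch, not part
    # of the original code; names other than the special cases are made up):
    #   storeFont('mickey')    -> loads phase_3/models/fonts/MickeyFont.bam
    #   storeFont('tt_comedy') -> loads phase_3/models/fonts/Comedy.bam
    #   storeFont('somefont')  -> falls through to the else branch and loads
    #                             phase_3/models/fonts/Somefont.bam
    #   'suit' is currently a no-op (see the TODO above).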
def storeTexture(self, name, texture):
self.textures.append({name: loader.loadTexture(texture)})
def storeHoodNode(self, node, p):
self.hoodNodes[node] = p
def storePlaceNode(self, node, p):
self.placeNodes[node] = p
def resetDNAVisGroups(self):
self.visGroups = []
def getNumDNAVisGroups(self):
return len(self.visGroups)
def resetDNAVisGroupsAI(self):
self.visGroupsAI = []
def resetPlaceNodes(self):
self.placeNodes = {}
def resetDNAGroups(self):
self.dnaGroups = []
def resetHood(self):
self.hoodNodes = {}
self.placeNodes = {} | {
"content_hash": "a20e32d08844cd5cf0b5f70e89e95831",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 86,
"avg_line_length": 24.633333333333333,
"alnum_prop": 0.6671177266576455,
"repo_name": "SkippsDev/libdna",
"id": "24f7fc5af8f5e595d74c81df886f5801b059dfcd",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/DNAStorage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "349493"
}
],
"symlink_target": ""
} |
from autotable import AutoTable
from datalog import DataHandler, StoreToH5, StoreToTxt, TextPrinter, DataLog
from bunch import *
from params import param_file
from backup import copy, copy_source, copy_directory, backup, initialise_backup, logfilename, saveplot
#Also note that misc changes default numpy output!
from misc import average_by, styles, styler
| {
"content_hash": "a4ca6b749170e3b609000f28c81c1270",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 102,
"avg_line_length": 32.81818181818182,
"alnum_prop": 0.8116343490304709,
"repo_name": "chrhartm/SORN",
"id": "e884d93e68ca061e4fb4c36190a2a198ccf551f4",
"size": "362",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "744836"
},
{
"name": "Matlab",
"bytes": "17838"
},
{
"name": "Python",
"bytes": "429030"
}
],
"symlink_target": ""
} |
from keywords import Keywords
from spot import Spot
class Bot:
def __init__(self):
self.keyword_fetcher = Keywords()
self.spot_client = Spot()
def fetch_spot(self, sentence):
result = self.keyword_fetcher.extract_from_sentence(sentence)
message = {}
message_body = ''
spot = self.spot_client.recommend_spot(list(result[1])[0], result[0])
if spot:
message_body += spot['name']
            message_body += 'はどうでしょうか?'  # "How about (this spot)?"
            message_body += 'オススメポイントは'  # "Its recommended point is"
            message_body += spot['reason']
            message_body += ' です'  # polite sentence-ending copula
message['body'] = message_body
message['image'] = spot['image']
else:
            message_body = '申し訳ありません、候補が見つかりませんでした'  # "Sorry, no candidates were found"
message['body'] = message_body
return message
| {
"content_hash": "2fcd0b9751b1e8d6400782da1df7fcb6",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 30.071428571428573,
"alnum_prop": 0.5486935866983373,
"repo_name": "yustoris/tis2016",
"id": "70c58b9eeb4497f219540abec3883a425e4775d7",
"size": "926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6685"
},
{
"name": "HTML",
"bytes": "2272"
},
{
"name": "JavaScript",
"bytes": "2508"
},
{
"name": "Python",
"bytes": "10263"
}
],
"symlink_target": ""
} |
from distutils.core import setup
import sys
reload(sys).setdefaultencoding('Utf-8')
setup(
name='gipsy_toolbar',
version='0.0.1',
author='Guillaume Pousseo',
author_email='[email protected]',
    description='Manages a toolbar for admins with shortcuts to easily '
                'navigate to most relevant admin features.',
long_description=open('README.rst').read(),
url='http://www.revsquare.com',
license='BSD License',
platforms=['OS Independent'],
packages=['gipsy_toolbar'],
include_package_data=True,
classifiers=[
        'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Documentation',
],
install_requires=[
'gipsy_tools>=0.0.1',
],
dependency_links=[
'git+https://github.com/RevSquare/gipsy_tools.git#egg=gipsy_tools-0.0.1'
],
)
| {
"content_hash": "bdcda6787b579c8250323b2433fc4335",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 88,
"avg_line_length": 32.80555555555556,
"alnum_prop": 0.628281117696867,
"repo_name": "RevSquare/gipsy_toolbar",
"id": "577fc4ca81478ef37f7bd3a27356ade85e3dfe6c",
"size": "1204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3531"
},
{
"name": "Python",
"bytes": "7400"
}
],
"symlink_target": ""
} |
from django.core.management import BaseCommand
from django.core import management
import game.runtime as Runtime
import argparse
import json
#The class must be named Command, and subclass BaseCommand
class Command(BaseCommand):
# Show this when the user types help
help = "Start game creating map and other stuff"
def add_arguments(self, parser):
parser.add_argument(
'--generate',
action='store_true',
help='Force generation of new map',
)
parser.add_argument(
'--size',
nargs='?',
type=int,
help='Set size of map',
)
parser.add_argument(
'--time',
nargs='?',
type=int,
help='Set time (as tick rate) on server',
)
parser.add_argument(
'--duration',
nargs='?',
type=int,
help='Set ticks number for day cycling',
)
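    # Example invocation (a hypothetical sketch based on the arguments defined
    # above; the flag values are illustrative, not taken from project docs):
    #   python manage.py startServer --generate --size 256 --time 10 --duration 2400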
# A command must define handle()
def handle(self, *args, **options):
settings = {}
## Set size of map
if(options['size']):
settings["size"] = options['size']
## Generating a new map
if(options['generate']):
settings['generate'] = True
else:
settings['generate'] = False
## Set time of day (as a tick number)
if(options['time']):
settings['time'] = options['time']
## Set duration day (as a tick rate number)
if(options['duration']):
settings['duration'] = options['duration']
with open('game/settings.json', 'w') as outfile:
json.dump(settings, outfile)
Runtime.loadMap() | {
"content_hash": "f3f6e7ad17f3b21d4e319f8fd9874e22",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 58,
"avg_line_length": 22.158730158730158,
"alnum_prop": 0.660458452722063,
"repo_name": "DanAurea/Trisdanvalwen",
"id": "580062e79cf32647a0b3ddbec4e54b216a5dc2d2",
"size": "1396",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "game/management/commands/startServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8536"
},
{
"name": "HTML",
"bytes": "17736"
},
{
"name": "JavaScript",
"bytes": "186091"
},
{
"name": "Nginx",
"bytes": "4407"
},
{
"name": "Python",
"bytes": "58995"
},
{
"name": "Shell",
"bytes": "1050"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import curses
from rtv.submission_page import SubmissionPage
try:
from unittest import mock
except ImportError:
import mock
def test_submission_page_construct(reddit, terminal, config, oauth):
window = terminal.stdscr.subwin
url = ('https://www.reddit.com/r/Python/comments/2xmo63/'
'a_python_terminal_viewer_for_browsing_reddit')
with terminal.loader():
page = SubmissionPage(reddit, terminal, config, oauth, url=url)
assert terminal.loader.exception is None
# Toggle the second comment so we can check the draw more comments method
page.content.toggle(1)
# Set some special flags to make sure that we can draw them
submission_data = page.content.get(-1)
submission_data['gold'] = True
submission_data['stickied'] = True
submission_data['saved'] = True
submission_data['flair'] = 'flair'
# Set some special flags to make sure that we can draw them
comment_data = page.content.get(0)
comment_data['gold'] = True
comment_data['stickied'] = True
comment_data['saved'] = True
comment_data['flair'] = 'flair'
page.draw()
# Title
title = url[:terminal.stdscr.ncols-1].encode('utf-8')
window.addstr.assert_any_call(0, 0, title)
# Banner
menu = ('[1]hot '
'[2]top '
'[3]rising '
'[4]new '
'[5]controversial').encode('utf-8')
window.addstr.assert_any_call(0, 0, menu)
# Footer
text = ('[?]Help [q]Quit [h]Return [space]Fold/Expand [o]Open [c]Comment '
'[a/z]Vote'.encode('utf-8'))
window.addstr.assert_any_call(0, 0, text)
# Submission
submission_data = page.content.get(-1)
text = submission_data['title'].encode('utf-8')
window.subwin.addstr.assert_any_call(1, 1, text, 2097152)
assert window.subwin.border.called
# Comment
comment_data = page.content.get(0)
text = comment_data['split_body'][0].encode('utf-8')
window.subwin.addstr.assert_any_call(1, 1, text)
# More Comments
comment_data = page.content.get(1)
text = comment_data['body'].encode('utf-8')
window.subwin.addstr.assert_any_call(0, 1, text)
# Cursor should not be drawn when the page is first opened
assert not window.subwin.chgat.called
# Reload with a smaller terminal window
terminal.stdscr.ncols = 20
terminal.stdscr.nlines = 10
with terminal.loader():
page = SubmissionPage(reddit, terminal, config, oauth, url=url)
assert terminal.loader.exception is None
page.draw()
def test_submission_refresh(submission_page):
# Should be able to refresh content
submission_page.refresh_content()
def test_submission_exit(submission_page):
# Exiting should set active to false
submission_page.active = True
submission_page.controller.trigger('h')
assert not submission_page.active
def test_submission_unauthenticated(submission_page, terminal):
# Unauthenticated commands
methods = [
'a', # Upvote
'z', # Downvote
'c', # Comment
'e', # Edit
'd', # Delete
'w', # Save
]
for ch in methods:
submission_page.controller.trigger(ch)
text = 'Not logged in'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_called_with(1, 1, text)
def test_submission_open(submission_page, terminal):
# Open the selected link with the web browser
with mock.patch.object(terminal, 'open_browser'):
submission_page.controller.trigger(terminal.RETURN)
assert terminal.open_browser.called
def test_submission_prompt(submission_page, terminal):
# Prompt for a different subreddit
with mock.patch.object(terminal, 'prompt_input'):
# Valid input
submission_page.active = True
submission_page.selected_subreddit = None
terminal.prompt_input.return_value = 'front/top'
submission_page.controller.trigger('/')
assert not submission_page.active
assert submission_page.selected_subreddit
# Invalid input
submission_page.active = True
submission_page.selected_subreddit = None
terminal.prompt_input.return_value = 'front/pot'
submission_page.controller.trigger('/')
assert submission_page.active
assert not submission_page.selected_subreddit
def test_submission_order_top(submission_page, terminal):
# Sort by top - First time selects default
submission_page.controller.trigger('2')
assert submission_page.content.order == 'top'
# Second time opens the menu
with mock.patch.object(terminal, 'show_notification'):
# Invalid selection
terminal.show_notification.return_value = ord('x')
submission_page.controller.trigger('2')
terminal.show_notification.assert_called_with('Invalid option')
assert submission_page.content.order == 'top'
# Valid selection - sort by week
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('3')
submission_page.controller.trigger('2')
assert submission_page.content.order == 'top-week'
def test_submission_order_controversial(submission_page, terminal):
# Now do controversial
submission_page.controller.trigger('5')
assert submission_page.content.order == 'controversial'
with mock.patch.object(terminal, 'show_notification'):
# Invalid selection
terminal.show_notification.return_value = ord('x')
submission_page.controller.trigger('5')
terminal.show_notification.assert_called_with('Invalid option')
assert submission_page.content.order == 'controversial'
# Valid selection - sort by week
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('3')
submission_page.controller.trigger('5')
assert submission_page.content.order == 'controversial-week'
def test_submission_move_top_bottom(submission_page):
submission_page.controller.trigger('G')
assert submission_page.nav.absolute_index == 44
submission_page.controller.trigger('g')
submission_page.controller.trigger('g')
assert submission_page.nav.absolute_index == -1
def test_submission_pager(submission_page, terminal):
# View a submission with the pager
with mock.patch.object(terminal, 'open_pager'):
submission_page.controller.trigger('l')
assert terminal.open_pager.called
# Move down to the first comment
with mock.patch.object(submission_page, 'clear_input_queue'):
submission_page.controller.trigger('j')
# View a comment with the pager
with mock.patch.object(terminal, 'open_pager'):
submission_page.controller.trigger('l')
assert terminal.open_pager.called
def test_submission_comment_not_enough_space(submission_page, terminal):
# The first comment is 10 lines, shrink the screen so that it won't fit.
# Setting the terminal to 10 lines means that there will only be 8 lines
# available (after subtracting the header and footer) to draw the comment.
terminal.stdscr.nlines = 10
# Select the first comment
with mock.patch.object(submission_page, 'clear_input_queue'):
submission_page.move_cursor_down()
submission_page.draw()
text = '(Not enough space to display)'.encode('ascii')
window = terminal.stdscr.subwin
window.subwin.addstr.assert_any_call(6, 1, text)
def test_submission_vote(submission_page, refresh_token):
# Log in
submission_page.config.refresh_token = refresh_token
submission_page.oauth.authorize()
# Test voting on the submission
with mock.patch('praw.objects.Submission.upvote') as upvote, \
mock.patch('praw.objects.Submission.downvote') as downvote, \
mock.patch('praw.objects.Submission.clear_vote') as clear_vote:
data = submission_page.content.get(submission_page.nav.absolute_index)
# Upvote
submission_page.controller.trigger('a')
assert upvote.called
assert data['likes'] is True
# Clear vote
submission_page.controller.trigger('a')
assert clear_vote.called
assert data['likes'] is None
# Upvote
submission_page.controller.trigger('a')
assert upvote.called
assert data['likes'] is True
# Downvote
submission_page.controller.trigger('z')
assert downvote.called
assert data['likes'] is False
# Clear vote
submission_page.controller.trigger('z')
assert clear_vote.called
assert data['likes'] is None
# Upvote - exception
upvote.side_effect = KeyboardInterrupt
submission_page.controller.trigger('a')
assert data['likes'] is None
# Downvote - exception
downvote.side_effect = KeyboardInterrupt
submission_page.controller.trigger('a')
assert data['likes'] is None
def test_submission_save(submission_page, refresh_token):
# Log in
submission_page.config.refresh_token = refresh_token
submission_page.oauth.authorize()
# Test save on the submission
with mock.patch('praw.objects.Submission.save') as save, \
mock.patch('praw.objects.Submission.unsave') as unsave:
data = submission_page.content.get(submission_page.nav.absolute_index)
# Save
submission_page.controller.trigger('w')
assert save.called
assert data['saved'] is True
# Unsave
submission_page.controller.trigger('w')
assert unsave.called
assert data['saved'] is False
# Save - exception
save.side_effect = KeyboardInterrupt
submission_page.controller.trigger('w')
assert data['saved'] is False
def test_submission_comment_save(submission_page, terminal, refresh_token):
# Log in
submission_page.config.refresh_token = refresh_token
submission_page.oauth.authorize()
# Move down to the first comment
with mock.patch.object(submission_page, 'clear_input_queue'):
submission_page.controller.trigger('j')
    # Test save on the comment
with mock.patch('praw.objects.Comment.save') as save, \
mock.patch('praw.objects.Comment.unsave') as unsave:
data = submission_page.content.get(submission_page.nav.absolute_index)
# Save
submission_page.controller.trigger('w')
assert save.called
assert data['saved'] is True
# Unsave
submission_page.controller.trigger('w')
assert unsave.called
assert data['saved'] is False
# Save - exception
save.side_effect = KeyboardInterrupt
submission_page.controller.trigger('w')
assert data['saved'] is False
def test_submission_comment(submission_page, terminal, refresh_token):
# Log in
submission_page.config.refresh_token = refresh_token
submission_page.oauth.authorize()
# Leave a comment
with mock.patch('praw.objects.Submission.add_comment') as add_comment, \
mock.patch.object(terminal, 'open_editor') as open_editor, \
mock.patch('time.sleep'):
open_editor.return_value.__enter__.return_value = 'comment text'
submission_page.controller.trigger('c')
assert open_editor.called
add_comment.assert_called_with('comment text')
def test_submission_delete(submission_page, terminal, refresh_token):
# Log in
submission_page.config.refresh_token = refresh_token
submission_page.oauth.authorize()
# Can't delete the submission
curses.flash.reset_mock()
submission_page.controller.trigger('d')
assert curses.flash.called
# Move down to the first comment
with mock.patch.object(submission_page, 'clear_input_queue'):
submission_page.controller.trigger('j')
# Try to delete the first comment - wrong author
curses.flash.reset_mock()
submission_page.controller.trigger('d')
assert curses.flash.called
# Spoof the author and try to delete again
data = submission_page.content.get(submission_page.nav.absolute_index)
data['author'] = submission_page.reddit.user.name
with mock.patch('praw.objects.Comment.delete') as delete, \
mock.patch.object(terminal.stdscr, 'getch') as getch, \
mock.patch('time.sleep'):
getch.return_value = ord('y')
submission_page.controller.trigger('d')
assert delete.called
def test_submission_edit(submission_page, terminal, refresh_token):
# Log in
submission_page.config.refresh_token = refresh_token
submission_page.oauth.authorize()
# Try to edit the submission - wrong author
data = submission_page.content.get(submission_page.nav.absolute_index)
data['author'] = 'some other person'
curses.flash.reset_mock()
submission_page.controller.trigger('e')
assert curses.flash.called
# Spoof the submission and try to edit again
data = submission_page.content.get(submission_page.nav.absolute_index)
data['author'] = submission_page.reddit.user.name
with mock.patch('praw.objects.Submission.edit') as edit, \
mock.patch.object(terminal, 'open_editor') as open_editor, \
mock.patch('time.sleep'):
open_editor.return_value.__enter__.return_value = 'submission text'
submission_page.controller.trigger('e')
assert open_editor.called
edit.assert_called_with('submission text')
# Move down to the first comment
with mock.patch.object(submission_page, 'clear_input_queue'):
submission_page.controller.trigger('j')
# Spoof the author and edit the comment
data = submission_page.content.get(submission_page.nav.absolute_index)
data['author'] = submission_page.reddit.user.name
with mock.patch('praw.objects.Comment.edit') as edit, \
mock.patch.object(terminal, 'open_editor') as open_editor, \
mock.patch('time.sleep'):
open_editor.return_value.__enter__.return_value = 'comment text'
submission_page.controller.trigger('e')
assert open_editor.called
edit.assert_called_with('comment text')
def test_submission_urlview(submission_page, terminal, refresh_token):
# Log in
submission_page.config.refresh_token = refresh_token
submission_page.oauth.authorize()
# Submission case
data = submission_page.content.get(submission_page.nav.absolute_index)
data['body'] = 'test comment body ❤'
with mock.patch.object(terminal, 'open_urlview') as open_urlview:
submission_page.controller.trigger('b')
open_urlview.assert_called_with('test comment body ❤')
# Subreddit case
data = submission_page.content.get(submission_page.nav.absolute_index)
data['text'] = ''
data['body'] = ''
data['url_full'] = 'http://test.url.com ❤'
with mock.patch.object(terminal, 'open_urlview') as open_urlview, \
mock.patch('subprocess.Popen'):
submission_page.controller.trigger('b')
open_urlview.assert_called_with('http://test.url.com ❤')
| {
"content_hash": "38f89ed258954e877a934ad5f14e6d2e",
"timestamp": "",
"source": "github",
"line_count": 451,
"max_line_length": 78,
"avg_line_length": 33.92017738359202,
"alnum_prop": 0.6655771996339391,
"repo_name": "shaggytwodope/rtv",
"id": "eb98a8de64bdcdb79f8729079a3b0bff1bc67b25",
"size": "15330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_submission.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "2601"
},
{
"name": "HTML",
"bytes": "698"
},
{
"name": "Python",
"bytes": "275325"
}
],
"symlink_target": ""
} |
from keywords._pagetests import _PageTestsKeywords
from keywords._draganddrop import _DragAndDropKeywords
from keywords._actionchains import _ActionChainsKeywords
class Selenium2LibraryExtensions(_PageTestsKeywords, _DragAndDropKeywords,
_ActionChainsKeywords):
"""Selenium2LibraryExtensions adds a number of keywords to the
Selenium2Library.
    Note that it does not actually extend Selenium2Library; internally it
    accesses the Selenium2Library instance and uses the underlying Selenium
    browser.
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = '0.0.1'
def __init__(self, timeout=5.0, implicit_wait=0.0,
run_on_failure='Capture Page Screenshot'):
for base in Selenium2LibraryExtensions.__bases__:
if hasattr(base, '__init__'):
base.__init__(self)
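# Minimal usage sketch (an assumption, not part of the original docs): in a
# Robot Framework suite the extension is loaded alongside Selenium2Library,
# e.g. in the *** Settings *** section:
#   Library    Selenium2Library
#   Library    Selenium2LibraryExtensions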
| {
"content_hash": "5f2d9f9f9983fbc6c7a6d69f2f921e0c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 40.18181818181818,
"alnum_prop": 0.6877828054298643,
"repo_name": "idumpling/robotx",
"id": "5a6734f3a224c559046e5e45d4c90b42283320ef",
"size": "884",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "robotx/lib/seleniumext/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91789"
}
],
"symlink_target": ""
} |
"""
Provides an unused network
Provides workaround for limit of 31 networks with default docker setup
by providing a free network that can be specified to docker manually.
Known performance issues when creating many docker networks,
after 50 or so networks the time it takes to create a new network
starts becoming very noticeable (several seconds) and seems to grow with O(N^2)
together with the number of iptables rules, because docker creates iptables rules
from each bridge to each bridge.
This is not an issue when creating `--internal` networks, so prefer that when possible.
"""
from __future__ import unicode_literals, absolute_import
from subprocess import check_output, CalledProcessError
from collections import deque
from itertools import chain
from ipaddress import IPv4Network, IPv4Interface
from hammer.util import as_str, is_fabric1
__all__ = ['create_docker_network', 'DockerNetworkAllocator', 'OutOfNetworks']
DEFAULT_NETWORK_POOL = (
IPv4Network('10.0.0.0/8'),
IPv4Network('172.16.0.0/12'),
IPv4Network('192.168.0.0/20'))
class OutOfNetworks(RuntimeError):
pass
def remote_cmd(cmd, context=None):
if is_fabric1:
from fabric.api import sudo
# Fabric doesn't have any way to call commands with
# an actual list of arguments, and their shell_escape is
# a complete joke...
res = sudo(cmd)
else:
res = context.sudo(cmd)
if res.return_code != 0:
raise CalledProcessError(
returncode=res.return_code,
cmd=cmd,
output='-- stdout -- \n%s\n-- stderr--\n%s\n' % (res.stdout, res.stderr))
return res.stdout
def local_cmd(cmd, context=None):
# I know... see remote_cmd
return check_output(['sh', '-c', cmd])
def create_docker_network(name, internal=False, cmd=remote_cmd, prefix=24, pool=None, context=None):
"""
    Note: with fabric2 the context argument must be set to a fabric Connection
"""
allocator = DockerNetworkAllocator(cmd, context=context, pool=pool)
return allocator.create(name, internal=internal, prefix=prefix)
class DockerNetworkAllocator(object):
def __init__(self, cmd, context=None, pool=None):
"""
Docker network allocator
Arguments:
cmd(Callable[str, str]): Call a command
pool(List[IPv4Network]): Pool of networks to assign from
"""
self._cmd = cmd
self._context = context
if pool is None:
pool = DEFAULT_NETWORK_POOL
# Ensure it is sorted so we can be efficient when finding a free network
self.pool = list(sorted(pool))
def _docker(self, args):
# lord have mercy
cmd = ' '.join("'{}'".format(arg) for arg in chain(['docker'], args))
output = as_str(self._cmd(cmd, context=self._context)).strip()
if output == '':
return []
return [line.strip() for line in output.split('\n')]
def _networks_in_use(self):
return list(chain(
(
# Locally used networks
IPv4Interface(inet).network
for inet in self._cmd("ip -4 addr | grep 'inet ' | awk '{ print $2 }'", context=self._context).split()
if inet != ''
),
(
# Already routed ipv4 networks
IPv4Network(network)
for network in self._cmd("ip -4 route list | grep '^[0-9]' | awk '{ print $1 }'", context=self._context).split()
if network != ''
)
))
def _proposed_network(self, prefix):
networks_in_pool = (
subnet
for network in self.pool
for subnet in network.subnets(new_prefix=prefix)
)
used_networks = deque(sorted(self._networks_in_use()))
for network in networks_in_pool:
# This while block is purely for optimization,
# due to sorting of both networks_in_pool and used_networks
# this used network can never interfere again, so don't waste time on it.
while used_networks and \
used_networks[0].broadcast_address < network.network_address and \
not network.overlaps(used_networks[0]):
used_networks.popleft()
if not any(network.overlaps(used) for used in used_networks):
return network
def assign(self, prefix=24):
"""
Arguments:
            prefix(int): Network prefix length (e.g. `24` for `/24`)
"""
proposed_network = self._proposed_network(prefix)
if proposed_network is None:
raise OutOfNetworks("Out of networks, contact your server administrator")
return proposed_network
def create(self, name, internal=False, prefix=24):
"""
Create a new docker network if it does not already exist
Arguments:
name(str): Network name
internal(bool): Internal network (--internal)
prefix(int): Network prefix
Returns:
bool: True if network was created, False if it already existed
"""
existing = self._docker(['network', 'ls', '--format', '{{.Name}}'])
if name in existing:
return False
cmd = chain(
('docker', 'network', 'create'),
('--internal',) if internal else (),
('--subnet', self.assign(prefix=prefix).exploded, name))
self._cmd(' '.join(cmd), context=self._context)
return True
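# Minimal usage sketch (an assumption for illustration; the network name below
# is hypothetical and the call shells out to docker/ip on the local host):
if __name__ == '__main__':
    # local_cmd runs commands locally; for remote hosts pass remote_cmd instead
    # (and, with fabric2, the matching Connection via context=).
    created = create_docker_network(
        'example_internal_net',  # hypothetical network name
        internal=True,           # --internal avoids the iptables scaling issue noted above
        cmd=local_cmd,
        prefix=24,
    )
    print('created' if created else 'already existed')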
| {
"content_hash": "1b32af5c67e4d04d510f6969db612f23",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 128,
"avg_line_length": 32.52046783625731,
"alnum_prop": 0.6038482287358389,
"repo_name": "thorgate/tg-hammer",
"id": "9f669e7ee8d4d51782b35ea639f1e04ed6df779a",
"size": "5561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hammer/docker_network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "623"
},
{
"name": "Python",
"bytes": "85171"
},
{
"name": "Shell",
"bytes": "1273"
}
],
"symlink_target": ""
} |
"""
Unit tests for gluon.dal (NoSQL adapters)
"""
import sys
import os
import glob
if sys.version < "2.7":
import unittest2 as unittest
else:
import unittest
import datetime
try:
import cStringIO as StringIO
except:
from io import StringIO
def fix_sys_path():
"""
logic to have always the correct sys.path
'', web2py/gluon, web2py/site-packages, web2py/ ...
"""
def add_path_first(path):
sys.path = [path] + [p for p in sys.path if (
not p == path and not p == (path + '/'))]
path = os.path.dirname(os.path.abspath(__file__))
if not os.path.isfile(os.path.join(path,'web2py.py')):
i = 0
while i<10:
i += 1
if os.path.exists(os.path.join(path,'web2py.py')):
break
path = os.path.abspath(os.path.join(path, '..'))
paths = [path,
os.path.abspath(os.path.join(path, 'site-packages')),
os.path.abspath(os.path.join(path, 'gluon')),
'']
[add_path_first(path) for path in paths]
fix_sys_path()
#for travis-ci
DEFAULT_URI = os.environ.get('DB', 'sqlite:memory')
print 'Testing against %s engine (%s)' % (DEFAULT_URI.partition(':')[0], DEFAULT_URI)
IS_GAE = "datastore" in DEFAULT_URI
IS_MONGODB = "mongodb" in DEFAULT_URI
IS_IMAP = "imap" in DEFAULT_URI
if IS_IMAP:
from dal import IMAPAdapter
from contrib import mockimaplib
IMAPAdapter.driver = mockimaplib
from dal import DAL, Field, Table, SQLALL
def drop(table, cascade=None):
# mongodb implements drop()
# although it seems it does not work properly
if (IS_GAE or IS_MONGODB or IS_IMAP):
# GAE drop/cleanup is not implemented
db = table._db
db(table).delete()
del db[table._tablename]
del db.tables[db.tables.index(table._tablename)]
db._remove_references_to(table)
else:
if cascade:
table.drop(cascade)
else:
table.drop()
# setup GAE dummy database
if IS_GAE:
from google.appengine.ext import testbed
gaetestbed = testbed.Testbed()
gaetestbed.activate()
gaetestbed.init_datastore_v3_stub()
ALLOWED_DATATYPES = [
'string',
'text',
'integer',
'boolean',
'double',
'blob',
'date',
'time',
'datetime',
'upload',
'password',
'json',
]
def setUpModule():
pass
def tearDownModule():
if os.path.isfile('sql.log'):
os.unlink('sql.log')
for a in glob.glob('*.table'):
os.unlink(a)
@unittest.skipIf(IS_GAE or IS_IMAP, 'TODO: Datastore throws "AssertionError: SyntaxError not raised"')
class TestFields(unittest.TestCase):
def testFieldName(self):
# Check that Fields cannot start with underscores
self.assertRaises(SyntaxError, Field, '_abc', 'string')
# Check that Fields cannot contain punctuation other than underscores
self.assertRaises(SyntaxError, Field, 'a.bc', 'string')
# Check that Fields cannot be a name of a method or property of Table
for x in ['drop', 'on', 'truncate']:
self.assertRaises(SyntaxError, Field, x, 'string')
# Check that Fields allows underscores in the body of a field name.
self.assert_(Field('a_bc', 'string'),
"Field isn't allowing underscores in fieldnames. It should.")
def testFieldTypes(self):
# Check that string, and password default length is 512
for typ in ['string', 'password']:
self.assert_(Field('abc', typ).length == 512,
"Default length for type '%s' is not 512 or 255" % typ)
# Check that upload default length is 512
self.assert_(Field('abc', 'upload').length == 512,
"Default length for type 'upload' is not 512")
# Check that Tables passed in the type creates a reference
self.assert_(Field('abc', Table(None, 'temp')).type
== 'reference temp',
'Passing an Table does not result in a reference type.')
def testFieldLabels(self):
# Check that a label is successfully built from the supplied fieldname
self.assert_(Field('abc', 'string').label == 'Abc',
'Label built is incorrect')
self.assert_(Field('abc_def', 'string').label == 'Abc Def',
'Label built is incorrect')
def testFieldFormatters(self): # Formatter should be called Validator
# Test the default formatters
for typ in ALLOWED_DATATYPES:
f = Field('abc', typ)
if typ not in ['date', 'time', 'datetime']:
isinstance(f.formatter('test'), str)
else:
isinstance(f.formatter(datetime.datetime.now()), str)
@unittest.skipIf(IS_GAE or IS_MONGODB, 'TODO: Datastore does accept dict objects as json field input. MongoDB assertion error Binary("x", 0) != "x"')
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
for ft in ['string', 'text', 'password', 'upload', 'blob']:
db.define_table('tt', Field('aa', ft, default=''))
self.assertEqual(isinstance(db.tt.insert(aa='x'), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, 'x')
drop(db.tt)
db.define_table('tt', Field('aa', 'integer', default=1))
self.assertEqual(isinstance(db.tt.insert(aa=3), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, 3)
drop(db.tt)
db.define_table('tt', Field('aa', 'double', default=1))
self.assertEqual(isinstance(db.tt.insert(aa=3.1), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, 3.1)
drop(db.tt)
db.define_table('tt', Field('aa', 'boolean', default=True))
self.assertEqual(isinstance(db.tt.insert(aa=True), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, True)
drop(db.tt)
db.define_table('tt', Field('aa', 'json', default={}))
self.assertEqual(isinstance(db.tt.insert(aa={}), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, {})
drop(db.tt)
db.define_table('tt', Field('aa', 'date',
default=datetime.date.today()))
t0 = datetime.date.today()
self.assertEqual(isinstance(db.tt.insert(aa=t0), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, t0)
drop(db.tt)
db.define_table('tt', Field('aa', 'datetime',
default=datetime.datetime.today()))
t0 = datetime.datetime(
1971,
12,
21,
10,
30,
55,
0,
)
self.assertEqual(isinstance(db.tt.insert(aa=t0), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, t0)
## Row APIs
row = db().select(db.tt.aa)[0]
self.assertEqual(db.tt[1].aa,t0)
self.assertEqual(db.tt['aa'],db.tt.aa)
self.assertEqual(db.tt(1).aa,t0)
self.assertTrue(db.tt(1,aa=None)==None)
self.assertFalse(db.tt(1,aa=t0)==None)
self.assertEqual(row.aa,t0)
self.assertEqual(row['aa'],t0)
self.assertEqual(row['tt.aa'],t0)
self.assertEqual(row('tt.aa'),t0)
## Lazy and Virtual fields
db.tt.b = Field.Virtual(lambda row: row.tt.aa)
db.tt.c = Field.Lazy(lambda row: row.tt.aa)
row = db().select(db.tt.aa)[0]
self.assertEqual(row.b,t0)
self.assertEqual(row.c(),t0)
drop(db.tt)
db.define_table('tt', Field('aa', 'time', default='11:30'))
t0 = datetime.time(10, 30, 55)
self.assertEqual(isinstance(db.tt.insert(aa=t0), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, t0)
drop(db.tt)
@unittest.skipIf(IS_GAE or IS_IMAP, 'TODO: Datastore throws "AssertionError: SyntaxError not raised"')
class TestTables(unittest.TestCase):
def testTableNames(self):
# Check that Tables cannot start with underscores
self.assertRaises(SyntaxError, Table, None, '_abc')
# Check that Tables cannot contain punctuation other than underscores
self.assertRaises(SyntaxError, Table, None, 'a.bc')
# Check that Tables cannot be a name of a method or property of DAL
for x in ['define_table', 'tables', 'as_dict']:
self.assertRaises(SyntaxError, Table, None, x)
# Check that Table allows underscores in the body of a field name.
self.assert_(Table(None, 'a_bc'),
"Table isn't allowing underscores in tablename. It should.")
@unittest.skipIf(IS_IMAP, "Skip IMAP")
class TestAll(unittest.TestCase):
def setUp(self):
self.pt = Table(None,'PseudoTable',Field('name'),Field('birthdate'))
def testSQLALL(self):
ans = 'PseudoTable.id, PseudoTable.name, PseudoTable.birthdate'
self.assertEqual(str(SQLALL(self.pt)), ans)
@unittest.skipIf(IS_IMAP, "Skip IMAP")
class TestTable(unittest.TestCase):
def testTableCreation(self):
# Check for error when not passing type other than Field or Table
self.assertRaises(SyntaxError, Table, None, 'test', None)
persons = Table(None, 'persons',
Field('firstname','string'),
Field('lastname', 'string'))
# Does it have the correct fields?
self.assert_(set(persons.fields).issuperset(set(['firstname',
'lastname'])))
# ALL is set correctly
self.assert_('persons.firstname, persons.lastname'
in str(persons.ALL))
@unittest.skipIf(IS_GAE or IS_MONGODB, "No table alias for this backend")
def testTableAlias(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
persons = Table(db, 'persons', Field('firstname',
'string'), Field('lastname', 'string'))
aliens = persons.with_alias('aliens')
# Are the different table instances with the same fields
self.assert_(persons is not aliens)
self.assert_(set(persons.fields) == set(aliens.fields))
def testTableInheritance(self):
persons = Table(None, 'persons', Field('firstname',
'string'), Field('lastname', 'string'))
customers = Table(None, 'customers',
Field('items_purchased', 'integer'),
persons)
self.assert_(set(customers.fields).issuperset(set(
['items_purchased', 'firstname', 'lastname'])))
class TestInsert(unittest.TestCase):
def testRun(self):
if IS_IMAP:
imap = DAL(DEFAULT_URI)
imap.define_tables()
self.assertEqual(imap.Draft.insert(to="[email protected]",
subject="Nurse!",
sender="[email protected]",
content="Nurse!\r\nNurse!"), 2)
self.assertEqual(imap.Draft[2].subject, "Nurse!")
self.assertEqual(imap.Draft[2].sender, "[email protected]")
self.assertEqual(isinstance(imap.Draft[2].uid, long), True)
self.assertEqual(imap.Draft[2].content[0]["text"], "Nurse!\r\nNurse!")
else:
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'))
self.assertEqual(isinstance(db.tt.insert(aa='1'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='1'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='1'), long), True)
self.assertEqual(db(db.tt.aa == '1').count(), 3)
self.assertEqual(db(db.tt.aa == '2').isempty(), True)
self.assertEqual(db(db.tt.aa == '1').update(aa='2'), 3)
self.assertEqual(db(db.tt.aa == '2').count(), 3)
self.assertEqual(db(db.tt.aa == '2').isempty(), False)
self.assertEqual(db(db.tt.aa == '2').delete(), 3)
self.assertEqual(db(db.tt.aa == '2').isempty(), True)
drop(db.tt)
@unittest.skipIf(IS_GAE or IS_MONGODB or IS_IMAP, 'TODO: Datastore throws "SyntaxError: Not supported (query using or)". MongoDB assertionerror 5L != 3')
class TestSelect(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'))
self.assertEqual(isinstance(db.tt.insert(aa='1'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='2'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='3'), long), True)
self.assertEqual(db(db.tt.id > 0).count(), 3)
self.assertEqual(db(db.tt.id > 0).select(orderby=~db.tt.aa
| db.tt.id)[0].aa, '3')
self.assertEqual(len(db(db.tt.id > 0).select(limitby=(1, 2))), 1)
self.assertEqual(db(db.tt.id > 0).select(limitby=(1, 2))[0].aa,
'2')
self.assertEqual(len(db().select(db.tt.ALL)), 3)
self.assertEqual(db(db.tt.aa == None).count(), 0)
self.assertEqual(db(db.tt.aa != None).count(), 3)
self.assertEqual(db(db.tt.aa > '1').count(), 2)
self.assertEqual(db(db.tt.aa >= '1').count(), 3)
self.assertEqual(db(db.tt.aa == '1').count(), 1)
self.assertEqual(db(db.tt.aa != '1').count(), 2)
self.assertEqual(db(db.tt.aa < '3').count(), 2)
self.assertEqual(db(db.tt.aa <= '3').count(), 3)
self.assertEqual(db(db.tt.aa > '1')(db.tt.aa < '3').count(), 1)
self.assertEqual(db((db.tt.aa > '1') & (db.tt.aa < '3')).count(), 1)
self.assertEqual(db((db.tt.aa > '1') | (db.tt.aa < '3')).count(), 3)
self.assertEqual(db((db.tt.aa > '1') & ~(db.tt.aa > '2')).count(), 1)
self.assertEqual(db(~(db.tt.aa > '1') & (db.tt.aa > '2')).count(), 0)
drop(db.tt)
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestAddMethod(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'))
@db.tt.add_method.all
def select_all(table,orderby=None):
return table._db(table).select(orderby=orderby)
self.assertEqual(isinstance(db.tt.insert(aa='1'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='2'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='3'), long), True)
self.assertEqual(len(db.tt.all()), 3)
drop(db.tt)
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestBelongs(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'))
self.assertEqual(isinstance(db.tt.insert(aa='1'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='2'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='3'), long), True)
self.assertEqual(db(db.tt.aa.belongs(('1', '3'))).count(),
2)
if not (IS_GAE or IS_MONGODB):
self.assertEqual(db(db.tt.aa.belongs(db(db.tt.id > 2)._select(db.tt.aa))).count(), 1)
self.assertEqual(db(db.tt.aa.belongs(db(db.tt.aa.belongs(('1',
'3')))._select(db.tt.aa))).count(), 2)
self.assertEqual(db(db.tt.aa.belongs(db(db.tt.aa.belongs(db
(db.tt.aa.belongs(('1', '3')))._select(db.tt.aa)))._select(
db.tt.aa))).count(),
2)
else:
print "Datastore/Mongodb belongs does not accept queries (skipping)"
drop(db.tt)
@unittest.skipIf(IS_GAE or IS_IMAP, "Contains not supported on GAE Datastore. TODO: IMAP tests")
class TestContains(unittest.TestCase):
@unittest.skipIf(IS_MONGODB, "TODO: MongoDB Contains error")
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa', 'list:string'), Field('bb','string'))
self.assertEqual(isinstance(db.tt.insert(aa=['aaa','bbb'],bb='aaa'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa=['bbb','ddd'],bb='abb'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa=['eee','aaa'],bb='acc'), long), True)
self.assertEqual(db(db.tt.aa.contains('aaa')).count(), 2)
self.assertEqual(db(db.tt.aa.contains('bbb')).count(), 2)
self.assertEqual(db(db.tt.aa.contains('aa')).count(), 0)
self.assertEqual(db(db.tt.bb.contains('a')).count(), 3)
self.assertEqual(db(db.tt.bb.contains('b')).count(), 1)
self.assertEqual(db(db.tt.bb.contains('d')).count(), 0)
self.assertEqual(db(db.tt.aa.contains(db.tt.bb)).count(), 1)
drop(db.tt)
@unittest.skipIf(IS_GAE or IS_MONGODB or IS_IMAP, "Like not supported on GAE Datastore. TODO: IMAP test")
class TestLike(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'))
self.assertEqual(isinstance(db.tt.insert(aa='abc'), long), True)
self.assertEqual(db(db.tt.aa.like('a%')).count(), 1)
self.assertEqual(db(db.tt.aa.like('%b%')).count(), 1)
self.assertEqual(db(db.tt.aa.like('%c')).count(), 1)
self.assertEqual(db(db.tt.aa.like('%d%')).count(), 0)
self.assertEqual(db(db.tt.aa.lower().like('A%')).count(), 1)
self.assertEqual(db(db.tt.aa.lower().like('%B%')).count(),
1)
self.assertEqual(db(db.tt.aa.lower().like('%C')).count(), 1)
self.assertEqual(db(db.tt.aa.upper().like('A%')).count(), 1)
self.assertEqual(db(db.tt.aa.upper().like('%B%')).count(),
1)
self.assertEqual(db(db.tt.aa.upper().like('%C')).count(), 1)
drop(db.tt)
db.define_table('tt', Field('aa', 'integer'))
self.assertEqual(isinstance(db.tt.insert(aa=1111111111), long), True)
self.assertEqual(db(db.tt.aa.like('1%')).count(), 1)
self.assertEqual(db(db.tt.aa.like('2%')).count(), 0)
drop(db.tt)
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestDatetime(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa', 'datetime'))
self.assertEqual(isinstance(db.tt.insert(aa=datetime.datetime(1971, 12, 21,
11, 30)), long), True)
self.assertEqual(isinstance(db.tt.insert(aa=datetime.datetime(1971, 11, 21,
10, 30)), long), True)
self.assertEqual(isinstance(db.tt.insert(aa=datetime.datetime(1970, 12, 21,
9, 30)), long), True)
self.assertEqual(db(db.tt.aa == datetime.datetime(1971, 12,
21, 11, 30)).count(), 1)
self.assertEqual(db(db.tt.aa >= datetime.datetime(1971, 1, 1)).count(), 2)
drop(db.tt)
@unittest.skipIf(IS_GAE or IS_MONGODB or IS_IMAP, "Expressions are not supported")
class TestExpressions(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa', 'integer'))
self.assertEqual(isinstance(db.tt.insert(aa=1), long), True)
self.assertEqual(isinstance(db.tt.insert(aa=2), long), True)
self.assertEqual(isinstance(db.tt.insert(aa=3), long), True)
self.assertEqual(db(db.tt.aa == 3).update(aa=db.tt.aa + 1), 1)
self.assertEqual(db(db.tt.aa == 4).count(), 1)
self.assertEqual(db(db.tt.aa == -2).count(), 0)
sum = (db.tt.aa + 1).sum()
self.assertEqual(db(db.tt.aa == 2).select(sum).first()[sum], 3)
self.assertEqual(db(db.tt.aa == -2).select(sum).first()[sum], None)
drop(db.tt)
@unittest.skip("JOIN queries are not supported")
class TestJoin(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('t1', Field('aa'))
db.define_table('t2', Field('aa'), Field('b', db.t1))
i1 = db.t1.insert(aa='1')
i2 = db.t1.insert(aa='2')
i3 = db.t1.insert(aa='3')
db.t2.insert(aa='4', b=i1)
db.t2.insert(aa='5', b=i2)
db.t2.insert(aa='6', b=i2)
self.assertEqual(len(db(db.t1.id
== db.t2.b).select(orderby=db.t1.aa
| db.t2.aa)), 3)
self.assertEqual(db(db.t1.id == db.t2.b).select(orderby=db.t1.aa
| db.t2.aa)[2].t1.aa, '2')
self.assertEqual(db(db.t1.id == db.t2.b).select(orderby=db.t1.aa
| db.t2.aa)[2].t2.aa, '6')
self.assertEqual(len(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)), 4)
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[2].t1.aa, '2')
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[2].t2.aa, '6')
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[3].t1.aa, '3')
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[3].t2.aa, None)
self.assertEqual(len(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa, groupby=db.t1.aa)),
3)
self.assertEqual(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa,
groupby=db.t1.aa)[0]._extra[db.t2.id.count()],
1)
self.assertEqual(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa,
groupby=db.t1.aa)[1]._extra[db.t2.id.count()],
2)
self.assertEqual(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa,
groupby=db.t1.aa)[2]._extra[db.t2.id.count()],
0)
drop(db.t2)
drop(db.t1)
db.define_table('person',Field('name'))
id = db.person.insert(name="max")
self.assertEqual(id.name,'max')
db.define_table('dog',Field('name'),Field('ownerperson','reference person'))
db.dog.insert(name='skipper',ownerperson=1)
row = db(db.person.id==db.dog.ownerperson).select().first()
self.assertEqual(row[db.person.name],'max')
self.assertEqual(row['person.name'],'max')
drop(db.dog)
self.assertEqual(len(db.person._referenced_by),0)
drop(db.person)
class TestMinMaxSumAvg(unittest.TestCase):
@unittest.skipIf(IS_GAE or IS_MONGODB or IS_IMAP, 'TODO: Datastore throws "AttributeError: Row object has no attribute _extra"')
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa', 'integer'))
self.assertEqual(isinstance(db.tt.insert(aa=1), long), True)
self.assertEqual(isinstance(db.tt.insert(aa=2), long), True)
self.assertEqual(isinstance(db.tt.insert(aa=3), long), True)
s = db.tt.aa.min()
self.assertEqual(db(db.tt.id > 0).select(s)[0]._extra[s], 1)
self.assertEqual(db(db.tt.id > 0).select(s).first()[s], 1)
self.assertEqual(db().select(s).first()[s], 1)
s = db.tt.aa.max()
self.assertEqual(db().select(s).first()[s], 3)
s = db.tt.aa.sum()
self.assertEqual(db().select(s).first()[s], 6)
s = db.tt.aa.count()
self.assertEqual(db().select(s).first()[s], 3)
s = db.tt.aa.avg()
self.assertEqual(db().select(s).first()[s], 2)
drop(db.tt)
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestCache(unittest.TestCase):
def testRun(self):
from cache import CacheInRam
cache = CacheInRam()
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'))
db.tt.insert(aa='1')
r0 = db().select(db.tt.ALL)
r1 = db().select(db.tt.ALL, cache=(cache, 1000))
self.assertEqual(len(r0),len(r1))
r2 = db().select(db.tt.ALL, cache=(cache, 1000))
self.assertEqual(len(r0),len(r2))
r3 = db().select(db.tt.ALL, cache=(cache, 1000), cacheable=True)
self.assertEqual(len(r0),len(r3))
r4 = db().select(db.tt.ALL, cache=(cache, 1000), cacheable=True)
self.assertEqual(len(r0),len(r4))
drop(db.tt)
@unittest.skipIf(IS_IMAP, "Skip IMAP")
class TestMigrations(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'), migrate='.storage.table')
db.commit()
db.close()
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'), Field('b'),
migrate='.storage.table')
db.commit()
db.close()
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'), Field('b', 'text'),
migrate='.storage.table')
db.commit()
db.close()
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'), migrate='.storage.table')
drop(db.tt)
db.commit()
db.close()
def tearDown(self):
if os.path.exists('.storage.db'):
os.unlink('.storage.db')
if os.path.exists('.storage.table'):
os.unlink('.storage.table')
class TestReference(unittest.TestCase):
@unittest.skipIf(IS_MONGODB or IS_IMAP, "TODO: MongoDB assertion error (long object has no attribute id)")
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
if DEFAULT_URI.startswith('mssql'):
#multiple cascade gotcha
for key in ['reference','reference FK']:
db._adapter.types[key]=db._adapter.types[key].replace(
'%(on_delete_action)s','NO ACTION')
db.define_table('tt', Field('name'), Field('aa','reference tt'))
db.commit()
x = db.tt.insert(name='max')
assert isinstance(x.id, long) == True
assert isinstance(x['id'], long) == True
x.aa = x
assert isinstance(x.aa, long) == True
x.update_record()
y = db.tt[x.id]
assert y.aa == x.aa
assert y.aa.aa.aa.aa.aa.aa.name == 'max'
z=db.tt.insert(name='xxx', aa = y)
assert z.aa == y.id
drop(db.tt)
db.commit()
@unittest.skipIf(IS_IMAP, "Skip IMAP")
class TestClientLevelOps(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'))
db.commit()
db.tt.insert(aa="test")
rows1 = db(db.tt.aa=='test').select()
rows2 = db(db.tt.aa=='test').select()
rows3 = rows1 & rows2
assert len(rows3) == 2
rows4 = rows1 | rows2
assert len(rows4) == 1
rows5 = rows1.find(lambda row: row.aa=="test")
assert len(rows5) == 1
rows6 = rows2.exclude(lambda row: row.aa=="test")
assert len(rows6) == 1
rows7 = rows5.sort(lambda row: row.aa)
assert len(rows7) == 1
drop(db.tt)
db.commit()
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestVirtualFields(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'))
db.commit()
db.tt.insert(aa="test")
class Compute:
def a_upper(row): return row.tt.aa.upper()
db.tt.virtualfields.append(Compute())
assert db(db.tt.id>0).select().first().a_upper == 'TEST'
drop(db.tt)
db.commit()
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestComputedFields(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt',
Field('aa'),
Field('bb',default='x'),
Field('cc',compute=lambda r: r.aa+r.bb))
db.commit()
id = db.tt.insert(aa="z")
self.assertEqual(db.tt[id].cc,'zx')
drop(db.tt)
db.commit()
        # check that a computed field can refer to earlier-defined computed fields
db.define_table('tt',
Field('aa'),
Field('bb',default='x'),
Field('cc',compute=lambda r: r.aa+r.bb),
Field('dd',compute=lambda r: r.bb + r.cc))
db.commit()
id = db.tt.insert(aa="z")
self.assertEqual(db.tt[id].dd,'xzx')
drop(db.tt)
db.commit()
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestCommonFilters(unittest.TestCase):
@unittest.skipIf(IS_MONGODB, "TODO: MongoDB Assertion error")
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('t1', Field('aa'))
# db.define_table('t2', Field('aa'), Field('b', db.t1))
i1 = db.t1.insert(aa='1')
i2 = db.t1.insert(aa='2')
i3 = db.t1.insert(aa='3')
# db.t2.insert(aa='4', b=i1)
# db.t2.insert(aa='5', b=i2)
# db.t2.insert(aa='6', b=i2)
db.t1._common_filter = lambda q: db.t1.aa>'1'
self.assertEqual(db(db.t1).count(),2)
# self.assertEqual(db(db.t1).count(),2)
# q = db.t2.b==db.t1.id
# q = db.t1.aa != None
# self.assertEqual(db(q).count(),2)
# self.assertEqual(db(q).count(),2)
# self.assertEqual(len(db(db.t1).select(left=db.t2.on(q))),3)
# db.t2._common_filter = lambda q: db.t2.aa<6
# self.assertEqual(db(q).count(),1)
# self.assertEqual(db(q).count(),1)
# self.assertEqual(len(db(db.t1).select(left=db.t2.on(q))),2)
# drop(db.t2)
drop(db.t1)
@unittest.skipIf(IS_IMAP, "Skip IMAP test")
class TestImportExportFields(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('person', Field('name'))
db.define_table('pet',Field('friend',db.person),Field('name'))
for n in range(2):
db(db.pet).delete()
db(db.person).delete()
for k in range(10):
id = db.person.insert(name=str(k))
db.pet.insert(friend=id,name=str(k))
db.commit()
stream = StringIO.StringIO()
db.export_to_csv_file(stream)
db(db.pet).delete()
db(db.person).delete()
stream = StringIO.StringIO(stream.getvalue())
db.import_from_csv_file(stream)
assert db(db.person).count()==10
assert db(db.pet.name).count()==10
drop(db.pet)
drop(db.person)
db.commit()
@unittest.skipIf(IS_IMAP, "Skip IMAP test")
class TestImportExportUuidFields(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('person', Field('name'),Field('uuid'))
db.define_table('pet',Field('friend',db.person),Field('name'))
for n in range(2):
db(db.pet).delete()
db(db.person).delete()
for k in range(10):
id = db.person.insert(name=str(k),uuid=str(k))
db.pet.insert(friend=id,name=str(k))
db.commit()
stream = StringIO.StringIO()
db.export_to_csv_file(stream)
db(db.person).delete()
db(db.pet).delete()
stream = StringIO.StringIO(stream.getvalue())
db.import_from_csv_file(stream)
assert db(db.person).count()==10
assert db(db.pet).count()==10
drop(db.pet)
drop(db.person)
db.commit()
@unittest.skipIf(IS_IMAP, "Skip IMAP test")
class TestDALDictImportExport(unittest.TestCase):
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('person', Field('name', default="Michael"),Field('uuid'))
db.define_table('pet',Field('friend',db.person),Field('name'))
dbdict = db.as_dict(flat=True, sanitize=False)
assert isinstance(dbdict, dict)
uri = dbdict["uri"]
assert isinstance(uri, basestring) and uri
assert len(dbdict["tables"]) == 2
assert len(dbdict["tables"][0]["fields"]) == 3
assert dbdict["tables"][0]["fields"][1]["type"] == db.person.name.type
assert dbdict["tables"][0]["fields"][1]["default"] == db.person.name.default
db2 = DAL(**dbdict)
assert len(db.tables) == len(db2.tables)
assert hasattr(db2, "pet") and isinstance(db2.pet, Table)
assert hasattr(db2.pet, "friend") and isinstance(db2.pet.friend, Field)
drop(db.pet)
db.commit()
db2.commit()
have_serializers = True
try:
import serializers
dbjson = db.as_json(sanitize=False)
assert isinstance(dbjson, basestring) and len(dbjson) > 0
unicode_keys = True
if sys.version < "2.6.5":
unicode_keys = False
db3 = DAL(**serializers.loads_json(dbjson,
unicode_keys=unicode_keys))
assert hasattr(db3, "person") and hasattr(db3.person, "uuid") and\
db3.person.uuid.type == db.person.uuid.type
drop(db3.person)
db3.commit()
except ImportError:
pass
mpfc = "Monty Python's Flying Circus"
dbdict4 = {"uri": DEFAULT_URI,
"tables":[{"tablename": "tvshow",
"fields": [{"fieldname": "name",
"default":mpfc},
{"fieldname": "rating",
"type":"double"}]},
{"tablename": "staff",
"fields": [{"fieldname": "name",
"default":"Michael"},
{"fieldname": "food",
"default":"Spam"},
{"fieldname": "tvshow",
"type": "reference tvshow"}]}]}
db4 = DAL(**dbdict4)
assert "staff" in db4.tables
assert "name" in db4.staff
assert db4.tvshow.rating.type == "double"
assert (isinstance(db4.tvshow.insert(), long), isinstance(db4.tvshow.insert(name="Loriot"), long),
isinstance(db4.tvshow.insert(name="Il Mattatore"), long)) == (True, True, True)
assert isinstance(db4(db4.tvshow).select().first().id, long) == True
assert db4(db4.tvshow).select().first().name == mpfc
drop(db4.staff)
drop(db4.tvshow)
db4.commit()
dbdict5 = {"uri": DEFAULT_URI}
db5 = DAL(**dbdict5)
assert db5.tables in ([], None)
assert not (str(db5) in ("", None))
dbdict6 = {"uri": DEFAULT_URI,
"tables":[{"tablename": "staff"},
{"tablename": "tvshow",
"fields": [{"fieldname": "name"},
{"fieldname": "rating", "type":"double"}
]
}]
}
db6 = DAL(**dbdict6)
assert len(db6["staff"].fields) == 1
assert "name" in db6["tvshow"].fields
assert db6.staff.insert() is not None
assert isinstance(db6(db6.staff).select().first().id, long) == True
drop(db6.staff)
drop(db6.tvshow)
db6.commit()
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestValidateAndInsert(unittest.TestCase):
def testRun(self):
import datetime
from gluon.validators import IS_INT_IN_RANGE
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('val_and_insert',
Field('aa'),
Field('bb', 'integer',
requires=IS_INT_IN_RANGE(1,5))
)
rtn = db.val_and_insert.validate_and_insert(aa='test1', bb=2)
self.assertEqual(isinstance(rtn.id, long), True)
#errors should be empty
self.assertEqual(len(rtn.errors.keys()), 0)
#this insert won't pass
rtn = db.val_and_insert.validate_and_insert(bb="a")
#the returned id should be None
self.assertEqual(rtn.id, None)
#an error message should be in rtn.errors.bb
self.assertNotEqual(rtn.errors.bb, None)
#cleanup table
drop(db.val_and_insert)
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestSelectAsDict(unittest.TestCase):
def testSelect(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table(
'a_table',
Field('b_field'),
Field('a_field'),
)
db.a_table.insert(a_field="aa1", b_field="bb1")
rtn = db(db.a_table).select(db.a_table.id, db.a_table.b_field, db.a_table.a_field).as_list()
self.assertEqual(rtn[0]['b_field'], 'bb1')
keys = rtn[0].keys()
self.assertEqual(len(keys), 3)
self.assertEqual(("id" in keys, "b_field" in keys, "a_field" in keys), (True, True, True))
drop(db.a_table)
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestRNameTable(unittest.TestCase):
#tests for highly experimental rname attribute
@unittest.skipIf(IS_MONGODB, "TODO: MongoDB assertion error (long object has no attribute id)")
def testSelect(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
rname = db._adapter.QUOTE_TEMPLATE % 'a very complicated tablename'
db.define_table(
'easy_name',
Field('a_field'),
rname=rname
)
rtn = db.easy_name.insert(a_field='a')
self.assertEqual(isinstance(rtn.id, long), True)
rtn = db(db.easy_name.a_field == 'a').select()
self.assertEqual(len(rtn), 1)
self.assertEqual(isinstance(rtn[0].id, long), True)
self.assertEqual(rtn[0].a_field, 'a')
db.easy_name.insert(a_field='b')
self.assertEqual(db(db.easy_name).count(), 2)
rtn = db(db.easy_name.a_field == 'a').update(a_field='c')
self.assertEqual(rtn, 1)
#clean up
drop(db.easy_name)
@unittest.skip("JOIN queries are not supported")
def testJoin(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
rname = db._adapter.QUOTE_TEMPLATE % 'this is table t1'
rname2 = db._adapter.QUOTE_TEMPLATE % 'this is table t2'
db.define_table('t1', Field('aa'), rname=rname)
db.define_table('t2', Field('aa'), Field('b', db.t1), rname=rname2)
i1 = db.t1.insert(aa='1')
i2 = db.t1.insert(aa='2')
i3 = db.t1.insert(aa='3')
db.t2.insert(aa='4', b=i1)
db.t2.insert(aa='5', b=i2)
db.t2.insert(aa='6', b=i2)
self.assertEqual(len(db(db.t1.id
== db.t2.b).select(orderby=db.t1.aa
| db.t2.aa)), 3)
self.assertEqual(db(db.t1.id == db.t2.b).select(orderby=db.t1.aa
| db.t2.aa)[2].t1.aa, '2')
self.assertEqual(db(db.t1.id == db.t2.b).select(orderby=db.t1.aa
| db.t2.aa)[2].t2.aa, '6')
self.assertEqual(len(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)), 4)
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[2].t1.aa, '2')
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[2].t2.aa, '6')
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[3].t1.aa, '3')
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[3].t2.aa, None)
self.assertEqual(len(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa, groupby=db.t1.aa)),
3)
self.assertEqual(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa,
groupby=db.t1.aa)[0]._extra[db.t2.id.count()],
1)
self.assertEqual(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa,
groupby=db.t1.aa)[1]._extra[db.t2.id.count()],
2)
self.assertEqual(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa,
groupby=db.t1.aa)[2]._extra[db.t2.id.count()],
0)
drop(db.t2)
drop(db.t1)
db.define_table('person',Field('name'), rname=rname)
id = db.person.insert(name="max")
self.assertEqual(id.name,'max')
db.define_table('dog',Field('name'),Field('ownerperson','reference person'), rname=rname2)
db.dog.insert(name='skipper',ownerperson=1)
row = db(db.person.id==db.dog.ownerperson).select().first()
self.assertEqual(row[db.person.name],'max')
self.assertEqual(row['person.name'],'max')
drop(db.dog)
self.assertEqual(len(db.person._referenced_by),0)
drop(db.person)
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestRNameFields(unittest.TestCase):
# tests for highly experimental rname attribute
@unittest.skipIf(IS_GAE or IS_MONGODB, 'TODO: Datastore throws unsupported error for AGGREGATE. MongoDB assertion error (long object has no attribute id)')
def testSelect(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
rname = db._adapter.QUOTE_TEMPLATE % 'a very complicated fieldname'
rname2 = db._adapter.QUOTE_TEMPLATE % 'rrating from 1 to 10'
db.define_table(
'easy_name',
Field('a_field', rname=rname),
Field('rating', 'integer', rname=rname2, default=2)
)
rtn = db.easy_name.insert(a_field='a')
self.assertEqual(isinstance(rtn.id, long), True)
rtn = db(db.easy_name.a_field == 'a').select()
self.assertEqual(len(rtn), 1)
self.assertEqual(isinstance(rtn[0].id, long), True)
self.assertEqual(rtn[0].a_field, 'a')
db.easy_name.insert(a_field='b')
rtn = db(db.easy_name.id > 0).delete()
self.assertEqual(rtn, 2)
rtn = db(db.easy_name.id > 0).count()
self.assertEqual(rtn, 0)
db.easy_name.insert(a_field='a')
db.easy_name.insert(a_field='b')
rtn = db(db.easy_name.id > 0).count()
self.assertEqual(rtn, 2)
rtn = db(db.easy_name.a_field == 'a').update(a_field='c')
rtn = db(db.easy_name.a_field == 'c').count()
self.assertEqual(rtn, 1)
rtn = db(db.easy_name.a_field != 'c').count()
self.assertEqual(rtn, 1)
avg = db.easy_name.id.avg()
rtn = db(db.easy_name.id > 0).select(avg)
self.assertEqual(rtn[0][avg], 3)
avg = db.easy_name.rating.avg()
rtn = db(db.easy_name.id > 0).select(avg)
self.assertEqual(rtn[0][avg], 2)
rname = db._adapter.QUOTE_TEMPLATE % 'this is the person name'
db.define_table(
'person',
Field('name', default="Michael", rname=rname),
Field('uuid')
)
michael = db.person.insert() #default insert
john = db.person.insert(name='John')
luke = db.person.insert(name='Luke')
self.assertEqual(len(rtn), 3)
self.assertEqual(rtn[0].id, michael)
self.assertEqual(rtn[0].name, 'Michael')
self.assertEqual(rtn[1].id, john)
self.assertEqual(rtn[1].name, 'John')
        #fetch owners, possibly with a pet
        #main point is retrieving Luke, who has no pets
rtn = db(db.person.id > 0).select()
self.assertEqual(rtn[0].id, michael)
self.assertEqual(rtn[0].name, 'Michael')
self.assertEqual(rtn[3].name, 'Luke')
self.assertEqual(rtn[3].id, luke)
#as dict
rtn = db(db.person.id > 0).select().as_dict()
self.assertEqual(rtn[1]['name'], 'Michael')
#as list
rtn = db(db.person.id > 0).select().as_list()
self.assertEqual(rtn[0]['name'], 'Michael')
#isempty
rtn = db(db.person.id > 0).isempty()
self.assertEqual(rtn, False)
#aliases
rname = db._adapter.QUOTE_TEMPLATE % 'the cub name'
if DEFAULT_URI.startswith('mssql'):
#multiple cascade gotcha
for key in ['reference','reference FK']:
db._adapter.types[key]=db._adapter.types[key].replace(
'%(on_delete_action)s','NO ACTION')
db.define_table('pet_farm',
Field('name', rname=rname),
Field('father','reference pet_farm'),
Field('mother','reference pet_farm'),
)
minali = db.pet_farm.insert(name='Minali')
osbert = db.pet_farm.insert(name='Osbert')
#they had a cub
selina = db.pet_farm.insert(name='Selina', father=osbert, mother=minali)
father = db.pet_farm.with_alias('father')
mother = db.pet_farm.with_alias('mother')
#fetch pets with relatives
rtn = db().select(
db.pet_farm.name, father.name, mother.name,
left=[
father.on(father.id == db.pet_farm.father),
mother.on(mother.id == db.pet_farm.mother)
],
orderby=db.pet_farm.id
)
self.assertEqual(len(rtn), 3)
self.assertEqual(rtn[0].pet_farm.name, 'Minali')
self.assertEqual(rtn[0].father.name, None)
self.assertEqual(rtn[0].mother.name, None)
self.assertEqual(rtn[1].pet_farm.name, 'Osbert')
self.assertEqual(rtn[2].pet_farm.name, 'Selina')
self.assertEqual(rtn[2].father.name, 'Osbert')
self.assertEqual(rtn[2].mother.name, 'Minali')
#clean up
drop(db.pet_farm)
drop(db.person)
drop(db.easy_name)
@unittest.skipIf(IS_GAE or IS_MONGODB, 'TODO: Datastore does not accept dict objects as json field input. MongoDB assertionerror Binary("x", 0) != "x"')
def testRun(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
rname = db._adapter.QUOTE_TEMPLATE % 'a very complicated fieldname'
for ft in ['string', 'text', 'password', 'upload', 'blob']:
db.define_table('tt', Field('aa', ft, default='', rname=rname))
self.assertEqual(isinstance(db.tt.insert(aa='x'), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, 'x')
drop(db.tt)
db.define_table('tt', Field('aa', 'integer', default=1, rname=rname))
self.assertEqual(isinstance(db.tt.insert(aa=3), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, 3)
drop(db.tt)
db.define_table('tt', Field('aa', 'double', default=1, rname=rname))
self.assertEqual(isinstance(db.tt.insert(aa=3.1), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, 3.1)
drop(db.tt)
db.define_table('tt', Field('aa', 'boolean', default=True, rname=rname))
self.assertEqual(isinstance(db.tt.insert(aa=True), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, True)
drop(db.tt)
db.define_table('tt', Field('aa', 'json', default={}, rname=rname))
self.assertEqual(isinstance(db.tt.insert(aa={}), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, {})
drop(db.tt)
db.define_table('tt', Field('aa', 'date',
default=datetime.date.today(), rname=rname))
t0 = datetime.date.today()
self.assertEqual(isinstance(db.tt.insert(aa=t0), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, t0)
drop(db.tt)
db.define_table('tt', Field('aa', 'datetime',
default=datetime.datetime.today(), rname=rname))
t0 = datetime.datetime(
1971,
12,
21,
10,
30,
55,
0,
)
self.assertEqual(db.tt.insert(aa=t0), 1)
self.assertEqual(db().select(db.tt.aa)[0].aa, t0)
## Row APIs
row = db().select(db.tt.aa)[0]
self.assertEqual(db.tt[1].aa,t0)
self.assertEqual(db.tt['aa'],db.tt.aa)
self.assertEqual(db.tt(1).aa,t0)
self.assertTrue(db.tt(1,aa=None)==None)
self.assertFalse(db.tt(1,aa=t0)==None)
self.assertEqual(row.aa,t0)
self.assertEqual(row['aa'],t0)
self.assertEqual(row['tt.aa'],t0)
self.assertEqual(row('tt.aa'),t0)
## Lazy and Virtual fields
db.tt.b = Field.Virtual(lambda row: row.tt.aa)
db.tt.c = Field.Lazy(lambda row: row.tt.aa)
row = db().select(db.tt.aa)[0]
self.assertEqual(row.b,t0)
self.assertEqual(row.c(),t0)
drop(db.tt)
db.define_table('tt', Field('aa', 'time', default='11:30', rname=rname))
t0 = datetime.time(10, 30, 55)
self.assertEqual(isinstance(db.tt.insert(aa=t0), long), True)
self.assertEqual(db().select(db.tt.aa)[0].aa, t0)
drop(db.tt)
def testInsert(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
rname = db._adapter.QUOTE_TEMPLATE % 'a very complicated fieldname'
db.define_table('tt', Field('aa', rname=rname))
self.assertEqual(isinstance(db.tt.insert(aa='1'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='1'), long), True)
self.assertEqual(isinstance(db.tt.insert(aa='1'), long), True)
self.assertEqual(db(db.tt.aa == '1').count(), 3)
self.assertEqual(db(db.tt.aa == '2').isempty(), True)
self.assertEqual(db(db.tt.aa == '1').update(aa='2'), 3)
self.assertEqual(db(db.tt.aa == '2').count(), 3)
self.assertEqual(db(db.tt.aa == '2').isempty(), False)
self.assertEqual(db(db.tt.aa == '2').delete(), 3)
self.assertEqual(db(db.tt.aa == '2').isempty(), True)
drop(db.tt)
@unittest.skip("JOIN queries are not supported")
def testJoin(self):
db = DAL(DEFAULT_URI, check_reserved=['all'])
rname = db._adapter.QUOTE_TEMPLATE % 'this is field aa'
rname2 = db._adapter.QUOTE_TEMPLATE % 'this is field b'
db.define_table('t1', Field('aa', rname=rname))
db.define_table('t2', Field('aa', rname=rname), Field('b', db.t1, rname=rname2))
i1 = db.t1.insert(aa='1')
i2 = db.t1.insert(aa='2')
i3 = db.t1.insert(aa='3')
db.t2.insert(aa='4', b=i1)
db.t2.insert(aa='5', b=i2)
db.t2.insert(aa='6', b=i2)
self.assertEqual(len(db(db.t1.id
== db.t2.b).select(orderby=db.t1.aa
| db.t2.aa)), 3)
self.assertEqual(db(db.t1.id == db.t2.b).select(orderby=db.t1.aa
| db.t2.aa)[2].t1.aa, '2')
self.assertEqual(db(db.t1.id == db.t2.b).select(orderby=db.t1.aa
| db.t2.aa)[2].t2.aa, '6')
self.assertEqual(len(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)), 4)
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[2].t1.aa, '2')
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[2].t2.aa, '6')
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[3].t1.aa, '3')
self.assertEqual(db().select(db.t1.ALL, db.t2.ALL,
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa | db.t2.aa)[3].t2.aa, None)
self.assertEqual(len(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa, groupby=db.t1.aa)),
3)
self.assertEqual(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa,
groupby=db.t1.aa)[0]._extra[db.t2.id.count()],
1)
self.assertEqual(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa,
groupby=db.t1.aa)[1]._extra[db.t2.id.count()],
2)
self.assertEqual(db().select(db.t1.aa, db.t2.id.count(),
left=db.t2.on(db.t1.id == db.t2.b),
orderby=db.t1.aa,
groupby=db.t1.aa)[2]._extra[db.t2.id.count()],
0)
drop(db.t2)
drop(db.t1)
db.define_table('person',Field('name', rname=rname))
id = db.person.insert(name="max")
self.assertEqual(id.name,'max')
db.define_table('dog',Field('name', rname=rname),Field('ownerperson','reference person', rname=rname2))
db.dog.insert(name='skipper',ownerperson=1)
row = db(db.person.id==db.dog.ownerperson).select().first()
self.assertEqual(row[db.person.name],'max')
self.assertEqual(row['person.name'],'max')
drop(db.dog)
self.assertEqual(len(db.person._referenced_by),0)
drop(db.person)
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestQuoting(unittest.TestCase):
# tests for case sensitivity
def testCase(self):
return
db = DAL(DEFAULT_URI, check_reserved=['all'], ignore_field_case=False)
if DEFAULT_URI.startswith('mssql'):
#multiple cascade gotcha
for key in ['reference','reference FK']:
db._adapter.types[key]=db._adapter.types[key].replace(
'%(on_delete_action)s','NO ACTION')
# test table case
t0 = db.define_table('B',
Field('f', 'string'))
try:
t1 = db.define_table('b',
Field('B', t0),
Field('words', 'text'))
except Exception, e:
            # An error is expected when the database does not support
            # case-sensitive entity names.
if DEFAULT_URI.startswith('sqlite:'):
self.assertTrue(isinstance(e, db._adapter.driver.OperationalError))
return
raise e
blather = 'blah blah and so'
t0[0] = {'f': 'content'}
t1[0] = {'B': int(t0[1]['id']),
'words': blather}
r = db(db.B.id==db.b.B).select()
self.assertEqual(r[0].b.words, blather)
drop(t1)
drop(t0)
# test field case
try:
t0 = db.define_table('table is a test',
Field('a_a'),
Field('a_A'))
except Exception, e:
            # Some databases do not support case-sensitive field names; MySQL is one of them.
if DEFAULT_URI.startswith('mysql:'):
db.rollback()
return
raise e
t0[0] = dict(a_a = 'a_a', a_A='a_A')
self.assertEqual(t0[1].a_a, 'a_a')
self.assertEqual(t0[1].a_A, 'a_A')
drop(t0)
def testPKFK(self):
# test primary keys
db = DAL(DEFAULT_URI, check_reserved=['all'], ignore_field_case=False)
if DEFAULT_URI.startswith('mssql'):
#multiple cascade gotcha
for key in ['reference','reference FK']:
db._adapter.types[key]=db._adapter.types[key].replace(
'%(on_delete_action)s','NO ACTION')
        # Test a table without a surrogate key. Length is limited to
        # 100 because of MySQL limitations: it cannot handle more than
        # 767 bytes in unique keys.
t0 = db.define_table('t0', Field('Code', length=100), primarykey=['Code'])
t2 = db.define_table('t2', Field('f'), Field('t0_Code', 'reference t0'))
t3 = db.define_table('t3', Field('f', length=100), Field('t0_Code', t0.Code), primarykey=['f'])
t4 = db.define_table('t4', Field('f', length=100), Field('t0', t0), primarykey=['f'])
try:
t5 = db.define_table('t5', Field('f', length=100), Field('t0', 'reference no_table_wrong_reference'), primarykey=['f'])
except Exception, e:
self.assertTrue(isinstance(e, KeyError))
if DEFAULT_URI.startswith('mssql'):
#there's no drop cascade in mssql
drop(t3)
drop(t4)
drop(t2)
drop(t0)
else:
drop(t0, 'cascade')
drop(t2)
drop(t3)
drop(t4)
class TestTableAndFieldCase(unittest.TestCase):
"""
    At the Python level we should not allow db.C and db.c because of .table conflicts on Windows,
    but it should be possible to map two different names into distinct tables "c" and "C" at the Python level.
    By default, Python model names should be mapped to lower-case table names and case insensitivity should be assumed.
    (A minimal sketch of such a mapping follows this class.)
"""
def testme(self):
return
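# A minimal sketch (not part of the original tests) of the mapping described in
# TestTableAndFieldCase above, assuming the backend supports case-sensitive
# entity names: two distinct Python-level names can be mapped onto backend
# tables "c" and "C" via the experimental rname attribute, e.g.
#
#     db.define_table('c_lower', Field('f'),
#                     rname=db._adapter.QUOTE_TEMPLATE % 'c')
#     db.define_table('c_upper', Field('f'),
#                     rname=db._adapter.QUOTE_TEMPLATE % 'C')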
class TestQuotesByDefault(unittest.TestCase):
"""
    All default table names should be quoted unless an explicit mapping has been given for a table.
"""
def testme(self):
return
if __name__ == '__main__':
unittest.main()
tearDownModule()
| {
"content_hash": "590c70f2ae532e209b4fbf335d97fd7f",
"timestamp": "",
"source": "github",
"line_count": 1430,
"max_line_length": 159,
"avg_line_length": 41.42517482517483,
"alnum_prop": 0.5448192038893953,
"repo_name": "uwdata/termite-data-server",
"id": "a4eb0dadf49f7d910d4698f53962f9b99d6a19a4",
"size": "59284",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "web2py/gluon/tests/test_dal_nosql.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "717883"
},
{
"name": "Java",
"bytes": "4515"
},
{
"name": "JavaScript",
"bytes": "2239861"
},
{
"name": "Perl",
"bytes": "2402"
},
{
"name": "Python",
"bytes": "6125625"
},
{
"name": "Shell",
"bytes": "104651"
}
],
"symlink_target": ""
} |
"""
This module provides utility functions for the models package
"""
from collections import deque, MutableMapping
from inspect import signature
import numpy as np
from ..utils import isiterable, check_broadcast
from ..utils.compat import NUMPY_LT_1_14
from .. import units as u
__all__ = ['ExpressionTree', 'AliasDict', 'check_broadcast',
'poly_map_domain', 'comb', 'ellipse_extent']
class ExpressionTree:
__slots__ = ['left', 'right', 'value']
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
# Two subtrees can't be the same *object* or else traverse_postorder
# breaks, so we just always copy the right subtree to subvert that.
if right is not None and left is right:
right = right.copy()
self.right = right
def __getstate__(self):
# For some reason the default pickle protocol on Python 2 does not just
# do this. On Python 3 it's not a problem.
return dict((slot, getattr(self, slot)) for slot in self.__slots__)
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
@property
def isleaf(self):
return self.left is None and self.right is None
def traverse_preorder(self):
stack = deque([self])
while stack:
node = stack.pop()
yield node
if node.right is not None:
stack.append(node.right)
if node.left is not None:
stack.append(node.left)
def traverse_inorder(self):
stack = deque()
node = self
while stack or node is not None:
if node is not None:
stack.append(node)
node = node.left
else:
node = stack.pop()
yield node
node = node.right
def traverse_postorder(self):
stack = deque([self])
last = None
while stack:
node = stack[-1]
if last is None or node is last.left or node is last.right:
if node.left is not None:
stack.append(node.left)
elif node.right is not None:
stack.append(node.right)
elif node.left is last and node.right is not None:
stack.append(node.right)
else:
yield stack.pop()
last = node
def evaluate(self, operators, getter=None, start=0, stop=None):
"""Evaluate the expression represented by this tree.
        ``operators`` should be a dictionary mapping operator names ('tensor',
'product', etc.) to a function that implements that operator for the
correct number of operands.
If given, ``getter`` is a function evaluated on each *leaf* node's
value before applying the operator between them. This could be used,
for example, to operate on an attribute of the node values rather than
directly on the node values. The ``getter`` is passed both the index
of the leaf (a count starting at 0 that is incremented after each leaf
is found) and the leaf node itself.
The ``start`` and ``stop`` arguments allow evaluating a sub-expression
within the expression tree.
TODO: Document this better.
"""
stack = deque()
if getter is None:
getter = lambda idx, value: value
if start is None:
start = 0
leaf_idx = 0
for node in self.traverse_postorder():
if node.isleaf:
# For a "tree" containing just a single operator at the root
# Also push the index of this leaf onto the stack, which will
# prove useful for evaluating subexpressions
stack.append((getter(leaf_idx, node.value), leaf_idx))
leaf_idx += 1
else:
operator = operators[node.value]
if len(stack) < 2:
# Skip this operator if there are not enough operands on
# the stack; this can happen if some operands were skipped
# when evaluating a sub-expression
continue
right = stack.pop()
left = stack.pop()
operands = []
for operand in (left, right):
# idx is the leaf index; -1 if not a leaf node
if operand[-1] == -1:
operands.append(operand)
else:
operand, idx = operand
if start <= idx and (stop is None or idx < stop):
operands.append((operand, idx))
if len(operands) == 2:
# evaluate the operator with the given operands and place
# the result on the stack (with -1 for the "leaf index"
# since this result is not a leaf node
left, right = operands
stack.append((operator(left[0], right[0]), -1))
elif len(operands) == 0:
# Just push the left one back on the stack
# TODO: Explain and/or refactor this better
# This is here because even if both operands were "skipped"
# due to being outside the (start, stop) range, we've only
# skipped one operator. But there should be at least 2
# operators involving these operands, so we push the one
# from the left back onto the stack so that the next
# operator will be skipped as well. Should probably come
# up with an easier to follow way to write this algorithm
stack.append(left)
else:
# one or more of the operands was not included in the
# sub-expression slice, so don't evaluate the operator;
# instead place left over operands (if any) back on the
# stack for later use
stack.extend(operands)
return stack.pop()[0]
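    # A minimal usage sketch (not part of the original module), assuming
    # ``operators`` maps each operator name to a plain two-argument callable:
    #
    #     tree = ExpressionTree('+', ExpressionTree(1), ExpressionTree(2))
    #     tree.evaluate({'+': lambda a, b: a + b})   # -> 3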
def copy(self):
        # Hopefully this won't blow the stack for any practical case; if a
        # case arises where this doesn't work, we can switch to an iterative
        # approach.
children = []
for child in (self.left, self.right):
if isinstance(child, ExpressionTree):
children.append(child.copy())
else:
children.append(child)
return self.__class__(self.value, left=children[0], right=children[1])
def format_expression(self, operator_precedence, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: '[{0}]'.format(i)
for node in self.traverse_postorder():
if node.isleaf:
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
oper_order = operator_precedence[node.value]
right = operands.pop()
left = operands.pop()
if (node.left is not None and not node.left.isleaf and
operator_precedence[node.left.value] < oper_order):
left = '({0})'.format(left)
if (node.right is not None and not node.right.isleaf and
operator_precedence[node.right.value] < oper_order):
right = '({0})'.format(right)
operands.append(' '.join((left, node.value, right)))
return ''.join(operands)
class AliasDict(MutableMapping):
"""
Creates a `dict` like object that wraps an existing `dict` or other
`MutableMapping`, along with a `dict` of *key aliases* that translate
between specific keys in this dict to different keys in the underlying
dict.
In other words, keys that do not have an associated alias are accessed and
stored like a normal `dict`. However, a key that has an alias is accessed
and stored to the "parent" dict via the alias.
Parameters
----------
parent : dict-like
        The parent `dict` that aliased keys are accessed from and stored to.
aliases : dict-like
Maps keys in this dict to their associated keys in the parent dict.
Examples
--------
>>> parent = {'a': 1, 'b': 2, 'c': 3}
>>> aliases = {'foo': 'a', 'bar': 'c'}
>>> alias_dict = AliasDict(parent, aliases)
>>> alias_dict['foo']
1
>>> alias_dict['bar']
3
Keys in the original parent dict are not visible if they were not
aliased::
>>> alias_dict['b']
Traceback (most recent call last):
...
KeyError: 'b'
Likewise, updates to aliased keys are reflected back in the parent dict::
>>> alias_dict['foo'] = 42
>>> alias_dict['foo']
42
>>> parent['a']
42
However, updates/insertions to keys that are *not* aliased are not
reflected in the parent dict::
>>> alias_dict['qux'] = 99
>>> alias_dict['qux']
99
>>> 'qux' in parent
False
    In particular, an update on the `AliasDict` to a key that is equal to
    one of the aliased keys in the parent dict does *not* update the parent
dict. For example, ``alias_dict`` aliases ``'foo'`` to ``'a'``. But
assigning to a key ``'a'`` on the `AliasDict` does not impact the
parent::
>>> alias_dict['a'] = 'nope'
>>> alias_dict['a']
'nope'
>>> parent['a']
42
"""
_store_type = dict
"""
Subclasses may override this to use other mapping types as the underlying
storage, for example an `OrderedDict`. However, even in this case
additional work may be needed to get things like the ordering right.
"""
def __init__(self, parent, aliases):
self._parent = parent
self._store = self._store_type()
self._aliases = dict(aliases)
def __getitem__(self, key):
if key in self._aliases:
try:
return self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
return self._store[key]
def __setitem__(self, key, value):
if key in self._aliases:
self._parent[self._aliases[key]] = value
else:
self._store[key] = value
def __delitem__(self, key):
if key in self._aliases:
try:
del self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
else:
del self._store[key]
def __iter__(self):
"""
First iterates over keys from the parent dict (if the aliased keys are
present in the parent), followed by any keys in the local store.
"""
for key, alias in self._aliases.items():
if alias in self._parent:
yield key
for key in self._store:
yield key
def __len__(self):
# TODO:
# This could be done more efficiently, but at present the use case for
# it is narrow if non-existent.
return len(list(iter(self)))
def __repr__(self):
# repr() just like any other dict--this should look transparent
store_copy = self._store_type()
for key, alias in self._aliases.items():
if alias in self._parent:
store_copy[key] = self._parent[alias]
store_copy.update(self._store)
return repr(store_copy)
class _BoundingBox(tuple):
"""
Base class for models with custom bounding box templates (methods that
return an actual bounding box tuple given some adjustable parameters--see
for example `~astropy.modeling.models.Gaussian1D.bounding_box`).
On these classes the ``bounding_box`` property still returns a `tuple`
giving the default bounding box for that instance of the model. But that
tuple may also be a subclass of this class that is callable, and allows
a new tuple to be returned using a user-supplied value for any adjustable
parameters to the bounding box.
"""
_model = None
def __new__(cls, input_, _model=None):
self = super().__new__(cls, input_)
if _model is not None:
# Bind this _BoundingBox (most likely a subclass) to a Model
# instance so that its __call__ can access the model
self._model = _model
return self
def __call__(self, *args, **kwargs):
raise NotImplementedError(
"This bounding box is fixed by the model and does not have "
"adjustable parameters.")
@classmethod
def validate(cls, model, bounding_box):
"""
Validate a given bounding box sequence against the given model (which
may be either a subclass of `~astropy.modeling.Model` or an instance
        thereof), so long as the ``.inputs`` attribute is defined.
Currently this just checks that the bounding_box is either a 2-tuple
of lower and upper bounds for 1-D models, or an N-tuple of 2-tuples
for N-D models.
This also returns a normalized version of the bounding_box input to
ensure it is always an N-tuple (even for the 1-D case).
"""
nd = model.n_inputs
if nd == 1:
if (not isiterable(bounding_box)
or np.shape(bounding_box) not in ((2,), (1, 2))):
raise ValueError(
"Bounding box for {0} model must be a sequence of length "
"2 consisting of a lower and upper bound, or a 1-tuple "
"containing such a sequence as its sole element.".format(
model.name))
if len(bounding_box) == 1:
return cls((tuple(bounding_box[0]),))
else:
return cls(tuple(bounding_box))
else:
if (not isiterable(bounding_box)
or np.shape(bounding_box) != (nd, 2)):
raise ValueError(
"Bounding box for {0} model must be a sequence of length "
"{1} (the number of model inputs) consisting of pairs of "
"lower and upper bounds for those inputs on which to "
"evaluate the model.".format(model.name, nd))
return cls(tuple(bounds) for bounds in bounding_box)
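# A minimal sketch (not part of the original module) of the normalization
# performed by _BoundingBox.validate, assuming ``m`` is some 1-input model
# instance:
#
#     _BoundingBox.validate(m, (-1, 1))     # -> (-1, 1)
#     _BoundingBox.validate(m, [(-1, 1)])   # -> ((-1, 1),)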
def make_binary_operator_eval(oper, f, g):
"""
Given a binary operator (as a callable of two arguments) ``oper`` and
two callables ``f`` and ``g`` which accept the same arguments,
returns a *new* function that takes the same arguments as ``f`` and ``g``,
but passes the outputs of ``f`` and ``g`` in the given ``oper``.
    ``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The
    given operator is applied element-wise to the tuple outputs.
Example
-------
>>> from operator import add
>>> def prod(x, y):
... return (x * y,)
...
>>> sum_of_prod = make_binary_operator_eval(add, prod, prod)
>>> sum_of_prod(3, 5)
(30,)
"""
return lambda inputs, params: \
tuple(oper(x, y) for x, y in zip(f(inputs, params),
g(inputs, params)))
def poly_map_domain(oldx, domain, window):
"""
Map domain into window by shifting and scaling.
Parameters
----------
oldx : array
original coordinates
domain : list or tuple of length 2
function domain
window : list or tuple of length 2
range into which to map the domain
"""
domain = np.array(domain, dtype=np.float64)
window = np.array(window, dtype=np.float64)
scl = (window[1] - window[0]) / (domain[1] - domain[0])
off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0])
return off + scl * oldx
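# A minimal worked example (not part of the original module): mapping the
# domain [0, 10] onto the window [-1, 1] sends 0 -> -1, 5 -> 0 and 10 -> 1:
#
#     poly_map_domain(np.array([0., 5., 10.]), [0, 10], [-1, 1])
#     # -> array([-1., 0., 1.])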
def comb(N, k):
"""
The number of combinations of N things taken k at a time.
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
"""
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in range(min(k, N - k)):
val = (val * (N - j)) / (j + 1)
return val
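# A minimal worked example (not part of the original module): the running
# product above yields the binomial coefficient, computed with true division:
#
#     comb(5, 2)   # -> 10.0
#     comb(2, 5)   # -> 0  (k > N)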
def array_repr_oneline(array):
"""
Represents a multi-dimensional Numpy array flattened onto a single line.
"""
sep = ',' if NUMPY_LT_1_14 else ', '
r = np.array2string(array, separator=sep, suppress_small=True)
return ' '.join(l.strip() for l in r.splitlines())
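# A minimal sketch (not part of the original module); the exact spacing depends
# on the numpy version and print options:
#
#     array_repr_oneline(np.eye(2))   # -> '[[1., 0.], [0., 1.]]'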
def combine_labels(left, right):
"""
For use with the join operator &: Combine left input/output labels with
right input/output labels.
If none of the labels conflict then this just returns a sum of tuples.
However if *any* of the labels conflict, this appends '0' to the left-hand
    labels and '1' to the right-hand labels so there is no ambiguity.
"""
if set(left).intersection(right):
left = tuple(l + '0' for l in left)
right = tuple(r + '1' for r in right)
return left + right
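# A minimal worked example (not part of the original module):
#
#     combine_labels(('x', 'y'), ('z',))   # -> ('x', 'y', 'z')
#     combine_labels(('x', 'y'), ('x',))   # -> ('x0', 'y0', 'x1')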
def ellipse_extent(a, b, theta):
"""
Calculates the extent of a box encapsulating a rotated 2D ellipse.
Parameters
----------
a : float or `~astropy.units.Quantity`
Major axis.
b : float or `~astropy.units.Quantity`
Minor axis.
theta : float or `~astropy.units.Quantity`
Rotation angle. If given as a floating-point value, it is assumed to be
in radians.
Returns
-------
offsets : tuple
The absolute value of the offset distances from the ellipse center that
define its bounding box region, ``(dx, dy)``.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Ellipse2D
from astropy.modeling.utils import ellipse_extent, render_model
amplitude = 1
x0 = 50
y0 = 50
a = 30
b = 10
theta = np.pi/4
model = Ellipse2D(amplitude, x0, y0, a, b, theta)
dx, dy = ellipse_extent(a, b, theta)
limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy]
model.bounding_box = limits
image = render_model(model)
plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5,
extent = limits)
plt.show()
"""
t = np.arctan2(-b * np.tan(theta), a)
dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta)
t = np.arctan2(b, a * np.tan(theta))
dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta)
if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity):
return np.abs(u.Quantity([dx, dy]))
else:
return np.abs([dx, dy])
def get_inputs_and_params(func):
"""
Given a callable, determine the input variables and the
parameters.
Parameters
----------
func : callable
Returns
-------
inputs, params : tuple
Each entry is a list of inspect.Parameter objects
"""
sig = signature(func)
inputs = []
params = []
for param in sig.parameters.values():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("Signature must not have *args or **kwargs")
if param.default == param.empty:
inputs.append(param)
else:
params.append(param)
return inputs, params
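# A minimal usage sketch (not part of the original module); ``gaussian`` here
# is a hypothetical callable used only for illustration:
#
#     def gaussian(x, amplitude=1.0, mean=0.0):
#         ...
#
#     inputs, params = get_inputs_and_params(gaussian)
#     # [p.name for p in inputs]  -> ['x']
#     # [p.name for p in params]  -> ['amplitude', 'mean']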
def _parameter_with_unit(parameter, unit):
if parameter.unit is None:
return parameter.value * unit
else:
return parameter.quantity.to(unit)
def _parameter_without_unit(value, old_unit, new_unit):
if old_unit is None:
return value
else:
return value * old_unit.to(new_unit)
def _combine_equivalency_dict(keys, eq1=None, eq2=None):
# Given two dictionaries that give equivalencies for a set of keys, for
# example input value names, return a dictionary that includes all the
# equivalencies
eq = {}
for key in keys:
eq[key] = []
if eq1 is not None and key in eq1:
eq[key].extend(eq1[key])
if eq2 is not None and key in eq2:
eq[key].extend(eq2[key])
return eq
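# A minimal sketch (not part of the original module); ``eq_a`` and ``eq_b``
# stand in for arbitrary equivalency entries:
#
#     _combine_equivalency_dict(['x', 'y'], {'x': [eq_a]}, {'x': [eq_b]})
#     # -> {'x': [eq_a, eq_b], 'y': []}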
def _to_radian(value):
""" Convert ``value`` to radian. """
if isinstance(value, u.Quantity):
return value.to(u.rad)
else:
return np.deg2rad(value)
def _to_orig_unit(value, raw_unit=None, orig_unit=None):
""" Convert value with ``raw_unit`` to ``orig_unit``. """
if raw_unit is not None:
return (value * raw_unit).to(orig_unit)
else:
return np.rad2deg(value)
| {
"content_hash": "7d803d67fd34b2d045abfb88d0d53e09",
"timestamp": "",
"source": "github",
"line_count": 647,
"max_line_length": 83,
"avg_line_length": 32.304482225656876,
"alnum_prop": 0.5675326539400029,
"repo_name": "funbaker/astropy",
"id": "614cc27f615e102ee72f56c4085ae86b063f2741",
"size": "20966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astropy/modeling/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "367279"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Python",
"bytes": "8331581"
},
{
"name": "TeX",
"bytes": "805"
}
],
"symlink_target": ""
} |
import inspect
import os
import sys
import django
from django.utils.encoding import force_str
from django.utils.html import strip_tags
from oscar import get_version, get_short_version
oscar_folder = os.path.realpath(
os.path.join(os.path.dirname(__file__), '../..'))
sandbox_folder = os.path.realpath(
os.path.join(os.path.dirname(__file__), '../../sandbox'))
sys.path.append(oscar_folder)
sys.path.append(sandbox_folder)
# Specify settings module (which will be picked up from the sandbox)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings_sphinx')
django.setup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
'sphinxcontrib.spelling',
'sphinx_issues',
]
# Going with British English as the default because of the project's history
spelling_lang = "en_GB"
spelling_word_list_filename = "spelling_wordlist.txt"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-oscar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
release = get_short_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_draft']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# Github repo for sphinx-issues
issues_github_path = 'django-oscar/django-oscar'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Use RTD theme locally
html_theme = 'default'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-oscardoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
'index',
'django-oscar.tex',
'django-oscar Documentation',
'David Winterbottom',
'manual',
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
'index',
'django-oscar',
'django-oscar Documentation',
['David Winterbottom'],
1,
),
]
# Autodoc settings
autoclass_content = 'class'
# Better documenting of Django models
# See http://djangosnippets.org/snippets/2533/
def process_docstring(app, what, name, obj, options, lines):
# This causes import errors if left outside the function
from django.db import models
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Ignore abstract models
if not hasattr(obj._meta, '_fields'):
return lines
# Grab the field list from the meta class
fields = obj._meta._fields()
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_str(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_str(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(':type %s: %s' % (field.attname, type(field).__name__))
# Return the extended docstring
return lines
def setup(app):
# Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
| {
"content_hash": "59f2eb2d44e87cf51fbc9247636b8500",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 80,
"avg_line_length": 31.348993288590606,
"alnum_prop": 0.680796403339756,
"repo_name": "solarissmoke/django-oscar",
"id": "211d023f1499403493189a489021fda3d8b78e24",
"size": "10099",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "529"
},
{
"name": "HTML",
"bytes": "562906"
},
{
"name": "JavaScript",
"bytes": "40879"
},
{
"name": "Makefile",
"bytes": "4234"
},
{
"name": "Python",
"bytes": "2199293"
},
{
"name": "SCSS",
"bytes": "21362"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0012_auto_20190408_1820'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='social_discord',
field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Discord-tag'),
),
]
| {
"content_hash": "482778d8c10060dd5ffb91d2f99eb754",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 101,
"avg_line_length": 25.0625,
"alnum_prop": 0.6034912718204489,
"repo_name": "hackerspace-ntnu/website",
"id": "c37feddbea55cae5477a39625ff44dd7c28b241d",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/migrations/0013_auto_20190408_1825.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16771"
},
{
"name": "HTML",
"bytes": "235369"
},
{
"name": "JavaScript",
"bytes": "43249"
},
{
"name": "Python",
"bytes": "323186"
}
],
"symlink_target": ""
} |