code | repo_name | path | language | license | size
---|---|---|---|---|---|
import pytest
class TestNcftp:
@pytest.mark.complete("ncftp ")
def test_1(self, completion):
assert completion
@pytest.mark.complete("ncftp -", require_cmd=True)
def test_2(self, completion):
assert completion
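# Illustrative note (the conftest behaviour described here is an assumption
# based on the bash-completion test-suite conventions, not part of this file):
# the ``complete`` marker supplies the command line to be completed and the
# ``completion`` fixture yields the generated completions, so a typical
# invocation from the suite root would be:
#
#   pytest test/t/test_ncftp.py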
| algorythmic/bash-completion | test/t/test_ncftp.py | Python | gpl-2.0 | 245 |
###############################################################################
# Name: nsistags.py #
# Purpose: Generate Tags for Nullsoft Installer Scripts #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2008 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
FILE: nsistags.py
AUTHOR: Cody Precord
LANGUAGE: Python
SUMMARY:
Generate a DocStruct object that captures the structure of an NSIS script. It
currently supports generating tags for Sections, Functions, Macro definitions,
and !define statements.
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: nsistags.py 52675 2008-03-22 03:34:38Z CJP $"
__revision__ = "$Revision: 52675 $"
#--------------------------------------------------------------------------#
# Dependencies
import taglib
import parselib
#--------------------------------------------------------------------------#
def GenerateTags(buff):
"""Create a DocStruct object that represents a NSIS Script
@param buff: a file like buffer object (StringIO)
@todo: generate tags for lua tables?
"""
rtags = taglib.DocStruct()
# Set Descriptions of Document Element Types
rtags.SetElementDescription('variable', "Defines")
rtags.SetElementDescription('section', "Section Definitions")
rtags.SetElementDescription('macro', "Macro Definitions")
rtags.SetElementDescription('function', "Function Definitions")
rtags.SetElementPriority('variable', 4)
rtags.SetElementPriority('section', 3)
rtags.SetElementPriority('function', 2)
rtags.SetElementPriority('macro', 1)
# Parse the lines for code objects
for lnum, line in enumerate(buff):
line = line.strip()
llen = len(line)
# Skip comment and empty lines
if line.startswith(u"#") or line.startswith(u";") or not line:
continue
# Look for functions and sections
if parselib.IsToken(line, 0, u'Function'):
parts = line.split()
if len(parts) > 1:
rtags.AddFunction(taglib.Function(parts[1], lnum))
elif parselib.IsToken(line, 0, u'Section'):
parts = line.split()
if len(parts) > 1 and parts[1][0] not in ['"', "'", "`"]:
rtags.AddElement('section', taglib.Section(parts[1], lnum))
else:
for idx, part in enumerate(parts[1:]):
if parts[idx][-1] in ['"', "'", "`"]:
rtags.AddElement('section', taglib.Section(part, lnum))
break
elif parselib.IsToken(line, 0, u'!macro'):
parts = line.split()
if len(parts) > 1:
rtags.AddElement('macro', taglib.Macro(parts[1], lnum))
elif parselib.IsToken(line, 0, u'!define'):
parts = line.split()
if len(parts) > 1 and parts[1][0].isalpha():
rtags.AddVariable(taglib.Variable(parts[1], lnum))
else:
continue
return rtags
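# Illustrative example (hypothetical NSIS input, not part of the original
# module): for a script containing the lines
#
#   !define PRODUCT_NAME "MyApp"
#   !macro InstallFiles
#   Function .onInit
#   Section "Main" SEC01
#
# GenerateTags would record PRODUCT_NAME as a variable, InstallFiles as a
# macro, .onInit as a function and SEC01 as a section (the identifier that
# follows the quoted section title).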
#-----------------------------------------------------------------------------#
# Test
if __name__ == '__main__':
import sys
import StringIO
fhandle = open(sys.argv[1])
txt = fhandle.read()
fhandle.close()
tags = GenerateTags(StringIO.StringIO(txt))
print "\n\nElements:"
for element in tags.GetElements():
print "\n%s:" % element.keys()[0]
for val in element.values()[0]:
print "%s [%d]" % (val.GetName(), val.GetLine())
print "END"
| garrettcap/Bulletproof-Backup | wx/tools/Editra/plugins/codebrowser/codebrowser/gentag/nsistags.py | Python | gpl-2.0 | 3,752 |
# Standard
import os
import sys
# Third Party
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pyfilm as pf
from skimage.measure import label
from skimage import filters
plt.rcParams.update({'figure.autolayout': True})
mpl.rcParams['axes.unicode_minus'] = False
# Local
from run import Run
import plot_style
plot_style.white()
pal = sns.color_palette('deep')
def structure_analysis(run, perc_thresh, create_film=False):
"""
Calculates the number of structures as a function of time for a given
percentile cut-off. Writes results and plots to an appropriate directory.
Parameters
----------
run : object
Run object calculated by the Run class.
perc_thresh : int
Percentile threshold at which to cut off fluctuations.
create_film : bool
Determines whether a film of the labelled structures is produced.
"""
run.read_ntot()
make_results_dir(run, perc_thresh)
labelled_image, nlabels = label_structures(run, perc_thresh)
no_structures = count_structures(run, labelled_image, nlabels)
plot_no_structures(run, no_structures, perc_thresh)
save_results(run, no_structures, perc_thresh)
if create_film:
make_film(run, no_structures, labelled_image, perc_thresh)
def make_results_dir(run, perc_thresh):
os.system('mkdir -p ' + run.run_dir + 'analysis/structures_' +
str(perc_thresh))
def label_structures(run, perc_thresh):
nlabels = np.empty(run.nt, dtype=int)
labelled_image = np.empty([run.nt, run.nx, run.ny], dtype=int)
for it in range(run.nt):
tmp = run.ntot_i[it,:,:].copy()
# Apply Gaussian filter
tmp = filters.gaussian(tmp, sigma=1)
thresh = np.percentile(tmp, perc_thresh,
interpolation='nearest')
tmp_max = np.max(tmp)
tmp_thresh = thresh/tmp_max
tmp /= tmp_max
tmp[tmp <= tmp_thresh] = 0
tmp[tmp > tmp_thresh] = 1
# Label the resulting structures
labelled_image[it,:,:], nlabels[it] = label(tmp, return_num=True,
background=0)
return(labelled_image, nlabels)
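# A minimal sketch of the threshold-and-label step used above, on synthetic
# data (the array shape and the percentile value are illustrative assumptions):
#
#   import numpy as np
#   from skimage.measure import label
#   field = np.random.rand(64, 64)
#   cut = np.percentile(field, 75)
#   binary = (field > cut).astype(int)
#   labelled, nlabels = label(binary, return_num=True, background=0)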
def count_structures(run, labelled_image, nlabels):
"""
Remove any structures which are too small and count structures.
"""
nblobs = np.empty(run.nt, dtype=int)
for it in range(run.nt):
hist = np.histogram(np.ravel(labelled_image[it]),
bins=range(1,nlabels[it]+1))[0]
smallest_struc = np.mean(hist)*0.1
hist = hist[hist > smallest_struc]
nblobs[it] = len(hist)
return(nblobs)
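# Worked example with hypothetical numbers: if the per-label histogram is
# [40, 3, 55, 2] pixels, its mean is 25, so the cut-off is 0.1 * 25 = 2.5;
# the 2-pixel structure is discarded and nblobs for that time step is 3.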
def plot_no_structures(run, no_structures, perc_thresh):
"""
Plot number of structures as a function of time.
"""
plt.clf()
plt.plot(no_structures)
plt.xlabel('Time index')
plt.ylabel('Number of structures')
plt.ylim(0)
plt.savefig(run.run_dir + 'analysis/structures_' + str(perc_thresh) +
'/nblobs.pdf')
def save_results(run, no_structures, perc_thresh):
"""
Save the number of structures as a function of time in a file.
"""
np.savetxt(run.run_dir + 'analysis/structures_' + str(perc_thresh) +
'/nblobs.csv', np.transpose((range(run.nt), no_structures)),
delimiter=',', fmt='%d', header='t_index,nblobs')
def make_film(run, no_structures, labelled_image, perc_thresh):
titles = []
for it in range(run.nt):
titles.append('No. of structures = {}'.format(no_structures[it]))
plot_options = {'cmap':'gist_rainbow',
'levels':np.arange(-1,np.max(labelled_image))
}
options = {'file_name':'structures',
'film_dir':run.run_dir + 'analysis/structures_' +
str(perc_thresh) ,
'frame_dir':run.run_dir + 'analysis/structures_' +
str(perc_thresh) + '/film_frames',
'nprocs':None,
'aspect':'equal',
'xlabel':r'$x$ (m)',
'ylabel':r'$y$ (m)',
'cbar_ticks':np.arange(-1,np.max(labelled_image),2),
'cbar_label':r'Label',
'fps':10,
'bbox_inches':'tight',
'title':titles
}
pf.make_film_2d(run.r, run.z, labelled_image,
plot_options=plot_options, options=options)
if __name__ == '__main__':
run = Run(sys.argv[1])
structure_analysis(run, 75, create_film=False)
structure_analysis(run, 95, create_film=False)
| ferdinandvwyk/gs2_analysis | structure_analysis.py | Python | gpl-2.0 | 4,637 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
# pylint: disable=E1102
"""WebSearch module regression tests."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase, InvenioXmlTestCase
import re
import urlparse, cgi
import sys
import cStringIO
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from mechanize import Browser, LinkNotFoundError
from invenio.config import (CFG_SITE_URL,
CFG_SITE_NAME,
CFG_SITE_LANG,
CFG_SITE_RECORD,
CFG_SITE_LANGS,
CFG_SITE_SECURE_URL,
CFG_WEBSEARCH_SPIRES_SYNTAX,
CFG_BASE_URL)
from invenio.testutils import (make_test_suite,
run_test_suite,
nottest,
make_url,
make_surl,
make_rurl,
test_web_page_content,
merge_error_messages,
InvenioXmlTestCase)
from invenio.urlutils import same_urls_p
from invenio.dbquery import run_sql
from invenio.webinterface_handler_wsgi import SimulatedModPythonRequest
from invenio.search_engine import perform_request_search, \
guess_primary_collection_of_a_record, guess_collection_of_a_record, \
collection_restricted_p, get_permitted_restricted_collections, \
search_pattern, search_unit, search_unit_in_bibrec, \
wash_colls, record_public_p
from invenio import search_engine_summarizer
from invenio.search_engine_utils import get_fieldvalues
from invenio.intbitset import intbitset
from invenio.search_engine import intersect_results_with_collrecs
from invenio.bibrank_bridge_utils import get_external_word_similarity_ranker
from invenio.search_engine_query_parser_unit_tests import DATEUTIL_AVAILABLE
from invenio.bibindex_engine_utils import get_index_tags
from invenio.bibindex_engine_config import CFG_BIBINDEX_INDEX_TABLE_TYPE
if 'fr' in CFG_SITE_LANGS:
lang_french_configured = True
else:
lang_french_configured = False
def parse_url(url):
parts = urlparse.urlparse(url)
query = cgi.parse_qs(parts[4], True)
return parts[2].split('/')[1:], query
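# Illustrative example (URL values assumed): for CFG_SITE_URL + '/search?p=ellis&of=id'
# parse_url returns (['search'], {'p': ['ellis'], 'of': ['id']}).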
def string_combinations(str_list):
"""Returns all the possible combinations of the strings in the list.
Example: for the list ['A','B','Cd'], it will return
[['Cd', 'B', 'A'], ['B', 'A'], ['Cd', 'A'], ['A'], ['Cd', 'B'], ['B'], ['Cd'], []]
It adds "B", "H", "F" and "S" values to the results so different
combinations of them are also checked.
"""
out_list = []
for i in range(len(str_list) + 1):
out_list += list(combinations(str_list, i))
for i in range(len(out_list)):
out_list[i] = (list(out_list[i]) + {
0: lambda: ["B", "H", "S"],
1: lambda: ["B", "H", "F"],
2: lambda: ["B", "F", "S"],
3: lambda: ["B", "F"],
4: lambda: ["B", "S"],
5: lambda: ["B", "H"],
6: lambda: ["B"]
}[i % 7]())
return out_list
def combinations(iterable, r):
"""Return r length subsequences of elements from the input iterable."""
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(range(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
class WebSearchWebPagesAvailabilityTest(InvenioTestCase):
"""Check WebSearch web pages whether they are up or not."""
def test_search_interface_pages_availability(self):
"""websearch - availability of search interface pages"""
baseurl = CFG_SITE_URL + '/'
_exports = ['', 'collection/Poetry', 'collection/Poetry?as=1']
error_messages = []
for url in [baseurl + page for page in _exports]:
error_messages.extend(test_web_page_content(url))
if error_messages:
self.fail(merge_error_messages(error_messages))
return
def test_search_results_pages_availability(self):
"""websearch - availability of search results pages"""
baseurl = CFG_SITE_URL + '/search'
_exports = ['', '?c=Poetry', '?p=ellis', '/cache', '/log']
error_messages = []
for url in [baseurl + page for page in _exports]:
error_messages.extend(test_web_page_content(url))
if error_messages:
self.fail(merge_error_messages(error_messages))
return
def test_search_detailed_record_pages_availability(self):
"""websearch - availability of search detailed record pages"""
baseurl = CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/'
_exports = ['', '1', '1/', '1/files', '1/files/']
error_messages = []
for url in [baseurl + page for page in _exports]:
error_messages.extend(test_web_page_content(url))
if error_messages:
self.fail(merge_error_messages(error_messages))
return
def test_browse_results_pages_availability(self):
"""websearch - availability of browse results pages"""
baseurl = CFG_SITE_URL + '/search'
_exports = ['?p=ellis&f=author&action_browse=Browse']
error_messages = []
for url in [baseurl + page for page in _exports]:
error_messages.extend(test_web_page_content(url))
if error_messages:
self.fail(merge_error_messages(error_messages))
return
def test_help_page_availability(self):
"""websearch - availability of Help Central page"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help',
expected_text="Help Central"))
if lang_french_configured:
def test_help_page_availability_fr(self):
"""websearch - availability of Help Central page in french"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help/?ln=fr',
expected_text="Centre d'aide"))
def test_search_tips_page_availability(self):
"""websearch - availability of Search Tips"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help/search-tips',
expected_text="Search Tips"))
if lang_french_configured:
def test_search_tips_page_availability_fr(self):
"""websearch - availability of Search Tips in french"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help/search-tips?ln=fr',
expected_text="Conseils de recherche"))
def test_search_guide_page_availability(self):
"""websearch - availability of Search Guide"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help/search-guide',
expected_text="Search Guide"))
if lang_french_configured:
def test_search_guide_page_availability_fr(self):
"""websearch - availability of Search Guide in french"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help/search-guide?ln=fr',
expected_text="Guide de recherche"))
class WebSearchTestLegacyURLs(InvenioTestCase):
""" Check that the application still responds to legacy URLs for
navigating, searching and browsing."""
def test_legacy_collections(self):
""" websearch - collections handle legacy urls """
browser = Browser()
def check(legacy, new, browser=browser):
browser.open(legacy)
got = browser.geturl()
self.failUnless(same_urls_p(got, new), got)
# Use the root URL unless we need more
check(make_url('/', c=CFG_SITE_NAME),
make_url('/', ln=CFG_SITE_LANG))
# Other collections are redirected in the /collection area
check(make_url('/', c='Poetry'),
make_url('/collection/Poetry', ln=CFG_SITE_LANG))
# Drop unnecessary arguments, like ln and as (when they are
# the default value)
args = {'as': 0}
check(make_url('/', c='Poetry', **args),
make_url('/collection/Poetry', ln=CFG_SITE_LANG))
# Otherwise, keep them
args = {'as': 1, 'ln': CFG_SITE_LANG}
check(make_url('/', c='Poetry', **args),
make_url('/collection/Poetry', **args))
# Support the /index.py addressing too
check(make_url('/index.py', c='Poetry'),
make_url('/collection/Poetry', ln=CFG_SITE_LANG))
def test_legacy_search(self):
""" websearch - search queries handle legacy urls """
browser = Browser()
def check(legacy, new, browser=browser):
browser.open(legacy)
got = browser.geturl()
self.failUnless(same_urls_p(got, new), got)
# /search.py is redirected on /search
# Note that `as' is a reserved word in Python 2.5
check(make_url('/search.py', p='nuclear', ln='en') + 'as=1',
make_url('/search', p='nuclear', ln='en') + 'as=1')
if lang_french_configured:
def test_legacy_search_fr(self):
""" websearch - search queries handle legacy urls """
browser = Browser()
def check(legacy, new, browser=browser):
browser.open(legacy)
got = browser.geturl()
self.failUnless(same_urls_p(got, new), got)
# direct recid searches are redirected to /CFG_SITE_RECORD
check(make_url('/search.py', recid=1, ln='fr'),
make_url('/%s/1' % CFG_SITE_RECORD, ln='fr'))
def test_legacy_search_help_link(self):
"""websearch - legacy Search Help page link"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help/search/index.en.html',
expected_text="Help Central"))
if lang_french_configured:
def test_legacy_search_tips_link(self):
"""websearch - legacy Search Tips page link"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help/search/tips.fr.html',
expected_text="Conseils de recherche"))
def test_legacy_search_guide_link(self):
"""websearch - legacy Search Guide page link"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help/search/guide.en.html',
expected_text="Search Guide"))
class WebSearchTestRecord(InvenioTestCase):
""" Check the interface of the /CFG_SITE_RECORD results """
def test_format_links(self):
""" websearch - check format links for records """
browser = Browser()
# We open the record in all known HTML formats
for hformat in ('hd', 'hx', 'hm'):
browser.open(make_url('/%s/1' % CFG_SITE_RECORD, of=hformat))
if hformat == 'hd':
# hd format should have a link to the following
# formats
for oformat in ('hx', 'hm', 'xm', 'xd'):
target = '%s/%s/1/export/%s?ln=en' % \
(CFG_BASE_URL, CFG_SITE_RECORD, oformat)
try:
browser.find_link(url=target)
except LinkNotFoundError:
self.fail('link %r should be in page' % target)
else:
# non-hd HTML formats should have a link back to
# the main detailed record
target = '%s/%s/1' % (CFG_BASE_URL, CFG_SITE_RECORD)
try:
browser.find_link(url=target)
except LinkNotFoundError:
self.fail('link %r should be in page' % target)
return
def test_exported_formats(self):
""" websearch - check formats exported through /CFG_SITE_RECORD/1/export/ URLs"""
self.assertEqual([],
test_web_page_content(make_url('/%s/1/export/hm' % CFG_SITE_RECORD),
expected_text='245__ $$aALEPH experiment'))
self.assertEqual([],
test_web_page_content(make_url('/%s/1/export/hd' % CFG_SITE_RECORD),
expected_text='<strong>ALEPH experiment'))
self.assertEqual([],
test_web_page_content(make_url('/%s/1/export/xm' % CFG_SITE_RECORD),
expected_text='<subfield code="a">ALEPH experiment'))
self.assertEqual([],
test_web_page_content(make_url('/%s/1/export/xd' % CFG_SITE_RECORD),
expected_text='<dc:title>ALEPH experiment'))
self.assertEqual([],
test_web_page_content(make_url('/%s/1/export/hs' % CFG_SITE_RECORD),
expected_text='<a href="/%s/1?ln=%s">ALEPH experiment' % \
(CFG_SITE_RECORD, CFG_SITE_LANG)))
self.assertEqual([],
test_web_page_content(make_url('/%s/1/export/hx' % CFG_SITE_RECORD),
expected_text='title = "{ALEPH experiment'))
self.assertEqual([],
test_web_page_content(make_url('/%s/1/export/t?ot=245' % CFG_SITE_RECORD),
expected_text='245__ $$aALEPH experiment'))
self.assertNotEqual([],
test_web_page_content(make_url('/%s/1/export/t?ot=245' % CFG_SITE_RECORD),
expected_text='001__'))
self.assertEqual([],
test_web_page_content(make_url('/%s/1/export/h?ot=245' % CFG_SITE_RECORD),
expected_text='245__ $$aALEPH experiment'))
self.assertNotEqual([],
test_web_page_content(make_url('/%s/1/export/h?ot=245' % CFG_SITE_RECORD),
expected_text='001__'))
return
def test_plots_tab(self):
""" websearch - test to ensure the plots tab is working """
self.assertEqual([],
test_web_page_content(make_url('/%s/8/plots' % CFG_SITE_RECORD),
expected_text='div id="clip"',
unexpected_text='Abstract'))
def test_meta_header(self):
""" websearch - test that metadata embedded in header of hd
relies on hdm format and Default_HTML_meta bft, but hook is in
websearch to display the format
"""
self.assertEqual([],
test_web_page_content(make_url('/record/1'),
expected_text='<meta content="ALEPH experiment: Candidate of Higgs boson production" name="citation_title" />'))
return
class WebSearchTestCollections(InvenioTestCase):
def test_traversal_links(self):
""" websearch - traverse all the publications of a collection """
browser = Browser()
try:
for aas in (0, 1):
args = {'as': aas}
browser.open(make_url('/collection/Preprints', **args))
for jrec in (11, 21, 11, 27):
args = {'jrec': jrec, 'cc': 'Preprints'}
if aas:
args['as'] = aas
url = make_rurl('/search', **args)
try:
browser.follow_link(url=url)
except LinkNotFoundError:
args['ln'] = CFG_SITE_LANG
url = make_rurl('/search', **args)
browser.follow_link(url=url)
except LinkNotFoundError:
self.fail('no link %r in %r' % (url, browser.geturl()))
def test_collections_links(self):
""" websearch - enter in collections and subcollections """
browser = Browser()
def tryfollow(url):
cur = browser.geturl()
body = browser.response().read()
try:
browser.follow_link(url=url)
except LinkNotFoundError:
print body
self.fail("in %r: could not find %r" % (
cur, url))
return
for aas in (0, 1):
if aas:
kargs = {'as': 1}
else:
kargs = {}
kargs['ln'] = CFG_SITE_LANG
# We navigate from immediate son to immediate son...
browser.open(make_url('/', **kargs))
tryfollow(make_rurl('/collection/Articles%20%26%20Preprints',
**kargs))
tryfollow(make_rurl('/collection/Articles', **kargs))
# But we can also jump to a grandson immediately
browser.back()
browser.back()
tryfollow(make_rurl('/collection/ALEPH', **kargs))
return
def test_records_links(self):
""" websearch - check the links toward records in leaf collections """
browser = Browser()
browser.open(make_url('/collection/Preprints'))
def harvest():
""" Parse all the links in the page, and check that for
each link to a detailed record, we also have the
corresponding link to the similar records."""
records = set()
similar = set()
for link in browser.links():
path, q = parse_url(link.url)
if not path:
continue
if path[0] == CFG_SITE_RECORD:
records.add(int(path[1]))
continue
if path[0] == 'search':
if not q.get('rm') == ['wrd']:
continue
recid = q['p'][0].split(':')[1]
similar.add(int(recid))
self.failUnlessEqual(records, similar)
return records
# We must have 10 links to the corresponding /CFG_SITE_RECORD
found = harvest()
self.failUnlessEqual(len(found), 10)
# When clicking on the "Search" button, we must also have
# these 10 links on the records.
browser.select_form(name="search")
browser.submit()
found = harvest()
self.failUnlessEqual(len(found), 10)
return
def test_em_parameter(self):
""" websearch - check different values of em return different parts of the collection page"""
for combi in string_combinations(["L", "P", "Prt"]):
url = '/collection/Articles?em=%s' % ','.join(combi)
expected_text = ["<strong>Development of photon beam diagnostics for VUV radiation from a SASE FEL</strong>"]
unexpected_text = []
if "H" in combi:
expected_text.append(">Atlantis Institute of Fictive Science</a>")
else:
unexpected_text.append(">Atlantis Institute of Fictive Science</a>")
if "F" in combi:
expected_text.append("This site is also available in the following languages:")
else:
unexpected_text.append("This site is also available in the following languages:")
if "S" in combi:
expected_text.append('value="Search"')
else:
unexpected_text.append('value="Search"')
if "L" in combi:
expected_text.append('Search also:')
else:
unexpected_text.append('Search also:')
if "Prt" in combi or "P" in combi:
expected_text.append('<div class="portalboxheader">ABOUT ARTICLES</div>')
else:
unexpected_text.append('<div class="portalboxheader">ABOUT ARTICLES</div>')
self.assertEqual([], test_web_page_content(make_url(url),
expected_text=expected_text,
unexpected_text=unexpected_text))
def test_canonical_and_alternate_urls_quoting(self):
""" websearch - check that canonical and alternate URL in collection page header are properly quoted"""
url = CFG_SITE_URL + '/collection/Experimental%20Physics%20%28EP%29?ln=en'
expected_text = ['<link rel="alternate" hreflang="en" href="' + CFG_SITE_URL + '/collection/Experimental%20Physics%20%28EP%29?ln=en" />',
'<link rel="canonical" href="' + CFG_SITE_URL + '/collection/Experimental%20Physics%20%28EP%29" />']
unexpected_text = ['<link rel="alternate" hreflang="en" href="' + CFG_SITE_URL + '/collection/Experimental Physics (EP)?ln=en" />',
'<link rel="canonical" href="' + CFG_SITE_URL + '/collection/Experimental Physics (EP)" />']
self.assertEqual([], test_web_page_content(url,
expected_text=expected_text,
unexpected_text=unexpected_text))
class WebSearchTestBrowse(InvenioTestCase):
def test_browse_field(self):
""" websearch - check that browsing works """
browser = Browser()
browser.open(make_url('/', ln="en"))
browser.select_form(name='search')
browser['f'] = ['title']
browser.submit(name='action_browse')
def collect():
# We'll get a few links to search for the actual hits, plus a
# link to the following results.
res = []
for link in browser.links():
if not link.url.startswith("%s/search" % (CFG_BASE_URL,)):
continue
if "as=1" in link.url or "action=browse" in link.url:
continue
for attr in link.attrs:
if "class" in attr:
break
else:
dummy, q = parse_url(link.url)
res.append((link, q))
return res
# Here we should have 4 links to different records
batch_1 = collect()
self.assertEqual(4, len(batch_1))
# if we follow the next link, we should get another
# batch of 4. There is an overlap of one item.
next_link = [l for l in browser.links() if l.text == "next"][0]
browser.follow_link(link=next_link)
batch_2 = collect()
self.assertEqual(8, len(batch_2))
# FIXME: we cannot compare the whole query, as the collection
# set is not equal
# Expecting "A naturalist\'s voyage around the world"
# Last link in batch_1 should equal the 4th link in batch_2
self.failUnlessEqual(batch_1[-1][1]['p'], batch_2[3][1]['p'])
def test_browse_restricted_record_as_unauthorized_user(self):
"""websearch - browse for a record that belongs to a restricted collection as an unauthorized user."""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&ln=en',
username = 'guest',
expected_text = ['Hits', '088__a'],
unexpected_text = ['>CERN-THESIS-99-074</a>'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_browse_restricted_record_as_unauthorized_user_in_restricted_collection(self):
"""websearch - browse for a record that belongs to a restricted collection as an unauthorized user."""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&c=ALEPH+Theses&ln=en',
username='guest',
expected_text= ['This collection is restricted'],
unexpected_text= ['Hits', '>CERN-THESIS-99-074</a>'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_browse_restricted_record_as_authorized_user(self):
"""websearch - browse for a record that belongs to a restricted collection as an authorized user."""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&ln=en',
username='admin',
password='',
expected_text= ['Hits', '088__a'],
unexpected_text = ['>CERN-THESIS-99-074</a>'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_browse_restricted_record_as_authorized_user_in_restricted_collection(self):
"""websearch - browse for a record that belongs to a restricted collection as an authorized user."""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&c=ALEPH+Theses&ln=en',
username='admin',
password='',
expected_text= ['Hits', '>CERN-THESIS-99-074</a>'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_browse_exact_author_help_link(self):
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=Dasse%2C+Michel&f=author&action_browse=Browse',
username = 'guest',
expected_text = ['Did you mean to browse in', 'index?'])
if error_messages:
self.fail(merge_error_messages(error_messages))
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=Dasse%2C+Michel&f=firstauthor&action_browse=Browse',
username = 'guest',
expected_text = ['Did you mean to browse in', 'index?'])
if error_messages:
self.fail(merge_error_messages(error_messages))
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&as=1&m1=a&p1=Dasse%2C+Michel&f1=author&op1=a&m2=a&p2=&f2=firstauthor&op2=a&m3=a&p3=&f3=&action_browse=Browse',
username = 'guest',
expected_text = ['Did you mean to browse in', 'index?'])
if error_messages:
self.fail(merge_error_messages(error_messages))
class WebSearchTestOpenURL(InvenioTestCase):
def test_isbn_01(self):
""" websearch - isbn query via OpenURL 0.1"""
browser = Browser()
# We do a precise search in an isolated collection
browser.open(make_url('/openurl', isbn='0387940758'))
dummy, current_q = parse_url(browser.geturl())
self.failUnlessEqual(current_q, {
'sc' : ['1'],
'p' : ['isbn:"0387940758"'],
'of' : ['hd']
})
def test_isbn_10_rft_id(self):
""" websearch - isbn query via OpenURL 1.0 - rft_id"""
browser = Browser()
# We do a precise search in an isolated collection
browser.open(make_url('/openurl', rft_id='urn:ISBN:0387940758'))
dummy, current_q = parse_url(browser.geturl())
self.failUnlessEqual(current_q, {
'sc' : ['1'],
'p' : ['isbn:"0387940758"'],
'of' : ['hd']
})
def test_isbn_10(self):
""" websearch - isbn query via OpenURL 1.0"""
browser = Browser()
# We do a precise search in an isolated collection
browser.open(make_url('/openurl?rft.isbn=0387940758'))
dummy, current_q = parse_url(browser.geturl())
self.failUnlessEqual(current_q, {
'sc' : ['1'],
'p' : ['isbn:"0387940758"'],
'of' : ['hd']
})
class WebSearchTestSearch(InvenioTestCase):
def test_hits_in_other_collection(self):
""" websearch - check extension of a query to the home collection """
browser = Browser()
# We do a precise search in an isolated collection
browser.open(make_url('/collection/ISOLDE', ln='en'))
browser.select_form(name='search')
browser['f'] = ['author']
browser['p'] = 'matsubara'
browser.submit()
dummy, current_q = parse_url(browser.geturl())
link = browser.find_link(text_regex=re.compile('.*hit', re.I))
dummy, target_q = parse_url(link.url)
# the target query should be the current query without any c
# or cc specified.
for f in ('cc', 'c', 'action_search'):
if f in current_q:
del current_q[f]
self.failUnlessEqual(current_q, target_q)
def test_nearest_terms(self):
""" websearch - provide a list of nearest terms """
browser = Browser()
browser.open(make_url(''))
# Search something weird
browser.select_form(name='search')
browser['p'] = 'gronf'
browser.submit()
dummy, original = parse_url(browser.geturl())
for to_drop in ('cc', 'action_search', 'f'):
if to_drop in original:
del original[to_drop]
if 'ln' not in original:
original['ln'] = [CFG_SITE_LANG]
# we should get a few searches back, which are identical
# except for the p field being substituted (and the cc field
# being dropped).
if 'cc' in original:
del original['cc']
for link in browser.links(url_regex=re.compile(CFG_SITE_URL + r'/search\?')):
if link.text == 'Advanced Search':
continue
dummy, target = parse_url(link.url)
if 'ln' not in target:
target['ln'] = [CFG_SITE_LANG]
original['p'] = [link.text]
self.failUnlessEqual(original, target)
return
def test_switch_to_simple_search(self):
""" websearch - switch to simple search """
browser = Browser()
args = {'as': 1}
browser.open(make_url('/collection/ISOLDE', **args))
browser.select_form(name='search')
browser['p1'] = 'tandem'
browser['f1'] = ['title']
browser.submit()
browser.follow_link(text='Simple Search')
dummy, q = parse_url(browser.geturl())
self.failUnlessEqual(q, {'cc': ['ISOLDE'],
'p': ['tandem'],
'f': ['title'],
'ln': ['en']})
def test_switch_to_advanced_search(self):
""" websearch - switch to advanced search """
browser = Browser()
browser.open(make_url('/collection/ISOLDE'))
browser.select_form(name='search')
browser['p'] = 'tandem'
browser['f'] = ['title']
browser.submit()
browser.follow_link(text='Advanced Search')
dummy, q = parse_url(browser.geturl())
self.failUnlessEqual(q, {'cc': ['ISOLDE'],
'p1': ['tandem'],
'f1': ['title'],
'as': ['1'],
'ln' : ['en']})
def test_no_boolean_hits(self):
""" websearch - check the 'no boolean hits' proposed links """
browser = Browser()
browser.open(make_url(''))
browser.select_form(name='search')
browser['p'] = 'quasinormal muon'
browser.submit()
dummy, q = parse_url(browser.geturl())
for to_drop in ('cc', 'action_search', 'f'):
if to_drop in q:
del q[to_drop]
for bsu in ('quasinormal', 'muon'):
l = browser.find_link(text=bsu)
q['p'] = bsu
if not same_urls_p(l.url, make_rurl('/search', **q)):
self.fail(repr((l.url, make_rurl('/search', **q))))
def test_similar_authors(self):
""" websearch - test similar authors box """
browser = Browser()
browser.open(make_url(''))
browser.select_form(name='search')
browser['p'] = 'Ellis, R K'
browser['f'] = ['author']
browser.submit()
l = browser.find_link(text="Ellis, R S")
urlargs = dict(p="Ellis, R S", f='author', ln='en')
self.failUnless(same_urls_p(l.url, make_rurl('/search', **urlargs)))
def test_em_parameter(self):
""" websearch - check different values of em return different parts of the search page"""
for combi in string_combinations(["K", "A", "I", "O"]):
url = '/search?ln=en&cc=Articles+%%26+Preprints&sc=1&c=Articles&c=Preprints&em=%s' % ','.join(combi)
expected_text = ["<strong>Development of photon beam diagnostics for VUV radiation from a SASE FEL</strong>"]
unexpected_text = []
if "H" in combi:
expected_text.append(">Atlantis Institute of Fictive Science</a>")
else:
unexpected_text.append(">Atlantis Institute of Fictive Science</a>")
if "F" in combi:
expected_text.append("This site is also available in the following languages:")
else:
unexpected_text.append("This site is also available in the following languages:")
if "S" in combi:
expected_text.append('value="Search"')
else:
unexpected_text.append('value="Search"')
if "K" in combi:
expected_text.append('value="Add to basket"')
else:
unexpected_text.append('value="Add to basket"')
if "A" in combi:
expected_text.append('Interested in being notified about new results for this query?')
else:
unexpected_text.append('Interested in being notified about new results for this query?')
if "I" in combi:
expected_text.append('jump to record:')
else:
unexpected_text.append('jump to record:')
if "O" in combi:
expected_text.append('<th class="searchresultsboxheader"><strong>Results overview:</strong> Found <strong>')
else:
unexpected_text.append('<th class="searchresultsboxheader"><strong>Results overview:</strong> Found <strong>')
self.assertEqual([], test_web_page_content(make_url(url),
expected_text=expected_text,
unexpected_text=unexpected_text))
return
class WebSearchCJKTokenizedSearchTest(InvenioTestCase):
"""
    Reindexes record 104 (the one with Chinese poetry) using BibIndexCJKTokenizer.
    After the tests it reindexes record 104 back with BibIndexDefaultTokenizer.
    Checks that record 104 can be found by specifying only one or two CJK characters.
"""
test_counter = 0
reindexed = False
index_name = 'title'
@classmethod
def setUp(self):
if not self.reindexed:
from invenio.bibindex_engine import WordTable, AbstractIndexTable
query = """SELECT last_updated FROM idxINDEX WHERE name='%s'""" % self.index_name
self.last_updated = run_sql(query)[0][0]
query = """UPDATE idxINDEX SET tokenizer='BibIndexCJKTokenizer', last_updated='0000-00-00 00:00:00'
WHERE name='%s'""" % self.index_name
run_sql(query)
self.reindexed = True
wordTable = WordTable(index_name=self.index_name,
fields_to_index=get_index_tags(self.index_name),
table_type = CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"])
wordTable.turn_off_virtual_indexes()
wordTable.add_recIDs([[104, 104]], 10000)
@classmethod
def tearDown(self):
self.test_counter += 1
if self.test_counter == 2:
from invenio.bibindex_engine import WordTable, AbstractIndexTable
query = """UPDATE idxINDEX SET tokenizer='BibIndexDefaultTokenizer', last_updated='%s'
WHERE name='%s'""" % (self.last_updated, self.index_name)
run_sql(query)
wordTable = WordTable(index_name=self.index_name,
fields_to_index=get_index_tags(self.index_name),
table_type = CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"])
wordTable.turn_off_virtual_indexes()
wordTable.add_recIDs([[104, 104]], 10000)
def test_title_cjk_tokenized_two_characters(self):
"""CJKTokenizer - test for finding chinese poetry with two CJK characters"""
self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=title%3A敬亭&f=&of=id',
expected_text='[104]'))
def test_title_cjk_tokenized_single_character(self):
"""CJKTokenizer - test for finding chinese poetry with one CJK character"""
self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=title%3A亭&f=&of=id',
expected_text='[104]'))
class WebSearchTestWildcardLimit(InvenioTestCase):
"""Checks if the wildcard limit is correctly passed and that
    users without authorization cannot exploit it."""
def test_wildcard_limit_correctly_passed_when_not_set(self):
"""websearch - wildcard limit is correctly passed when default"""
self.assertEqual(search_pattern(p='e*', f='author'),
search_pattern(p='e*', f='author', wl=1000))
def test_wildcard_limit_correctly_passed_when_set(self):
"""websearch - wildcard limit is correctly passed when set"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=e*&f=author&of=id&wl=5&rg=100',
expected_text="[96, 92, 88, 81, 74, 72, 67, 54, 53, 52, 51, 50, 48, 46, 17, 11, 10, 9]"))
def test_wildcard_limit_correctly_not_active(self):
"""websearch - wildcard limit is not active when there is no wildcard query"""
self.assertEqual(search_pattern(p='ellis', f='author'),
search_pattern(p='ellis', f='author', wl=1))
def test_wildcard_limit_increased_by_authorized_users(self):
"""websearch - wildcard limit increased by authorized user"""
browser = Browser()
#try a search query, with no wildcard limit set by the user
browser.open(make_url('/search?p=a*&of=id'))
        recid_list_guest_no_limit = browser.response().read() # so the limit is CFG_WEBSEARCH_WILDCARD_LIMIT
#try a search query, with a wildcard limit imposed by the user
        #wl=1000000 - a very high limit, higher than what the CFG_WEBSEARCH_WILDCARD_LIMIT might be
browser.open(make_url('/search?p=a*&of=id&wl=1000000'))
recid_list_guest_with_limit = browser.response().read()
#same results should be returned for a search without the wildcard limit set by the user
#and for a search with a large limit set by the user
        #in this way we know that no matter how large the limit is, the wildcard query will be
        #limited by CFG_WEBSEARCH_WILDCARD_LIMIT (for a guest user)
self.failIf(len(recid_list_guest_no_limit.split(',')) != len(recid_list_guest_with_limit.split(',')))
##login as admin
browser.open(make_surl('/youraccount/login'))
browser.select_form(nr=0)
browser['p_un'] = 'admin'
browser['p_pw'] = ''
browser.submit()
#try a search query, with a wildcard limit imposed by an authorized user
        #wl = 10000 - a very high limit, higher than what the CFG_WEBSEARCH_WILDCARD_LIMIT might be
browser.open(make_surl('/search?p=a*&of=id&wl=10000'))
recid_list_authuser_with_limit = browser.response().read()
#the authorized user can set whatever limit he might wish
        #so, the results returned for the auth. user should be at least as many as those returned for unauth. users
self.failUnless(len(recid_list_guest_no_limit.split(',')) <= len(recid_list_authuser_with_limit.split(',')))
#logout
browser.open(make_surl('/youraccount/logout'))
browser.response().read()
browser.close()
class WebSearchNearestTermsTest(InvenioTestCase):
"""Check various alternatives of searches leading to the nearest
terms box."""
def test_nearest_terms_box_in_okay_query(self):
""" websearch - no nearest terms box for a successful query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
expected_text="jump to record"))
def test_nearest_terms_box_in_unsuccessful_simple_query(self):
""" websearch - nearest terms box for unsuccessful simple query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellisz',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=embed",
expected_link_label='embed'))
def test_nearest_terms_box_in_unsuccessful_simple_accented_query(self):
""" websearch - nearest terms box for unsuccessful accented query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=elliszà',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=embed",
expected_link_label='embed'))
def test_nearest_terms_box_in_unsuccessful_structured_query(self):
""" websearch - nearest terms box for unsuccessful structured query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellisz&f=author',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=eisenhandler&f=author",
expected_link_label='eisenhandler'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=author%3Aellisz',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=author%3Aeisenhandler",
expected_link_label='eisenhandler'))
def test_nearest_terms_box_in_query_with_invalid_index(self):
""" websearch - nearest terms box for queries with invalid indexes specified """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=bednarz%3Aellis',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=bednarz",
expected_link_label='bednarz'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=1%3Aellis',
expected_text="no index 1.",
expected_link_target=CFG_BASE_URL+"/record/47?ln=en",
expected_link_label="Detailed record"))
def test_nearest_terms_box_in_unsuccessful_phrase_query(self):
""" websearch - nearest terms box for unsuccessful phrase query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%22Ellis%2C+Z%22',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=author%3A%22Enqvist%2C+K%22",
expected_link_label='Enqvist, K'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%22ellisz%22&f=author',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=%22Enqvist%2C+K%22&f=author",
expected_link_label='Enqvist, K'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%22elliszà%22&f=author',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=%22Enqvist%2C+K%22&f=author",
expected_link_label='Enqvist, K'))
def test_nearest_terms_box_in_unsuccessful_partial_phrase_query(self):
""" websearch - nearest terms box for unsuccessful partial phrase query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%27Ellis%2C+Z%27',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=author%3A%27Enqvist%2C+K%27",
expected_link_label='Enqvist, K'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%27ellisz%27&f=author',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=%27Enqvist%2C+K%27&f=author",
expected_link_label='Enqvist, K'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%27elliszà%27&f=author',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=%27Enqvist%2C+K%27&f=author",
expected_link_label='Enqvist, K'))
def test_nearest_terms_box_in_unsuccessful_partial_phrase_advanced_query(self):
""" websearch - nearest terms box for unsuccessful partial phrase advanced search query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p1=aaa&f1=title&m1=p&as=1',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&f1=title&as=1&p1=A+simple+functional+form+for+proton-nucleus+total+reaction+cross+sections&m1=p",
expected_link_label='A simple functional form for proton-nucleus total reaction cross sections'))
def test_nearest_terms_box_in_unsuccessful_exact_phrase_advanced_query(self):
""" websearch - nearest terms box for unsuccessful exact phrase advanced search query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p1=aaa&f1=title&m1=e&as=1',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&f1=title&as=1&p1=A+simple+functional+form+for+proton-nucleus+total+reaction+cross+sections&m1=e",
expected_link_label='A simple functional form for proton-nucleus total reaction cross sections'))
def test_nearest_terms_box_in_unsuccessful_boolean_query(self):
""" websearch - nearest terms box for unsuccessful boolean query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=title%3Aellisz+author%3Aellisz',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aellisz",
expected_link_label='energi'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=title%3Aenergi+author%3Aenergie',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aenqvist",
expected_link_label='enqvist'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=title%3Aellisz+author%3Aellisz&f=keyword',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aellisz&f=keyword",
expected_link_label='energi'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=title%3Aenergi+author%3Aenergie&f=keyword',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aenqvist&f=keyword",
expected_link_label='enqvist'))
def test_nearest_terms_box_in_unsuccessful_uppercase_query(self):
""" websearch - nearest terms box for unsuccessful uppercase query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=fOo%3Atest',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=food",
expected_link_label='food'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=arXiv%3A1007.5048',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=artist",
expected_link_label='artist'))
def test_nearest_terms_box_in_unsuccessful_spires_query(self):
""" websearch - nearest terms box for unsuccessful spires query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=find+a+foobar',
expected_text="Nearest terms in any collection are",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=find+a+finch",
expected_link_label='finch'))
class WebSearchBooleanQueryTest(InvenioTestCase):
"""Check various boolean queries."""
def test_successful_boolean_query(self):
""" websearch - successful boolean query """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis+muon',
expected_text="records found",
expected_link_label="Detailed record"))
def test_unsuccessful_boolean_query_where_all_individual_terms_match(self):
""" websearch - unsuccessful boolean query where all individual terms match """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis+muon+letter',
expected_text="Boolean query returned no hits. Please combine your search terms differently."))
def test_unsuccessful_boolean_query_in_advanced_search_where_all_individual_terms_match(self):
""" websearch - unsuccessful boolean query in advanced search where all individual terms match """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?m1=a&p1=ellis&op1=a&m2=a&p2=muon&op2=a&p3=letter',
expected_text="Boolean query returned no hits. Please combine your search terms differently."))
class WebSearchAuthorQueryTest(InvenioTestCase):
"""Check various author-related queries."""
def test_propose_similar_author_names_box(self):
""" websearch - propose similar author names box """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=Ellis%2C+R&f=author',
expected_text="See also: similar author names",
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=Ellis%2C+R+K&f=author",
expected_link_label="Ellis, R K"))
def test_do_not_propose_similar_author_names_box(self):
""" websearch - do not propose similar author names box """
errmsgs = test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%22Ellis%2C+R%22',
expected_link_target=CFG_BASE_URL+"/search?ln=en&p=Ellis%2C+R+K&f=author",
expected_link_label="Ellis, R K")
if errmsgs[0].find("does not contain link to") > -1:
pass
else:
self.fail("Should not propose similar author names box.")
return
class WebSearchSearchEnginePythonAPITest(InvenioXmlTestCase):
"""Check typical search engine Python API calls on the demo data."""
def test_search_engine_python_api_for_failed_query(self):
"""websearch - search engine Python API for failed query"""
self.assertEqual([],
perform_request_search(p='aoeuidhtns'))
def test_search_engine_python_api_for_successful_query(self):
"""websearch - search engine Python API for successful query"""
self.assertEqual([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47],
perform_request_search(p='ellis'))
def test_search_engine_python_api_for_successful_query_format_intbitset(self):
"""websearch - search engine Python API for successful query, output format intbitset"""
self.assertEqual(intbitset([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47]),
perform_request_search(p='ellis', of='intbitset'))
def test_search_engine_web_api_jrec_parameter(self):
"""websearch - search engine Python API for successful query, ignore paging parameters"""
self.assertEqual([11, 12, 13, 14, 15, 16, 17, 18, 47],
perform_request_search(p='ellis', jrec=3))
def test_search_engine_web_api_paging_parameters(self):
"""websearch - search engine Python API for successful query, ignore paging parameters"""
self.assertEqual([11, 12, 13, 14, 15],
perform_request_search(p='ellis', rg=5, jrec=3))
def test_search_engine_python_api_respect_sorting_parameter(self):
"""websearch - search engine Python API for successful query, respect sorting parameters"""
self.assertEqual([77, 84, 85],
perform_request_search(p='klebanov'))
self.assertEqual([77, 85, 84],
perform_request_search(p='klebanov', sf='909C4v'))
def test_search_engine_python_api_respect_ranking_parameter(self):
"""websearch - search engine Python API for successful query, respect ranking parameters"""
self.assertEqual([77, 84, 85],
perform_request_search(p='klebanov'))
self.assertEqual([85, 77, 84],
perform_request_search(p='klebanov', rm='citation'))
def test_search_engine_python_api_for_existing_record(self):
"""websearch - search engine Python API for existing record"""
self.assertEqual([8],
perform_request_search(recid=8))
def test_search_engine_python_api_for_existing_record_format_intbitset(self):
"""websearch - search engine Python API for existing record, output format intbitset"""
self.assertEqual(intbitset([8]),
perform_request_search(recid=8, of='intbitset'))
def test_search_engine_python_api_for_nonexisting_record(self):
"""websearch - search engine Python API for non-existing record"""
self.assertEqual([],
perform_request_search(recid=12345678))
def test_search_engine_python_api_for_nonexisting_record_format_intbitset(self):
"""websearch - search engine Python API for non-existing record, output format intbitset"""
self.assertEqual(intbitset(),
perform_request_search(recid=16777215, of='intbitset'))
def test_search_engine_python_api_for_nonexisting_collection(self):
"""websearch - search engine Python API for non-existing collection"""
self.assertEqual([],
perform_request_search(c='Foo'))
def test_search_engine_python_api_for_range_of_records(self):
"""websearch - search engine Python API for range of records"""
self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8, 9],
perform_request_search(recid=1, recidb=10))
def test_search_engine_python_api_old_style_ranked_by_citation(self):
"""websearch - search engine Python API old style citation ranking"""
self.assertEqual([86, 77],
perform_request_search(p='recid:95', rm='citation'))
def test_search_engine_python_api_textmarc_full(self):
"""websearch - search engine Python API for Text MARC output, full"""
req = make_fake_request()
perform_request_search(req=req, p='higgs', of='tm', so='d')
out = req.test_output_buffer.getvalue()
self.assertMultiLineEqual(out, """\
000000107 001__ 107
000000107 003__ SzGeCERN
000000107 005__ %(rec_107_rev)s
000000107 035__ $$9SPIRES$$a4066995
000000107 037__ $$aCERN-EP-99-060
000000107 041__ $$aeng
000000107 084__ $$2CERN Library$$aEP-1999-060
000000107 088__ $$9SCAN-9910048
000000107 088__ $$aCERN-L3-175
000000107 110__ $$aCERN. Geneva
000000107 245__ $$aLimits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$
000000107 260__ $$c1999
000000107 269__ $$aGeneva$$bCERN$$c26 Apr 1999
000000107 300__ $$a18 p
000000107 490__ $$aALEPH Papers
000000107 500__ $$aPreprint not submitted to publication
000000107 65017 $$2SzGeCERN$$aParticle Physics - Experiment
000000107 690C_ $$aCERN
000000107 690C_ $$aPREPRINT
000000107 693__ $$aCERN LEP$$eALEPH
000000107 693__ $$aCERN LEP$$eDELPHI
000000107 693__ $$aCERN LEP$$eL3
000000107 693__ $$aCERN LEP$$eOPAL
000000107 695__ $$9MEDLINE$$asearches Higgs bosons
000000107 697C_ $$aLexiHiggs
000000107 710__ $$5EP
000000107 710__ $$gALEPH Collaboration
000000107 710__ $$gDELPHI Collaboration
000000107 710__ $$gL3 Collaboration
000000107 710__ $$gLEP Working Group for Higgs Boson Searches
000000107 710__ $$gOPAL Collaboration
000000107 901__ $$uCERN
000000107 916__ $$sh$$w199941
000000107 960__ $$a11
000000107 963__ $$aPUBLIC
000000107 970__ $$a000330309CER
000000107 980__ $$aARTICLE
000000085 001__ 85
000000085 003__ SzGeCERN
000000085 005__ %(rec_85_rev)s
000000085 035__ $$a2356302CERCER
000000085 035__ $$9SLAC$$a5423422
000000085 037__ $$ahep-th/0212181
000000085 041__ $$aeng
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 245__ $$a3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS
000000085 260__ $$c2003
000000085 269__ $$c16 Dec 2002
000000085 300__ $$a8 p
000000085 520__ $$aWe study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.
000000085 65017 $$2SzGeCERN$$aParticle Physics - Theory
000000085 690C_ $$aARTICLE
000000085 695__ $$9LANL EDS$$aHigh Energy Physics - Theory
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000085 8564_ $$s112828$$u%(siteurl)s/record/85/files/0212181.ps.gz
000000085 8564_ $$s151257$$u%(siteurl)s/record/85/files/0212181.pdf
000000085 859__ [email protected]
000000085 909C4 $$c289-293$$pPhys. Lett. B$$v561$$y2003
000000085 916__ $$sn$$w200251
000000085 960__ $$a13
000000085 961__ $$c20060823$$h0007$$lCER01$$x20021217
000000085 963__ $$aPUBLIC
000000085 970__ $$a002356302CER
000000085 980__ $$aARTICLE
000000085 999C5 $$mD. Francia and A. Sagnotti,$$o[1]$$rhep-th/0207002$$sPhys. Lett. B 543 (2002) 303
000000085 999C5 $$mP. Haggi-Mani and B. Sundborg,$$o[1]$$rhep-th/0002189$$sJ. High Energy Phys. 0004 (2000) 031
000000085 999C5 $$mB. Sundborg,$$o[1]$$rhep-th/0103247$$sNucl. Phys. B, Proc. Suppl. 102 (2001) 113
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0105001$$sJ. High Energy Phys. 0109 (2001) 036
000000085 999C5 $$mA. Mikhailov,$$o[1]$$rhep-th/0201019
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205131$$sNucl. Phys. B 644 (2002) 303
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205132$$sJ. High Energy Phys. 0207 (2002) 055
000000085 999C5 $$mJ. Engquist, E. Sezgin and P. Sundell,$$o[1]$$rhep-th/0207101$$sClass. Quantum Gravity 19 (2002) 6175
000000085 999C5 $$mM. A. Vasiliev,$$o[1]$$rhep-th/9611024$$sInt. J. Mod. Phys. D 5 (1996) 763
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9808004$$sNucl. Phys. B 541 (1999) 323
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9906167$$sClass. Quantum Gravity 17 (2000) 1383
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sNucl. Phys. B 291 (1987) 141
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sPhys. Lett. B 189 (1987) 89
000000085 999C5 $$mI. R. Klebanov and A. M. Polyakov,$$o[3]$$rhep-th/0210114$$sPhys. Lett. B 550 (2002) 213
000000085 999C5 $$mM. A. Vasiliev,$$o[4]$$rhep-th/9910096
000000085 999C5 $$mT. Leonhardt, A. Meziane and W. Ruhl,$$o[5]$$rhep-th/0211092
000000085 999C5 $$mO. Aharony, M. Berkooz and E. Silverstein,$$o[6]$$rhep-th/0105309$$sJ. High Energy Phys. 0108 (2001) 006
000000085 999C5 $$mE. Witten,$$o[7]$$rhep-th/0112258
000000085 999C5 $$mM. Berkooz, A. Sever and A. Shomer$$o[8]$$rhep-th/0112264$$sJ. High Energy Phys. 0205 (2002) 034
000000085 999C5 $$mS. S. Gubser and I. Mitra,$$o[9]$$rhep-th/0210093
000000085 999C5 $$mS. S. Gubser and I. R. Klebanov,$$o[10]$$rhep-th/0212138
000000085 999C5 $$mM. Porrati,$$o[11]$$rhep-th/0112166$$sJ. High Energy Phys. 0204 (2002) 058
000000085 999C5 $$mK. G. Wilson and J. B. Kogut,$$o[12]$$sPhys. Rep. 12 (1974) 75
000000085 999C5 $$mI. R. Klebanov and E. Witten,$$o[13]$$rhep-th/9905104$$sNucl. Phys. B 556 (1999) 89
000000085 999C5 $$mW. Heidenreich,$$o[14]$$sJ. Math. Phys. 22 (1981) 1566
000000085 999C5 $$mD. Anselmi,$$o[15]$$rhep-th/0210123
000000001 001__ 1
000000001 005__ %(rec_1_rev)s
000000001 037__ $$aCERN-EX-0106015
000000001 100__ $$aPhotolab
000000001 245__ $$aALEPH experiment: Candidate of Higgs boson production
000000001 246_1 $$aExpérience ALEPH: Candidat de la production d'un boson Higgs
000000001 260__ $$c14 06 2000
000000001 340__ $$aFILM
000000001 520__ $$aCandidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.
000000001 65017 $$2SzGeCERN$$aExperiments and Tracks
000000001 6531_ $$aLEP
000000001 8560_ $$[email protected]
000000001 8564_ $$s1585244$$u%(siteurl)s/record/1/files/0106015_01.jpg
000000001 8564_ $$s20954$$u%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon$$xicon
000000001 909C0 $$o0003717PHOPHO
000000001 909C0 $$y2000
000000001 909C0 $$b81
000000001 909C1 $$c2001-06-14$$l50$$m2001-08-27$$oCM
000000001 909CP $$pBldg. 2
000000001 909CP $$rCalder, N
000000001 909CS $$sn$$w200231
000000001 980__ $$aPICTURE
""" % {'siteurl': CFG_SITE_URL,
'rec_1_rev': get_fieldvalues(1, '005__')[0],
'rec_85_rev': get_fieldvalues(85, '005__')[0],
'rec_107_rev': get_fieldvalues(107, '005__')[0]})
def test_search_engine_python_api_ranked_by_citation_asc(self):
"""websearch - search engine Python API for citation ranking asc"""
self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 75, 76, 80,
82, 83, 85, 86, 87, 88, 89, 90, 92, 93, 96, 97, 98,
99, 100, 101, 102, 103, 104, 107, 108, 109, 113, 127,
128, 18, 74, 79, 91, 94, 77, 78, 95, 84, 81],
perform_request_search(p='', rm='citation', so='a'))
def test_search_engine_python_api_ranked_by_citation_desc(self):
"""websearch - search engine Python API for citation ranking desc"""
self.assertEqual(list(reversed(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 75, 76, 80,
82, 83, 85, 86, 87, 88, 89, 90, 92, 93, 96, 97, 98,
99, 100, 101, 102, 103, 104, 107, 108, 109, 113, 127,
128, 18, 74, 79, 91, 94, 77, 78, 95, 84, 81])),
perform_request_search(p='', rm='citation', so='d'))
def test_search_engine_python_api_textmarc_field_filtered(self):
"""websearch - search engine Python API for Text MARC output, field-filtered"""
req = make_fake_request()
perform_request_search(req=req, p='higgs', of='tm', ot=['100', '700'])
out = req.test_output_buffer.getvalue()
self.assertEqual(out, """\
000000001 100__ $$aPhotolab
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
""")
def test_search_engine_python_api_for_intersect_results_with_one_collrec(self):
"""websearch - search engine Python API for intersect results with one collrec"""
self.assertEqual({'Books & Reports': intbitset([19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])},
intersect_results_with_collrecs(None, intbitset(range(0,110)), ['Books & Reports'], 'id', 0, 'en', False))
def test_search_engine_python_api_for_intersect_results_with_several_collrecs(self):
"""websearch - search engine Python API for intersect results with several collrecs"""
self.assertEqual({'Books': intbitset([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]),
'Reports': intbitset([19, 20]),
'Theses': intbitset([35, 36, 37, 38, 39, 40, 41, 42, 105])},
intersect_results_with_collrecs(None, intbitset(range(0,110)), ['Books', 'Theses', 'Reports'], 'id', 0, 'en', False))
def test_search_engine_python_api_textmarc_field_filtered_hidden_guest(self):
"""websearch - search engine Python API for Text MARC output, field-filtered, hidden field, no guest access"""
req = make_fake_request()
perform_request_search(req=req, p='higgs', of='tm', ot=['100', '595'])
out = req.test_output_buffer.getvalue()
self.assertEqual(out, """\
000000001 100__ $$aPhotolab
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
""")
def test_search_engine_python_api_xmlmarc_full(self):
"""websearch - search engine Python API for XMLMARC output, full"""
req = make_fake_request(admin_user=False)
perform_request_search(req=req, p='higgs', of='xm', so='d')
out = req.test_output_buffer.getvalue()
# print out
self.assertXmlEqual(out, """<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_107_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SPIRES</subfield>
<subfield code="a">4066995</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EP-99-060</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="084" ind1=" " ind2=" ">
<subfield code="2">CERN Library</subfield>
<subfield code="a">EP-1999-060</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="9">SCAN-9910048</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="a">CERN-L3-175</subfield>
</datafield>
<datafield tag="110" ind1=" " ind2=" ">
<subfield code="a">CERN. Geneva</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Limits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">1999</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="a">Geneva</subfield>
<subfield code="b">CERN</subfield>
<subfield code="c">26 Apr 1999</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">18 p</subfield>
</datafield>
<datafield tag="490" ind1=" " ind2=" ">
<subfield code="a">ALEPH Papers</subfield>
</datafield>
<datafield tag="500" ind1=" " ind2=" ">
<subfield code="a">Preprint not submitted to publication</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Experiment</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">CERN</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">PREPRINT</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">ALEPH</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">DELPHI</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">L3</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">OPAL</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">MEDLINE</subfield>
<subfield code="a">searches Higgs bosons</subfield>
</datafield>
<datafield tag="697" ind1="C" ind2=" ">
<subfield code="a">LexiHiggs</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="5">EP</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">ALEPH Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">DELPHI Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">L3 Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">LEP Working Group for Higgs Boson Searches</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">OPAL Collaboration</subfield>
</datafield>
<datafield tag="901" ind1=" " ind2=" ">
<subfield code="u">CERN</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">h</subfield>
<subfield code="w">199941</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">11</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">000330309CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_85_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="a">2356302CERCER</subfield>
</datafield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SLAC</subfield>
<subfield code="a">5423422</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">hep-th/0212181</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">2003</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="c">16 Dec 2002</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">8 p</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">We study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Theory</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">LANL EDS</subfield>
<subfield code="a">High Energy Physics - Theory</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">112828</subfield>
<subfield code="u">%(siteurl)s/record/85/files/0212181.ps.gz</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">151257</subfield>
<subfield code="u">%(siteurl)s/record/85/files/0212181.pdf</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="4">
<subfield code="c">289-293</subfield>
<subfield code="p">Phys. Lett. B</subfield>
<subfield code="v">561</subfield>
<subfield code="y">2003</subfield>
</datafield>
<datafield tag="859" ind1=" " ind2=" ">
<subfield code="f">[email protected]</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">n</subfield>
<subfield code="w">200251</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">13</subfield>
</datafield>
<datafield tag="961" ind1=" " ind2=" ">
<subfield code="c">20060823</subfield>
<subfield code="h">0007</subfield>
<subfield code="l">CER01</subfield>
<subfield code="x">20021217</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">002356302CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Francia and A. Sagnotti,</subfield>
<subfield code="s">Phys. Lett. B 543 (2002) 303</subfield>
<subfield code="r">hep-th/0207002</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">P. Haggi-Mani and B. Sundborg,</subfield>
<subfield code="s">J. High Energy Phys. 0004 (2000) 031</subfield>
<subfield code="r">hep-th/0002189</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">B. Sundborg,</subfield>
<subfield code="s">Nucl. Phys. B, Proc. Suppl. 102 (2001) 113</subfield>
<subfield code="r">hep-th/0103247</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0109 (2001) 036</subfield>
<subfield code="r">hep-th/0105001</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">A. Mikhailov,</subfield>
<subfield code="r">hep-th/0201019</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Nucl. Phys. B 644 (2002) 303</subfield>
<subfield code="r">hep-th/0205131</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0207 (2002) 055</subfield>
<subfield code="r">hep-th/0205132</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">J. Engquist, E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Class. Quantum Gravity 19 (2002) 6175</subfield>
<subfield code="r">hep-th/0207101</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="s">Int. J. Mod. Phys. D 5 (1996) 763</subfield>
<subfield code="r">hep-th/9611024</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Nucl. Phys. B 541 (1999) 323</subfield>
<subfield code="r">hep-th/9808004</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Class. Quantum Gravity 17 (2000) 1383</subfield>
<subfield code="r">hep-th/9906167</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Nucl. Phys. B 291 (1987) 141</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Phys. Lett. B 189 (1987) 89</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">I. R. Klebanov and A. M. Polyakov,</subfield>
<subfield code="s">Phys. Lett. B 550 (2002) 213</subfield>
<subfield code="r">hep-th/0210114</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="r">hep-th/9910096</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="m">T. Leonhardt, A. Meziane and W. Ruhl,</subfield>
<subfield code="r">hep-th/0211092</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="m">O. Aharony, M. Berkooz and E. Silverstein,</subfield>
<subfield code="s">J. High Energy Phys. 0108 (2001) 006</subfield>
<subfield code="r">hep-th/0105309</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="m">E. Witten,</subfield>
<subfield code="r">hep-th/0112258</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="m">M. Berkooz, A. Sever and A. Shomer</subfield>
<subfield code="s">J. High Energy Phys. 0205 (2002) 034</subfield>
<subfield code="r">hep-th/0112264</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[9]</subfield>
<subfield code="m">S. S. Gubser and I. Mitra,</subfield>
<subfield code="r">hep-th/0210093</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[10]</subfield>
<subfield code="m">S. S. Gubser and I. R. Klebanov,</subfield>
<subfield code="r">hep-th/0212138</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="m">M. Porrati,</subfield>
<subfield code="s">J. High Energy Phys. 0204 (2002) 058</subfield>
<subfield code="r">hep-th/0112166</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[12]</subfield>
<subfield code="m">K. G. Wilson and J. B. Kogut,</subfield>
<subfield code="s">Phys. Rep. 12 (1974) 75</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[13]</subfield>
<subfield code="m">I. R. Klebanov and E. Witten,</subfield>
<subfield code="s">Nucl. Phys. B 556 (1999) 89</subfield>
<subfield code="r">hep-th/9905104</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[14]</subfield>
<subfield code="m">W. Heidenreich,</subfield>
<subfield code="s">J. Math. Phys. 22 (1981) 1566</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[15]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="r">hep-th/0210123</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<controlfield tag="005">%(rec_1_rev)s</controlfield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EX-0106015</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">ALEPH experiment: Candidate of Higgs boson production</subfield>
</datafield>
<datafield tag="246" ind1=" " ind2="1">
<subfield code="a">Expérience ALEPH: Candidat de la production d'un boson Higgs</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">14 06 2000</subfield>
</datafield>
<datafield tag="340" ind1=" " ind2=" ">
<subfield code="a">FILM</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">Candidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Experiments and Tracks</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="a">LEP</subfield>
</datafield>
<datafield tag="856" ind1="0" ind2=" ">
<subfield code="f">[email protected]</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">1585244</subfield>
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.jpg</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">20954</subfield>
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon</subfield>
<subfield code="x">icon</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="o">0003717PHOPHO</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="y">2000</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="b">81</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="1">
<subfield code="c">2001-06-14</subfield>
<subfield code="l">50</subfield>
<subfield code="m">2001-08-27</subfield>
<subfield code="o">CM</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="p">Bldg. 2</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="r">Calder, N</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="S">
<subfield code="s">n</subfield>
<subfield code="w">200231</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">PICTURE</subfield>
</datafield>
</record>
</collection>""" % {'siteurl': CFG_SITE_URL,
'rec_1_rev': get_fieldvalues(1, '005__')[0],
'rec_85_rev': get_fieldvalues(85, '005__')[0],
'rec_107_rev': get_fieldvalues(107, '005__')[0]})
def test_search_engine_python_api_xmlmarc_field_filtered(self):
"""websearch - search engine Python API for XMLMARC output, field-filtered"""
# we are testing example from /help/hacking/search-engine-api
req = make_fake_request()
perform_request_search(req=req, p='higgs', of='xm', ot=['100', '700'], so='d')
out = req.test_output_buffer.getvalue()
self.assertXmlEqual(out, """<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>""")
def test_search_engine_python_api_xmlmarc_field_filtered_hidden_guest(self):
"""websearch - search engine Python API for XMLMARC output, field-filtered, hidden field, no guest access"""
# we are testing example from /help/hacking/search-engine-api
req = make_fake_request()
perform_request_search(req=req, p='higgs', of='xm', ot=['100', '595'], so='d')
out = req.test_output_buffer.getvalue()
self.assertXmlEqual(out, """<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>""")
def test_search_engine_python_api_long_author_with_quotes(self):
"""websearch - search engine Python API for p=author:"Abbot, R B"'""" \
"""this test was written along with a bug report, needs fixing."""
self.assertEqual([16], perform_request_search(p='author:"Abbott, R B"'))
class WebSearchSearchEngineWebAPITest(InvenioTestCase):
"""Check typical search engine Web API calls on the demo data."""
def test_search_engine_web_api_for_failed_query(self):
"""websearch - search engine Web API for failed query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=aoeuidhtns&of=id',
expected_text="[]"))
def test_search_engine_web_api_for_failed_query_format_intbitset(self):
"""websearch - search engine Web API for failed query, output format intbitset"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=aoeuidhtns&of=intbitset',
expected_text=intbitset().fastdump()))
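    # A minimal sketch (added for illustration; it assumes intbitset exposes
    # fastload() as the inverse of fastdump()) of the round-trip behind the
    # '&of=intbitset' comparisons used in this class.  The helper has no
    # 'test_' prefix, so the test runner ignores it.
    def _intbitset_roundtrip_sketch(self):
        expected = intbitset([77, 84, 85])
        blob = expected.fastdump()     # serialized form returned by '&of=intbitset'
        restored = intbitset()
        restored.fastload(blob)        # rebuild the set from the serialized blob
        return restored == expected    # True if the round-trip preserved the ids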
def test_search_engine_web_api_for_successful_query(self):
"""websearch - search engine Web API for successful query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=id&rg=0',
expected_text="[47, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8]"))
def test_search_engine_web_api_no_paging_parameter(self):
"""websearch - search engine Web API for successful query, ignore paging parameters"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=id&rg=0',
expected_text="[47, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8]"))
def test_search_engine_web_api_jrec_parameter(self):
"""websearch - search engine Web API for successful query, ignore paging parameters"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=id&rg=0&jrec=3',
expected_text="[16, 15, 14, 13, 12, 11, 10, 9, 8]"))
def test_search_engine_web_api_paging_parameters(self):
"""websearch - search engine Web API for successful query, ignore paging parameters"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=id&rg=5&jrec=3',
expected_text="[16, 15, 14, 13, 12]"))
def test_search_engine_web_api_respect_sorting_parameter(self):
"""websearch - search engine Web API for successful query, respect sorting parameters"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id',
expected_text="[85, 84]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id',
username="admin",
expected_text="[85, 84, 77]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id&sf=909C4v',
expected_text="[84, 85]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id&sf=909C4v',
username="admin",
expected_text="[84, 85, 77]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=intbitset&sf=909C4v',
username="admin",
expected_text=intbitset([77, 84, 85]).fastdump()))
def test_search_engine_web_api_respect_ranking_parameter(self):
"""websearch - search engine Web API for successful query, respect ranking parameters"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id',
expected_text="[85, 84]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id',
username="admin",
expected_text="[85, 84, 77]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id&rm=citation',
expected_text="[84, 85]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id&rm=citation',
username="admin",
expected_text="[84, 77, 85]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=intbitset&rm=citation',
username="admin",
expected_text=intbitset([77, 84, 85]).fastdump()))
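    # Note on the two orderings exercised above: '&sf=909C4v' sorts on the
    # 909C4 $v subfield (the journal volume in the demo records), while
    # '&rm=citation' re-ranks the same hits by citation counts.  That is why
    # the guest results agree ([84, 85] in both cases) but the admin results
    # differ ([84, 85, 77] when sorted vs [84, 77, 85] when citation-ranked).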
def test_search_engine_web_api_for_existing_record(self):
"""websearch - search engine Web API for existing record"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?recid=8&of=id',
expected_text="[8]"))
def test_search_engine_web_api_for_nonexisting_record(self):
"""websearch - search engine Web API for non-existing record"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?recid=12345678&of=id',
expected_text="[]"))
def test_search_engine_web_api_for_nonexisting_collection(self):
"""websearch - search engine Web API for non-existing collection"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?c=Foo&of=id',
expected_text="[]"))
def test_search_engine_web_api_for_range_of_records(self):
"""websearch - search engine Web API for range of records"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?recid=1&recidb=10&of=id',
expected_text="[1, 2, 3, 4, 5, 6, 7, 8, 9]"))
def test_search_engine_web_api_ranked_by_citation(self):
"""websearch - search engine Web API for citation ranking"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A81&rm=citation&of=id',
expected_text="[82, 83, 87, 89]"))
def test_search_engine_web_api_textmarc_full(self):
"""websearch - search engine Web API for Text MARC output, full"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm',
expected_text="""\
000000107 001__ 107
000000107 003__ SzGeCERN
000000107 005__ %(rec_107_rev)s
000000107 035__ $$9SPIRES$$a4066995
000000107 037__ $$aCERN-EP-99-060
000000107 041__ $$aeng
000000107 084__ $$2CERN Library$$aEP-1999-060
000000107 088__ $$9SCAN-9910048
000000107 088__ $$aCERN-L3-175
000000107 110__ $$aCERN. Geneva
000000107 245__ $$aLimits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$
000000107 260__ $$c1999
000000107 269__ $$aGeneva$$bCERN$$c26 Apr 1999
000000107 300__ $$a18 p
000000107 490__ $$aALEPH Papers
000000107 500__ $$aPreprint not submitted to publication
000000107 65017 $$2SzGeCERN$$aParticle Physics - Experiment
000000107 690C_ $$aCERN
000000107 690C_ $$aPREPRINT
000000107 693__ $$aCERN LEP$$eALEPH
000000107 693__ $$aCERN LEP$$eDELPHI
000000107 693__ $$aCERN LEP$$eL3
000000107 693__ $$aCERN LEP$$eOPAL
000000107 695__ $$9MEDLINE$$asearches Higgs bosons
000000107 697C_ $$aLexiHiggs
000000107 710__ $$5EP
000000107 710__ $$gALEPH Collaboration
000000107 710__ $$gDELPHI Collaboration
000000107 710__ $$gL3 Collaboration
000000107 710__ $$gLEP Working Group for Higgs Boson Searches
000000107 710__ $$gOPAL Collaboration
000000107 901__ $$uCERN
000000107 916__ $$sh$$w199941
000000107 960__ $$a11
000000107 963__ $$aPUBLIC
000000107 970__ $$a000330309CER
000000107 980__ $$aARTICLE
000000085 001__ 85
000000085 003__ SzGeCERN
000000085 005__ %(rec_85_rev)s
000000085 035__ $$a2356302CERCER
000000085 035__ $$9SLAC$$a5423422
000000085 037__ $$ahep-th/0212181
000000085 041__ $$aeng
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 245__ $$a3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS
000000085 260__ $$c2003
000000085 269__ $$c16 Dec 2002
000000085 300__ $$a8 p
000000085 520__ $$aWe study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.
000000085 65017 $$2SzGeCERN$$aParticle Physics - Theory
000000085 690C_ $$aARTICLE
000000085 695__ $$9LANL EDS$$aHigh Energy Physics - Theory
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000085 8564_ $$s112828$$u%(siteurl)s/record/85/files/0212181.ps.gz
000000085 8564_ $$s151257$$u%(siteurl)s/record/85/files/0212181.pdf
000000085 859__ $$[email protected]
000000085 909C4 $$c289-293$$pPhys. Lett. B$$v561$$y2003
000000085 916__ $$sn$$w200251
000000085 960__ $$a13
000000085 961__ $$c20060823$$h0007$$lCER01$$x20021217
000000085 963__ $$aPUBLIC
000000085 970__ $$a002356302CER
000000085 980__ $$aARTICLE
000000085 999C5 $$mD. Francia and A. Sagnotti,$$o[1]$$rhep-th/0207002$$sPhys. Lett. B 543 (2002) 303
000000085 999C5 $$mP. Haggi-Mani and B. Sundborg,$$o[1]$$rhep-th/0002189$$sJ. High Energy Phys. 0004 (2000) 031
000000085 999C5 $$mB. Sundborg,$$o[1]$$rhep-th/0103247$$sNucl. Phys. B, Proc. Suppl. 102 (2001) 113
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0105001$$sJ. High Energy Phys. 0109 (2001) 036
000000085 999C5 $$mA. Mikhailov,$$o[1]$$rhep-th/0201019
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205131$$sNucl. Phys. B 644 (2002) 303
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205132$$sJ. High Energy Phys. 0207 (2002) 055
000000085 999C5 $$mJ. Engquist, E. Sezgin and P. Sundell,$$o[1]$$rhep-th/0207101$$sClass. Quantum Gravity 19 (2002) 6175
000000085 999C5 $$mM. A. Vasiliev,$$o[1]$$rhep-th/9611024$$sInt. J. Mod. Phys. D 5 (1996) 763
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9808004$$sNucl. Phys. B 541 (1999) 323
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9906167$$sClass. Quantum Gravity 17 (2000) 1383
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sNucl. Phys. B 291 (1987) 141
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sPhys. Lett. B 189 (1987) 89
000000085 999C5 $$mI. R. Klebanov and A. M. Polyakov,$$o[3]$$rhep-th/0210114$$sPhys. Lett. B 550 (2002) 213
000000085 999C5 $$mM. A. Vasiliev,$$o[4]$$rhep-th/9910096
000000085 999C5 $$mT. Leonhardt, A. Meziane and W. Ruhl,$$o[5]$$rhep-th/0211092
000000085 999C5 $$mO. Aharony, M. Berkooz and E. Silverstein,$$o[6]$$rhep-th/0105309$$sJ. High Energy Phys. 0108 (2001) 006
000000085 999C5 $$mE. Witten,$$o[7]$$rhep-th/0112258
000000085 999C5 $$mM. Berkooz, A. Sever and A. Shomer$$o[8]$$rhep-th/0112264$$sJ. High Energy Phys. 0205 (2002) 034
000000085 999C5 $$mS. S. Gubser and I. Mitra,$$o[9]$$rhep-th/0210093
000000085 999C5 $$mS. S. Gubser and I. R. Klebanov,$$o[10]$$rhep-th/0212138
000000085 999C5 $$mM. Porrati,$$o[11]$$rhep-th/0112166$$sJ. High Energy Phys. 0204 (2002) 058
000000085 999C5 $$mK. G. Wilson and J. B. Kogut,$$o[12]$$sPhys. Rep. 12 (1974) 75
000000085 999C5 $$mI. R. Klebanov and E. Witten,$$o[13]$$rhep-th/9905104$$sNucl. Phys. B 556 (1999) 89
000000085 999C5 $$mW. Heidenreich,$$o[14]$$sJ. Math. Phys. 22 (1981) 1566
000000085 999C5 $$mD. Anselmi,$$o[15]$$rhep-th/0210123
000000001 001__ 1
000000001 005__ %(rec_1_rev)s
000000001 037__ $$aCERN-EX-0106015
000000001 100__ $$aPhotolab
000000001 245__ $$aALEPH experiment: Candidate of Higgs boson production
000000001 246_1 $$aExpérience ALEPH: Candidat de la production d'un boson Higgs
000000001 260__ $$c14 06 2000
000000001 340__ $$aFILM
000000001 520__ $$aCandidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.
000000001 65017 $$2SzGeCERN$$aExperiments and Tracks
000000001 6531_ $$aLEP
000000001 8560_ $$[email protected]
000000001 8564_ $$s1585244$$u%(siteurl)s/record/1/files/0106015_01.jpg
000000001 8564_ $$s20954$$u%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon$$xicon
000000001 909C0 $$o0003717PHOPHO
000000001 909C0 $$y2000
000000001 909C0 $$b81
000000001 909C1 $$c2001-06-14$$l50$$m2001-08-27$$oCM
000000001 909CP $$pBldg. 2
000000001 909CP $$rCalder, N
000000001 909CS $$sn$$w200231
000000001 980__ $$aPICTURE
""" % {'siteurl': CFG_SITE_URL,
'rec_1_rev': get_fieldvalues(1, '005__')[0],
'rec_85_rev': get_fieldvalues(85, '005__')[0],
'rec_107_rev': get_fieldvalues(107, '005__')[0]}))
def test_search_engine_web_api_textmarc_field_filtered(self):
"""websearch - search engine Web API for Text MARC output, field-filtered"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=100,700',
expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000001 100__ $$aPhotolab
"""))
def test_search_engine_web_api_textmarc_field_filtered_hidden_guest(self):
"""websearch - search engine Web API for Text MARC output, field-filtered, hidden field, no guest access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=100,595',
expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000001 100__ $$aPhotolab
"""))
def test_search_engine_web_api_textmarc_field_filtered_hidden_admin(self):
"""websearch - search engine Web API for Text MARC output, field-filtered, hidden field, admin access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=100,595',
username='admin',
expected_text="""\
000000107 595__ $$aNo authors
000000107 595__ $$aCERN-EP
000000107 595__ $$aOA
000000107 595__ $$aSIS:200740 PR/LKR not found (from SLAC, INSPEC)
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 595__ $$aLANL EDS
000000085 595__ $$aSIS LANLPUBL2004
000000085 595__ $$aSIS:2004 PR/LKR added
000000001 100__ $$aPhotolab
000000001 595__ $$aPress
"""))
def test_search_engine_web_api_textmarc_subfield_values(self):
"""websearch - search engine Web API for Text MARC output, subfield values"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=700__a',
expected_text="""\
Porrati, Massimo
Zaffaroni, A
"""))
def test_search_engine_web_api_xmlmarc_full(self):
"""websearch - search engine Web API for XMLMARC output, full"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_107_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SPIRES</subfield>
<subfield code="a">4066995</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EP-99-060</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="084" ind1=" " ind2=" ">
<subfield code="2">CERN Library</subfield>
<subfield code="a">EP-1999-060</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="9">SCAN-9910048</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="a">CERN-L3-175</subfield>
</datafield>
<datafield tag="110" ind1=" " ind2=" ">
<subfield code="a">CERN. Geneva</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Limits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">1999</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="a">Geneva</subfield>
<subfield code="b">CERN</subfield>
<subfield code="c">26 Apr 1999</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">18 p</subfield>
</datafield>
<datafield tag="490" ind1=" " ind2=" ">
<subfield code="a">ALEPH Papers</subfield>
</datafield>
<datafield tag="500" ind1=" " ind2=" ">
<subfield code="a">Preprint not submitted to publication</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Experiment</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">CERN</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">PREPRINT</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">ALEPH</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">DELPHI</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">L3</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">OPAL</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">MEDLINE</subfield>
<subfield code="a">searches Higgs bosons</subfield>
</datafield>
<datafield tag="697" ind1="C" ind2=" ">
<subfield code="a">LexiHiggs</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="5">EP</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">ALEPH Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">DELPHI Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">L3 Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">LEP Working Group for Higgs Boson Searches</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">OPAL Collaboration</subfield>
</datafield>
<datafield tag="901" ind1=" " ind2=" ">
<subfield code="u">CERN</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">h</subfield>
<subfield code="w">199941</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">11</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">000330309CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_85_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="a">2356302CERCER</subfield>
</datafield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SLAC</subfield>
<subfield code="a">5423422</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">hep-th/0212181</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">2003</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="c">16 Dec 2002</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">8 p</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">We study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Theory</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">LANL EDS</subfield>
<subfield code="a">High Energy Physics - Theory</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">112828</subfield>
<subfield code="u">%(siteurl)s/record/85/files/0212181.ps.gz</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">151257</subfield>
<subfield code="u">%(siteurl)s/record/85/files/0212181.pdf</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="4">
<subfield code="c">289-293</subfield>
<subfield code="p">Phys. Lett. B</subfield>
<subfield code="v">561</subfield>
<subfield code="y">2003</subfield>
</datafield>
<datafield tag="859" ind1=" " ind2=" ">
<subfield code="f">[email protected]</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">n</subfield>
<subfield code="w">200251</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">13</subfield>
</datafield>
<datafield tag="961" ind1=" " ind2=" ">
<subfield code="c">20060823</subfield>
<subfield code="h">0007</subfield>
<subfield code="l">CER01</subfield>
<subfield code="x">20021217</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">002356302CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Francia and A. Sagnotti,</subfield>
<subfield code="s">Phys. Lett. B 543 (2002) 303</subfield>
<subfield code="r">hep-th/0207002</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">P. Haggi-Mani and B. Sundborg,</subfield>
<subfield code="s">J. High Energy Phys. 0004 (2000) 031</subfield>
<subfield code="r">hep-th/0002189</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">B. Sundborg,</subfield>
<subfield code="s">Nucl. Phys. B, Proc. Suppl. 102 (2001) 113</subfield>
<subfield code="r">hep-th/0103247</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0109 (2001) 036</subfield>
<subfield code="r">hep-th/0105001</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">A. Mikhailov,</subfield>
<subfield code="r">hep-th/0201019</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Nucl. Phys. B 644 (2002) 303</subfield>
<subfield code="r">hep-th/0205131</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0207 (2002) 055</subfield>
<subfield code="r">hep-th/0205132</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">J. Engquist, E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Class. Quantum Gravity 19 (2002) 6175</subfield>
<subfield code="r">hep-th/0207101</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="s">Int. J. Mod. Phys. D 5 (1996) 763</subfield>
<subfield code="r">hep-th/9611024</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Nucl. Phys. B 541 (1999) 323</subfield>
<subfield code="r">hep-th/9808004</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Class. Quantum Gravity 17 (2000) 1383</subfield>
<subfield code="r">hep-th/9906167</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Nucl. Phys. B 291 (1987) 141</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Phys. Lett. B 189 (1987) 89</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">I. R. Klebanov and A. M. Polyakov,</subfield>
<subfield code="s">Phys. Lett. B 550 (2002) 213</subfield>
<subfield code="r">hep-th/0210114</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="r">hep-th/9910096</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="m">T. Leonhardt, A. Meziane and W. Ruhl,</subfield>
<subfield code="r">hep-th/0211092</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="m">O. Aharony, M. Berkooz and E. Silverstein,</subfield>
<subfield code="s">J. High Energy Phys. 0108 (2001) 006</subfield>
<subfield code="r">hep-th/0105309</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="m">E. Witten,</subfield>
<subfield code="r">hep-th/0112258</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="m">M. Berkooz, A. Sever and A. Shomer</subfield>
<subfield code="s">J. High Energy Phys. 0205 (2002) 034</subfield>
<subfield code="r">hep-th/0112264</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[9]</subfield>
<subfield code="m">S. S. Gubser and I. Mitra,</subfield>
<subfield code="r">hep-th/0210093</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[10]</subfield>
<subfield code="m">S. S. Gubser and I. R. Klebanov,</subfield>
<subfield code="r">hep-th/0212138</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="m">M. Porrati,</subfield>
<subfield code="s">J. High Energy Phys. 0204 (2002) 058</subfield>
<subfield code="r">hep-th/0112166</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[12]</subfield>
<subfield code="m">K. G. Wilson and J. B. Kogut,</subfield>
<subfield code="s">Phys. Rep. 12 (1974) 75</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[13]</subfield>
<subfield code="m">I. R. Klebanov and E. Witten,</subfield>
<subfield code="s">Nucl. Phys. B 556 (1999) 89</subfield>
<subfield code="r">hep-th/9905104</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[14]</subfield>
<subfield code="m">W. Heidenreich,</subfield>
<subfield code="s">J. Math. Phys. 22 (1981) 1566</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[15]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="r">hep-th/0210123</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<controlfield tag="005">%(rec_1_rev)s</controlfield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EX-0106015</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">ALEPH experiment: Candidate of Higgs boson production</subfield>
</datafield>
<datafield tag="246" ind1=" " ind2="1">
<subfield code="a">Expérience ALEPH: Candidat de la production d'un boson Higgs</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">14 06 2000</subfield>
</datafield>
<datafield tag="340" ind1=" " ind2=" ">
<subfield code="a">FILM</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">Candidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Experiments and Tracks</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="a">LEP</subfield>
</datafield>
<datafield tag="856" ind1="0" ind2=" ">
<subfield code="f">[email protected]</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">1585244</subfield>
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.jpg</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">20954</subfield>
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon</subfield>
<subfield code="x">icon</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="o">0003717PHOPHO</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="y">2000</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="b">81</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="1">
<subfield code="c">2001-06-14</subfield>
<subfield code="l">50</subfield>
<subfield code="m">2001-08-27</subfield>
<subfield code="o">CM</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="p">Bldg. 2</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="r">Calder, N</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="S">
<subfield code="s">n</subfield>
<subfield code="w">200231</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">PICTURE</subfield>
</datafield>
</record>
</collection>""" % {'siteurl': CFG_SITE_URL,
'rec_1_rev': get_fieldvalues(1, '005__')[0],
'rec_85_rev': get_fieldvalues(85, '005__')[0],
'rec_107_rev': get_fieldvalues(107, '005__')[0]}))
def test_search_engine_web_api_xmlmarc_field_filtered(self):
"""websearch - search engine Web API for XMLMARC output, field-filtered"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm&ot=100,700',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>"""))
def test_search_engine_web_api_xmlmarc_field_filtered_hidden_guest(self):
"""websearch - search engine Web API for XMLMARC output, field-filtered, hidden field, no guest access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm&ot=100,595',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>"""))
def test_search_engine_web_api_xmlmarc_field_filtered_hidden_admin(self):
"""websearch - search engine Web API for XMLMARC output, field-filtered, hidden field, admin access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm&ot=100,595',
username='admin',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">No authors</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">CERN-EP</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">OA</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS:200740 PR/LKR not found (from SLAC, INSPEC)</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">LANL EDS</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS LANLPUBL2004</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS:2004 PR/LKR added</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">Press</subfield>
</datafield>
</record>
</collection>"""))
class WebSearchRecordWebAPITest(InvenioTestCase):
"""Check typical /record Web API calls on the demo data."""
def test_record_web_api_textmarc_full(self):
"""websearch - /record Web API for TextMARC output, full"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/85?of=tm',
expected_text="""\
000000085 001__ 85
000000085 003__ SzGeCERN
000000085 005__ %(rec_85_rev)s
000000085 035__ $$a2356302CERCER
000000085 035__ $$9SLAC$$a5423422
000000085 037__ $$ahep-th/0212181
000000085 041__ $$aeng
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 245__ $$a3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS
000000085 260__ $$c2003
000000085 269__ $$c16 Dec 2002
000000085 300__ $$a8 p
000000085 520__ $$aWe study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.
000000085 65017 $$2SzGeCERN$$aParticle Physics - Theory
000000085 690C_ $$aARTICLE
000000085 695__ $$9LANL EDS$$aHigh Energy Physics - Theory
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000085 8564_ $$s112828$$u%(siteurl)s/record/85/files/0212181.ps.gz
000000085 8564_ $$s151257$$u%(siteurl)s/record/85/files/0212181.pdf
000000085 859__ [email protected]
000000085 909C4 $$c289-293$$pPhys. Lett. B$$v561$$y2003
000000085 916__ $$sn$$w200251
000000085 960__ $$a13
000000085 961__ $$c20060823$$h0007$$lCER01$$x20021217
000000085 963__ $$aPUBLIC
000000085 970__ $$a002356302CER
000000085 980__ $$aARTICLE
000000085 999C5 $$mD. Francia and A. Sagnotti,$$o[1]$$rhep-th/0207002$$sPhys. Lett. B 543 (2002) 303
000000085 999C5 $$mP. Haggi-Mani and B. Sundborg,$$o[1]$$rhep-th/0002189$$sJ. High Energy Phys. 0004 (2000) 031
000000085 999C5 $$mB. Sundborg,$$o[1]$$rhep-th/0103247$$sNucl. Phys. B, Proc. Suppl. 102 (2001) 113
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0105001$$sJ. High Energy Phys. 0109 (2001) 036
000000085 999C5 $$mA. Mikhailov,$$o[1]$$rhep-th/0201019
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205131$$sNucl. Phys. B 644 (2002) 303
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205132$$sJ. High Energy Phys. 0207 (2002) 055
000000085 999C5 $$mJ. Engquist, E. Sezgin and P. Sundell,$$o[1]$$rhep-th/0207101$$sClass. Quantum Gravity 19 (2002) 6175
000000085 999C5 $$mM. A. Vasiliev,$$o[1]$$rhep-th/9611024$$sInt. J. Mod. Phys. D 5 (1996) 763
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9808004$$sNucl. Phys. B 541 (1999) 323
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9906167$$sClass. Quantum Gravity 17 (2000) 1383
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sNucl. Phys. B 291 (1987) 141
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sPhys. Lett. B 189 (1987) 89
000000085 999C5 $$mI. R. Klebanov and A. M. Polyakov,$$o[3]$$rhep-th/0210114$$sPhys. Lett. B 550 (2002) 213
000000085 999C5 $$mM. A. Vasiliev,$$o[4]$$rhep-th/9910096
000000085 999C5 $$mT. Leonhardt, A. Meziane and W. Ruhl,$$o[5]$$rhep-th/0211092
000000085 999C5 $$mO. Aharony, M. Berkooz and E. Silverstein,$$o[6]$$rhep-th/0105309$$sJ. High Energy Phys. 0108 (2001) 006
000000085 999C5 $$mE. Witten,$$o[7]$$rhep-th/0112258
000000085 999C5 $$mM. Berkooz, A. Sever and A. Shomer$$o[8]$$rhep-th/0112264$$sJ. High Energy Phys. 0205 (2002) 034
000000085 999C5 $$mS. S. Gubser and I. Mitra,$$o[9]$$rhep-th/0210093
000000085 999C5 $$mS. S. Gubser and I. R. Klebanov,$$o[10]$$rhep-th/0212138
000000085 999C5 $$mM. Porrati,$$o[11]$$rhep-th/0112166$$sJ. High Energy Phys. 0204 (2002) 058
000000085 999C5 $$mK. G. Wilson and J. B. Kogut,$$o[12]$$sPhys. Rep. 12 (1974) 75
000000085 999C5 $$mI. R. Klebanov and E. Witten,$$o[13]$$rhep-th/9905104$$sNucl. Phys. B 556 (1999) 89
000000085 999C5 $$mW. Heidenreich,$$o[14]$$sJ. Math. Phys. 22 (1981) 1566
000000085 999C5 $$mD. Anselmi,$$o[15]$$rhep-th/0210123
""" % {'siteurl': CFG_SITE_URL,
'rec_85_rev': get_fieldvalues(85, '005__')[0]}))
def test_record_web_api_xmlmarc_full(self):
"""websearch - /record Web API for XMLMARC output, full"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/85?of=xm',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">85</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_85_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="a">2356302CERCER</subfield>
</datafield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SLAC</subfield>
<subfield code="a">5423422</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">hep-th/0212181</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">2003</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="c">16 Dec 2002</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">8 p</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">We study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Theory</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">LANL EDS</subfield>
<subfield code="a">High Energy Physics - Theory</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">112828</subfield>
<subfield code="u">%(siteurl)s/record/85/files/0212181.ps.gz</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="s">151257</subfield>
<subfield code="u">%(siteurl)s/record/85/files/0212181.pdf</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="4">
<subfield code="c">289-293</subfield>
<subfield code="p">Phys. Lett. B</subfield>
<subfield code="v">561</subfield>
<subfield code="y">2003</subfield>
</datafield>
<datafield tag="859" ind1=" " ind2=" ">
<subfield code="f">[email protected]</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">n</subfield>
<subfield code="w">200251</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">13</subfield>
</datafield>
<datafield tag="961" ind1=" " ind2=" ">
<subfield code="c">20060823</subfield>
<subfield code="h">0007</subfield>
<subfield code="l">CER01</subfield>
<subfield code="x">20021217</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">002356302CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Francia and A. Sagnotti,</subfield>
<subfield code="s">Phys. Lett. B 543 (2002) 303</subfield>
<subfield code="r">hep-th/0207002</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">P. Haggi-Mani and B. Sundborg,</subfield>
<subfield code="s">J. High Energy Phys. 0004 (2000) 031</subfield>
<subfield code="r">hep-th/0002189</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">B. Sundborg,</subfield>
<subfield code="s">Nucl. Phys. B, Proc. Suppl. 102 (2001) 113</subfield>
<subfield code="r">hep-th/0103247</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0109 (2001) 036</subfield>
<subfield code="r">hep-th/0105001</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">A. Mikhailov,</subfield>
<subfield code="r">hep-th/0201019</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Nucl. Phys. B 644 (2002) 303</subfield>
<subfield code="r">hep-th/0205131</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0207 (2002) 055</subfield>
<subfield code="r">hep-th/0205132</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">J. Engquist, E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Class. Quantum Gravity 19 (2002) 6175</subfield>
<subfield code="r">hep-th/0207101</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="s">Int. J. Mod. Phys. D 5 (1996) 763</subfield>
<subfield code="r">hep-th/9611024</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Nucl. Phys. B 541 (1999) 323</subfield>
<subfield code="r">hep-th/9808004</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Class. Quantum Gravity 17 (2000) 1383</subfield>
<subfield code="r">hep-th/9906167</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Nucl. Phys. B 291 (1987) 141</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Phys. Lett. B 189 (1987) 89</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">I. R. Klebanov and A. M. Polyakov,</subfield>
<subfield code="s">Phys. Lett. B 550 (2002) 213</subfield>
<subfield code="r">hep-th/0210114</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="r">hep-th/9910096</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="m">T. Leonhardt, A. Meziane and W. Ruhl,</subfield>
<subfield code="r">hep-th/0211092</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="m">O. Aharony, M. Berkooz and E. Silverstein,</subfield>
<subfield code="s">J. High Energy Phys. 0108 (2001) 006</subfield>
<subfield code="r">hep-th/0105309</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="m">E. Witten,</subfield>
<subfield code="r">hep-th/0112258</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="m">M. Berkooz, A. Sever and A. Shomer</subfield>
<subfield code="s">J. High Energy Phys. 0205 (2002) 034</subfield>
<subfield code="r">hep-th/0112264</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[9]</subfield>
<subfield code="m">S. S. Gubser and I. Mitra,</subfield>
<subfield code="r">hep-th/0210093</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[10]</subfield>
<subfield code="m">S. S. Gubser and I. R. Klebanov,</subfield>
<subfield code="r">hep-th/0212138</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="m">M. Porrati,</subfield>
<subfield code="s">J. High Energy Phys. 0204 (2002) 058</subfield>
<subfield code="r">hep-th/0112166</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[12]</subfield>
<subfield code="m">K. G. Wilson and J. B. Kogut,</subfield>
<subfield code="s">Phys. Rep. 12 (1974) 75</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[13]</subfield>
<subfield code="m">I. R. Klebanov and E. Witten,</subfield>
<subfield code="s">Nucl. Phys. B 556 (1999) 89</subfield>
<subfield code="r">hep-th/9905104</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[14]</subfield>
<subfield code="m">W. Heidenreich,</subfield>
<subfield code="s">J. Math. Phys. 22 (1981) 1566</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[15]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="r">hep-th/0210123</subfield>
</datafield>
</record>
</collection>""" % {'siteurl': CFG_SITE_URL,
'rec_85_rev': get_fieldvalues(85, '005__')[0]}))
def test_record_web_api_textmarc_field_filtered(self):
"""websearch - /record Web API for TextMARC output, field-filtered"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=100,700',
expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
"""))
def test_record_web_api_textmarc_field_filtered_hidden_guest(self):
"""websearch - /record Web API for TextMARC output, field-filtered, hidden field, no guest access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=100,595',
expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
"""))
def test_record_web_api_textmarc_field_filtered_hidden_admin(self):
"""websearch - /record Web API for TextMARC output, field-filtered, hidden field, admin access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=100,595',
username='admin',
expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 595__ $$aLANL EDS
000000085 595__ $$aSIS LANLPUBL2004
000000085 595__ $$aSIS:2004 PR/LKR added
"""))
def test_record_web_api_xmlmarc_field_filtered(self):
"""websearch - /record Web API for XMLMARC output, field-filtered"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/85?of=xm&ot=100,700',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
</record>
</collection>"""))
def test_record_web_api_xmlmarc_field_filtered_hidden_guest(self):
"""websearch - /record Web API for XMLMARC output, field-filtered, hidden field, no guest access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/85?of=xm&ot=100,595',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
</record>
</collection>"""))
def test_record_web_api_xmlmarc_field_filtered_hidden_admin(self):
"""websearch - /record Web API for XMLMARC output, field-filtered, hidden field, admin access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/85?of=xm&ot=100,595',
username='admin',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">LANL EDS</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS LANLPUBL2004</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS:2004 PR/LKR added</subfield>
</datafield>
</record>
</collection>"""))
def test_record_web_api_textmarc_subfield_values(self):
"""websearch - /record Web API for TextMARC output, subfield values"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=700__a',
expected_text="""\
Porrati, Massimo
Zaffaroni, A
"""))
class WebSearchRestrictedCollectionTest(InvenioTestCase):
"""Test of the restricted collections behaviour."""
def test_restricted_collection_interface_page(self):
"""websearch - restricted collection interface page body"""
# there should be no Latest additions box for restricted collections
self.assertNotEqual([],
test_web_page_content(CFG_SITE_URL + '/collection/Theses',
expected_text="Latest additions"))
def test_restricted_search_as_anonymous_guest(self):
"""websearch - restricted collection not searchable by anonymous guest"""
browser = Browser()
browser.open(CFG_SITE_URL + '/search?c=Theses')
response = browser.response().read()
if response.find("If you think you have right to access it, please authenticate yourself.") > -1:
pass
else:
self.fail("Oops, searching restricted collection without password should have redirected to login dialog.")
return
def test_restricted_search_as_authorized_person(self):
"""websearch - restricted collection searchable by authorized person"""
browser = Browser()
browser.open(CFG_SITE_URL + '/search?c=Theses')
browser.select_form(nr=0)
browser['p_un'] = 'jekyll'
browser['p_pw'] = 'j123ekyll'
browser.submit()
if browser.response().read().find("records found") > -1:
pass
else:
self.fail("Oops, Dr. Jekyll should be able to search Theses collection.")
def test_restricted_search_as_unauthorized_person(self):
"""websearch - restricted collection not searchable by unauthorized person"""
browser = Browser()
browser.open(CFG_SITE_URL + '/search?c=Theses')
browser.select_form(nr=0)
browser['p_un'] = 'hyde'
browser['p_pw'] = 'h123yde'
browser.submit()
# Mr. Hyde should not be able to connect:
if browser.response().read().find("Authorization failure") <= -1:
# if we got here, things are broken:
self.fail("Oops, Mr.Hyde should not be able to search Theses collection.")
def test_restricted_detailed_record_page_as_anonymous_guest(self):
"""websearch - restricted detailed record page not accessible to guests"""
browser = Browser()
browser.open(CFG_SITE_URL + '/%s/35' % CFG_SITE_RECORD)
if browser.response().read().find("You can use your nickname or your email address to login.") > -1:
pass
else:
self.fail("Oops, searching restricted collection without password should have redirected to login dialog.")
return
def test_restricted_detailed_record_page_as_authorized_person(self):
"""websearch - restricted detailed record page accessible to authorized person"""
browser = Browser()
browser.open(CFG_SITE_URL + '/youraccount/login')
browser.select_form(nr=0)
browser['p_un'] = 'jekyll'
browser['p_pw'] = 'j123ekyll'
browser.submit()
browser.open(CFG_SITE_URL + '/%s/35' % CFG_SITE_RECORD)
# Dr. Jekyll should be able to connect
# (add the pw to the whole CFG_SITE_URL because we shall be
# redirected to '/reordrestricted/'):
if browser.response().read().find("A High-performance Video Browsing System") > -1:
pass
else:
self.fail("Oops, Dr. Jekyll should be able to access restricted detailed record page.")
def test_restricted_detailed_record_page_as_unauthorized_person(self):
"""websearch - restricted detailed record page not accessible to unauthorized person"""
browser = Browser()
browser.open(CFG_SITE_URL + '/youraccount/login')
browser.select_form(nr=0)
browser['p_un'] = 'hyde'
browser['p_pw'] = 'h123yde'
browser.submit()
browser.open(CFG_SITE_URL + '/%s/35' % CFG_SITE_RECORD)
# Mr. Hyde should not be able to connect:
if browser.response().read().find('You are not authorized') <= -1:
# if we got here, things are broken:
self.fail("Oops, Mr.Hyde should not be able to access restricted detailed record page.")
def test_collection_restricted_p(self):
"""websearch - collection_restricted_p"""
self.failUnless(collection_restricted_p('Theses'), True)
self.failIf(collection_restricted_p('Books & Reports'))
def test_get_permitted_restricted_collections(self):
"""websearch - get_permitted_restricted_collections"""
from invenio.webuser import get_uid_from_email, collect_user_info
self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('[email protected]'))), ['Theses', 'Drafts'])
self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('[email protected]'))), [])
self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('[email protected]'))), ['ALEPH Theses', 'ALEPH Internal Notes', 'Atlantis Times Drafts'])
self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('[email protected]'))), ['ISOLDE Internal Notes'])
def test_restricted_record_has_restriction_flag(self):
"""websearch - restricted record displays a restriction flag"""
browser = Browser()
browser.open(CFG_SITE_URL + '/%s/42/files/' % CFG_SITE_RECORD)
browser.select_form(nr=0)
browser['p_un'] = 'jekyll'
browser['p_pw'] = 'j123ekyll'
browser.submit()
if browser.response().read().find("Restricted") > -1:
pass
else:
self.fail("Oops, a 'Restricted' flag should appear on restricted records.")
browser.open(CFG_SITE_URL + '/%s/42/files/comments' % CFG_SITE_RECORD)
if browser.response().read().find("Restricted") > -1:
pass
else:
self.fail("Oops, a 'Restricted' flag should appear on restricted records.")
        # The flag should also appear on records that exist both in a public
        # and a restricted collection:
error_messages = test_web_page_content(CFG_SITE_URL + '/%s/109' % CFG_SITE_RECORD,
username='admin',
password='',
expected_text=['Restricted'])
if error_messages:
self.fail("Oops, a 'Restricted' flag should appear on restricted records.")
class WebSearchRestrictedCollectionHandlingTest(InvenioTestCase):
"""
    Check how the handling of restricted (and restricted "hidden") collections
    works: (i) whether the user has rights to access specific records or
    collections, (ii) whether public and restricted results are displayed in
    the right position in the collection tree, and (iii) whether the right
    warning is displayed depending on the case.
    Changes in the collection tree used for testing (the records used for
    testing are shown as well):
Articles & Preprints Books & Reports
_____________|________________ ____________|_____________
| | | | | | |
Articles Drafts(r) Notes Preprints Books Theses(r) Reports
69 77 109 10 105
77 98 98
108 105
CERN Experiments
_________________________|___________________________
| |
ALEPH ISOLDE
_________________|_________________ ____________|_____________
| | | | |
ALEPH ALEPH ALEPH ISOLDE ISOLDE
Papers Internal Notes(r) Theses(r) Papers Internal Notes(r&h)
10 109 105 69 110
108 106
Authorized users:
jekyll -> Drafts, Theses
balthasar -> ALEPH Internal Notes, ALEPH Theses
dorian -> ISOLDE Internal Notes
"""
    def test_show_public_colls_in_warning_as_unauthorized_user(self):
        """websearch - show public daughter collections in warning to unauthorized user"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=recid:20',
username='hyde',
password='h123yde',
expected_text=['No match found in collection <em>Articles, Preprints, Notes</em>.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_show_public_and_restricted_colls_in_warning_as_authorized_user(self):
"""websearch - show public and restricted daugther collections in warning to authorized user"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=recid:20',
username='jekyll',
password='j123ekyll',
expected_text=['No match found in collection <em>Articles, Preprints, Notes, Drafts</em>.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_in_different_colls_as_unauthorized_user(self):
"""websearch - record belongs to different restricted collections with different rights, user not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=105&f=recid',
username='hyde',
password='h123yde',
expected_text=['No public collection matched your query.'],
unexpected_text=['records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_in_different_colls_as_authorized_user_of_one_coll(self):
"""websearch - record belongs to different restricted collections with different rights, balthasar has rights to one of them"""
from invenio.config import CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
if policy == 'ANY':
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='balthasar',
password='b123althasar',
expected_text=['[CERN-THESIS-99-074]'],
unexpected_text=['No public collection matched your query.'])
else:
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='balthasar',
password='b123althasar',
expected_text=['No public collection matched your query.'],
unexpected_text=['[CERN-THESIS-99-074]'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_in_different_colls_as_authorized_user_of_two_colls(self):
"""websearch - record belongs to different restricted collections with different rights, jekyll has rights to two of them"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='jekyll',
password='j123ekyll',
expected_text=['Articles & Preprints', 'Books & Reports'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_in_different_colls_as_authorized_user_of_all_colls(self):
"""websearch - record belongs to different restricted collections with different rights, admin has rights to all of them"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
expected_text=['Articles & Preprints', 'Books & Reports', 'ALEPH Theses'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_not_dad_coll(self):
"""websearch - record belongs to different restricted collections with different rights, search from a not dad collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Multimedia+%26+Arts&sc=1&p=recid%3A105&f=&action_search=Search&c=Pictures&c=Poetry&c=Atlantis+Times',
username='admin',
expected_text='No match found in collection',
expected_link_label='1 hits')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_public_and_restricted_record_as_unauthorized_user(self):
"""websearch - record belongs to different public and restricted collections, user not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=geometry&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts&of=id&so=a',
username='guest',
expected_text='[80, 86]',
unexpected_text='[40, 80, 86]')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_public_and_restricted_record_as_authorized_user(self):
"""websearch - record belongs to different public and restricted collections, admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=geometry&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts&of=id&so=a',
username='admin',
password='',
expected_text='[40, 80, 86]')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_public_and_restricted_record_of_focus_as_unauthorized_user(self):
"""websearch - record belongs to both a public and a restricted collection of "focus on", user not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=109&f=recid',
username='hyde',
password='h123yde',
expected_text=['No public collection matched your query'],
unexpected_text=['LEP Center-of-Mass Energies in Presence of Opposite'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_public_and_restricted_record_of_focus_as_authorized_user(self):
"""websearch - record belongs to both a public and a restricted collection of "focus on", user has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=109&f=recid&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='balthasar',
password='b123althasar',
expected_text=['Articles & Preprints', 'ALEPH Internal Notes', 'LEP Center-of-Mass Energies in Presence of Opposite'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_public_and_restricted_record_from_not_dad_coll_as_authorized_user(self):
"""websearch - record belongs to both a public and a restricted collection, search from a not dad collection, admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=recid%3A98&f=&action_search=Search&c=Books&c=Reports',
username='admin',
password='',
expected_text='No match found in collection <em>Books, Theses, Reports</em>',
expected_link_label='1 hits')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_public_and_restricted_record_from_not_dad_coll_as_unauthorized_user(self):
"""websearch - record belongs to both a public and a restricted collection, search from a not dad collection, hyde not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=recid%3A98&f=&action_search=Search&c=Books&c=Reports',
username='hyde',
password='h123yde',
expected_text='No public collection matched your query',
unexpected_text='No match found in collection')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_of_focus_as_authorized_user(self):
"""websearch - record belongs to a restricted collection of "focus on", balthasar has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=106&f=recid&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts&of=id',
username='balthasar',
password='b123althasar',
expected_text='[106]',
unexpected_text='[]')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_display_dad_coll_of_restricted_coll_as_unauthorized_user(self):
"""websearch - unauthorized user displays a collection that contains a restricted collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=&f=&action_search=Search&c=Articles&c=Drafts&c=Preprints',
username='guest',
expected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_display_dad_coll_of_restricted_coll_as_authorized_user(self):
"""websearch - authorized user displays a collection that contains a restricted collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=&f=&action_search=Search&c=Articles&c=Drafts&c=Notes&c=Preprints',
username='jekyll',
password='j123ekyll',
expected_text=['Articles', 'Drafts', 'Notes', 'Preprints'],
unexpected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_coll_of_focus_as_unauthorized_user(self):
"""websearch - search for a record that belongs to a restricted collection from a collection of "focus on" , jekyll not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=CERN+Divisions&sc=1&p=recid%3A106&f=&action_search=Search&c=Experimental+Physics+(EP)&c=Theoretical+Physics+(TH)',
username='jekyll',
password='j123ekyll',
expected_text=['No public collection matched your query.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_coll_of_focus_as_authorized_user(self):
"""websearch - search for a record that belongs to a restricted collection from a collection of "focus on" , admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=CERN+Divisions&sc=1&p=recid%3A106&f=&action_search=Search&c=Experimental+Physics+(EP)&c=Theoretical+Physics+(TH)',
username='admin',
password='',
expected_text='No match found in collection <em>Experimental Physics (EP), Theoretical Physics (TH)</em>.',
expected_link_label='1 hits')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_not_direct_dad_coll_and_display_in_right_position_in_tree(self):
"""websearch - search for a restricted record from not direct dad collection and display it on its right position in the tree"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=recid%3A40&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
password='',
expected_text=['Books & Reports','[LBL-22304]'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_direct_dad_coll_and_display_in_right_position_in_tree(self):
"""websearch - search for a restricted record from the direct dad collection and display it on its right position in the tree"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=recid%3A40&f=&action_search=Search&c=Books&c=Reports',
username='admin',
password='',
expected_text=['Theses', '[LBL-22304]'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_and_hidden_record_as_unauthorized_user(self):
"""websearch - search for a "hidden" record, user not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=recid%3A110&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='guest',
expected_text=['If you were looking for a non-public document'],
unexpected_text=['If you were looking for a hidden document'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_and_hidden_record_as_authorized_user(self):
"""websearch - search for a "hidden" record, admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=recid%3A110&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
password='',
expected_text=['If you were looking for a hidden document, please type the correct URL for this record.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_enter_url_of_restricted_and_hidden_coll_as_unauthorized_user(self):
"""websearch - unauthorized user types the concret URL of a "hidden" collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=ISOLDE+Internal+Notes&sc=1&p=&f=&action_search=Search',
username='guest',
expected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_enter_url_of_restricted_and_hidden_coll_as_authorized_user(self):
"""websearch - authorized user types the concret URL of a "hidden" collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=ISOLDE+Internal+Notes&sc=1&p=&f=&action_search=Search',
username='dorian',
password='d123orian',
expected_text=['ISOLDE Internal Notes', '[CERN-PS-PA-Note-93-04]'],
unexpected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_the_top_as_unauthorized_user(self):
"""websearch - unauthorized user searches for a pattern from the top"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=of&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='guest',
expected_text=['Articles & Preprints', '61', 'records found',
'Books & Reports', '2', 'records found',
'Multimedia & Arts', '14', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_the_top_as_authorized_user(self):
"""websearch - authorized user searches for a pattern from the top"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=of&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
password='',
expected_text=['Articles & Preprints', '61', 'records found',
'Books & Reports', '6', 'records found',
'Multimedia & Arts', '14', 'records found',
'ALEPH Theses', '1', 'records found',
'ALEPH Internal Notes', '1', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
    def test_search_for_pattern_from_a_specific_coll_as_unauthorized_user(self):
"""websearch - unauthorized user searches for a pattern from one specific collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=of&f=&action_search=Search&c=Books&c=Reports',
username='guest',
expected_text=['Books', '1', 'records found',
'Reports', '1', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
    def test_search_for_pattern_from_a_specific_coll_as_authorized_user(self):
"""websearch - authorized user searches for a pattern from one specific collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=of&f=&action_search=Search&c=Books&c=Reports',
username='admin',
password='',
expected_text=['Books', '1', 'records found',
'Reports', '1', 'records found',
'Theses', '4', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
class WebSearchRestrictedPicturesTest(InvenioTestCase):
"""
    Check whether restricted pictures on the demo site can be accessed
    by people who have the right to access them.
"""
def test_restricted_pictures_guest(self):
"""websearch - restricted pictures not available to guest"""
error_messages = test_web_page_content(CFG_SITE_URL + '/%s/1/files/0106015_01.jpg' % CFG_SITE_RECORD,
expected_text=['This file is restricted. If you think you have right to access it, please authenticate yourself.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_pictures_romeo(self):
"""websearch - restricted pictures available to Romeo"""
error_messages = test_web_page_content(CFG_SITE_URL + '/%s/1/files/0106015_01.jpg' % CFG_SITE_RECORD,
username='romeo',
password='r123omeo',
expected_text=[],
unexpected_text=['This file is restricted',
'You are not authorized'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_pictures_hyde(self):
"""websearch - restricted pictures not available to Mr. Hyde"""
error_messages = test_web_page_content(CFG_SITE_URL + '/%s/1/files/0106015_01.jpg' % CFG_SITE_RECORD,
username='hyde',
password='h123yde',
expected_text=['This file is restricted',
'You are not authorized'])
if error_messages:
self.failUnless("HTTP Error 401: Unauthorized" in merge_error_messages(error_messages))
class WebSearchRestrictedWebJournalFilesTest(InvenioTestCase):
"""
    Check whether files attached to a WebJournal article are properly
    accessible once the article is published.
"""
def test_restricted_files_guest(self):
"""websearch - files of unreleased articles are not available to guest"""
# Record is not public...
self.assertEqual(record_public_p(112), False)
# ... and guest cannot access attached files
error_messages = test_web_page_content(CFG_SITE_URL + '/%s/112/files/journal_galapagos_archipelago.jpg' % CFG_SITE_RECORD,
expected_text=['This file is restricted. If you think you have right to access it, please authenticate yourself.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_files_editor(self):
"""websearch - files of unreleased articles are available to editor"""
# Record is not public...
self.assertEqual(record_public_p(112), False)
# ... but editor can access attached files
error_messages = test_web_page_content(CFG_SITE_URL + '/%s/112/files/journal_galapagos_archipelago.jpg' % CFG_SITE_RECORD,
username='balthasar',
password='b123althasar',
expected_text=[],
unexpected_text=['This file is restricted',
'You are not authorized'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_public_files_guest(self):
"""websearch - files of released articles are available to guest"""
# Record is not public...
self.assertEqual(record_public_p(111), False)
# ... but user can access attached files, as article is released
error_messages = test_web_page_content(CFG_SITE_URL + '/%s/111/files/journal_scissor_beak.jpg' % CFG_SITE_RECORD,
expected_text=[],
unexpected_text=['This file is restricted',
'You are not authorized'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_really_restricted_files_guest(self):
"""websearch - restricted files of released articles are not available to guest"""
# Record is not public...
self.assertEqual(record_public_p(111), False)
        # ... and user cannot access restricted attachments, even if
# article is released
error_messages = test_web_page_content(CFG_SITE_URL + '/%s/111/files/restricted-journal_scissor_beak.jpg' % CFG_SITE_RECORD,
expected_text=['This file is restricted. If you think you have right to access it, please authenticate yourself.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_picture_has_restriction_flag(self):
"""websearch - restricted files displays a restriction flag"""
error_messages = test_web_page_content(CFG_SITE_URL + '/%s/1/files/' % CFG_SITE_RECORD,
expected_text="Restricted")
if error_messages:
self.fail(merge_error_messages(error_messages))
class WebSearchRSSFeedServiceTest(InvenioTestCase):
"""Test of the RSS feed service."""
def test_rss_feed_service(self):
"""websearch - RSS feed service"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/rss',
expected_text='<rss version="2.0"'))
class WebSearchXSSVulnerabilityTest(InvenioTestCase):
"""Test possible XSS vulnerabilities of the search engine."""
def test_xss_in_collection_interface_page(self):
"""websearch - no XSS vulnerability in collection interface pages"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/?c=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E',
expected_text='Collection &lt;SCRIPT&gt;alert("XSS");&lt;/SCRIPT&gt; Not Found'))
def test_xss_in_collection_search_page(self):
"""websearch - no XSS vulnerability in collection search pages"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?c=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E',
expected_text='Collection <SCRIPT>alert("XSS");</SCRIPT> Not Found'))
def test_xss_in_simple_search(self):
"""websearch - no XSS vulnerability in simple search"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E',
expected_text='Search term <em><SCRIPT>alert("XSS");</SCRIPT></em> did not match any record.'))
def test_xss_in_structured_search(self):
"""websearch - no XSS vulnerability in structured search"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&f=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E',
expected_text='No word index is available for <em><script>alert("xss");</script></em>.'))
def test_xss_in_advanced_search(self):
"""websearch - no XSS vulnerability in advanced search"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?as=1&p1=ellis&f1=author&op1=a&p2=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&f2=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&m2=e',
expected_text='Search term <em><SCRIPT>alert("XSS");</SCRIPT></em> inside index <em><script>alert("xss");</script></em> did not match any record.'))
def test_xss_in_browse(self):
"""websearch - no XSS vulnerability in browse"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&f=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&action_browse=Browse',
expected_text='<SCRIPT>alert("XSS");</SCRIPT>'))
class WebSearchResultsOverview(InvenioTestCase):
"""Test of the search results page's Results overview box and links."""
def test_results_overview_split_off(self):
"""websearch - results overview box when split by collection is off"""
browser = Browser()
browser.open(CFG_SITE_URL + '/search?p=of&sc=0')
body = browser.response().read()
if body.find("Results overview") > -1:
self.fail("Oops, when split by collection is off, "
"results overview should not be present.")
if body.find('<a name="1"></a>') == -1:
self.fail("Oops, when split by collection is off, "
"Atlantis collection should be found.")
if body.find('<a name="15"></a>') > -1:
self.fail("Oops, when split by collection is off, "
"Multimedia & Arts should not be found.")
try:
browser.find_link(url='#15')
self.fail("Oops, when split by collection is off, "
"a link to Multimedia & Arts should not be found.")
except LinkNotFoundError:
pass
def test_results_overview_split_on(self):
"""websearch - results overview box when split by collection is on"""
browser = Browser()
browser.open(CFG_SITE_URL + '/search?p=of&sc=1')
body = browser.response().read()
if body.find("Results overview") == -1:
self.fail("Oops, when split by collection is on, "
"results overview should be present.")
if body.find('<a name="Atlantis%20Institute%20of%20Fictive%20Science"></a>') > -1:
self.fail("Oops, when split by collection is on, "
"Atlantis collection should not be found.")
if body.find('<a name="15"></a>') == -1:
self.fail("Oops, when split by collection is on, "
"Multimedia & Arts should be found.")
try:
browser.find_link(url='#15')
except LinkNotFoundError:
self.fail("Oops, when split by collection is on, "
"a link to Multimedia & Arts should be found.")
class WebSearchSortResultsTest(InvenioTestCase):
"""Test of the search results page's sorting capability."""
def test_sort_results_default(self):
"""websearch - search results sorting, default method"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=of&f=title&rg=3',
expected_text="CMS animation of the high-energy collisions"))
def test_sort_results_ascending(self):
"""websearch - search results sorting, ascending field"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=of&f=title&rg=2&sf=reportnumber&so=a',
expected_text="[astro-ph/0104076]"))
def test_sort_results_descending(self):
"""websearch - search results sorting, descending field"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=of&f=title&rg=1&sf=reportnumber&so=d',
expected_text=" [TESLA-FEL-99-07]"))
def test_sort_results_sort_pattern(self):
"""websearch - search results sorting, preferential sort pattern"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=of&f=title&rg=1&sf=reportnumber&so=d&sp=cern',
expected_text="[CERN-TH-2002-069]"))
class WebSearchSearchResultsXML(InvenioTestCase):
"""Test search results in various output"""
def test_search_results_xm_output_split_on(self):
""" websearch - check document element of search results in xm output (split by collection on)"""
browser = Browser()
browser.open(CFG_SITE_URL + '/search?sc=1&of=xm')
body = browser.response().read()
num_doc_element = body.count("<collection "
"xmlns=\"http://www.loc.gov/MARC21/slim\">")
if num_doc_element == 0:
self.fail("Oops, no document element <collection "
"xmlns=\"http://www.loc.gov/MARC21/slim\">"
"found in search results.")
elif num_doc_element > 1:
self.fail("Oops, multiple document elements <collection> "
"found in search results.")
num_doc_element = body.count("</collection>")
if num_doc_element == 0:
self.fail("Oops, no document element </collection> "
"found in search results.")
elif num_doc_element > 1:
self.fail("Oops, multiple document elements </collection> "
"found in search results.")
def test_search_results_xm_output_split_off(self):
""" websearch - check document element of search results in xm output (split by collection off)"""
browser = Browser()
browser.open(CFG_SITE_URL + '/search?sc=0&of=xm')
body = browser.response().read()
num_doc_element = body.count("<collection "
"xmlns=\"http://www.loc.gov/MARC21/slim\">")
if num_doc_element == 0:
self.fail("Oops, no document element <collection "
"xmlns=\"http://www.loc.gov/MARC21/slim\">"
"found in search results.")
elif num_doc_element > 1:
self.fail("Oops, multiple document elements <collection> "
"found in search results.")
num_doc_element = body.count("</collection>")
if num_doc_element == 0:
self.fail("Oops, no document element </collection> "
"found in search results.")
elif num_doc_element > 1:
self.fail("Oops, multiple document elements </collection> "
"found in search results.")
def test_search_results_xd_output_split_on(self):
""" websearch - check document element of search results in xd output (split by collection on)"""
browser = Browser()
browser.open(CFG_SITE_URL + '/search?sc=1&of=xd')
body = browser.response().read()
num_doc_element = body.count("<collection")
if num_doc_element == 0:
self.fail("Oops, no document element <collection "
"xmlns=\"http://www.loc.gov/MARC21/slim\">"
"found in search results.")
elif num_doc_element > 1:
self.fail("Oops, multiple document elements <collection> "
"found in search results.")
num_doc_element = body.count("</collection>")
if num_doc_element == 0:
self.fail("Oops, no document element </collection> "
"found in search results.")
elif num_doc_element > 1:
self.fail("Oops, multiple document elements </collection> "
"found in search results.")
def test_search_results_xd_output_split_off(self):
""" websearch - check document element of search results in xd output (split by collection off)"""
browser = Browser()
browser.open(CFG_SITE_URL + '/search?sc=0&of=xd')
body = browser.response().read()
num_doc_element = body.count("<collection>")
if num_doc_element == 0:
self.fail("Oops, no document element <collection "
"xmlns=\"http://www.loc.gov/MARC21/slim\">"
"found in search results.")
elif num_doc_element > 1:
self.fail("Oops, multiple document elements <collection> "
"found in search results.")
num_doc_element = body.count("</collection>")
if num_doc_element == 0:
self.fail("Oops, no document element </collection> "
"found in search results.")
elif num_doc_element > 1:
self.fail("Oops, multiple document elements </collection> "
"found in search results.")
class WebSearchUnicodeQueryTest(InvenioTestCase):
"""Test of the search results for queries containing Unicode characters."""
def test_unicode_word_query(self):
"""websearch - Unicode word query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=title%3A%CE%99%CE%B8%CE%AC%CE%BA%CE%B7',
expected_text="[76]"))
def test_unicode_word_query_not_found_term(self):
"""websearch - Unicode word query, not found term"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=title%3A%CE%99%CE%B8',
expected_text="ιθάκη"))
def test_unicode_exact_phrase_query(self):
"""websearch - Unicode exact phrase query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=title%3A%22%CE%99%CE%B8%CE%AC%CE%BA%CE%B7%22',
expected_text="[76]"))
def test_unicode_partial_phrase_query(self):
"""websearch - Unicode partial phrase query"""
        # no hits are expected for this example title partial phrase
        # query, because the difference between double-quoted and
        # single-quoted searches has been removed:
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=title%3A%27%CE%B7%27',
expected_text="[]"))
def test_unicode_regexp_query(self):
"""websearch - Unicode regexp query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=title%3A%2F%CE%B7%2F',
expected_text="[76]"))
class WebSearchMARCQueryTest(InvenioTestCase):
"""Test of the search results for queries containing physical MARC tags."""
def test_single_marc_tag_exact_phrase_query(self):
"""websearch - single MARC tag, exact phrase query (100__a)"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=100__a%3A%22Ellis%2C+J%22&so=a',
expected_text="[9, 14, 18]"))
def test_single_marc_tag_partial_phrase_query(self):
"""websearch - single MARC tag, partial phrase query (245__b)"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=245__b%3A%27and%27',
expected_text="[28]"))
def test_many_marc_tags_partial_phrase_query(self):
"""websearch - many MARC tags, partial phrase query (245)"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=245%3A%27and%27&rg=100&so=a',
expected_text="[1, 8, 9, 14, 15, 20, 22, 24, 28, 33, 47, 48, 49, 51, 53, 64, 69, 71, 79, 82, 83, 85, 91, 96, 108]"))
def test_single_marc_tag_regexp_query(self):
"""websearch - single MARC tag, regexp query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=245%3A%2Fand%2F&rg=100&so=a',
expected_text="[1, 8, 9, 14, 15, 20, 22, 24, 28, 33, 47, 48, 49, 51, 53, 64, 69, 71, 79, 82, 83, 85, 91, 96, 108]"))
class WebSearchExtSysnoQueryTest(InvenioTestCase):
"""Test of queries using external system numbers."""
def test_existing_sysno_html_output(self):
"""websearch - external sysno query, existing sysno, HTML output"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?sysno=000289446CER',
expected_text="The wall of the cave"))
def test_existing_sysno_id_output(self):
"""websearch - external sysno query, existing sysno, ID output"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?sysno=000289446CER&of=id',
expected_text="[95]"))
def test_nonexisting_sysno_html_output(self):
"""websearch - external sysno query, non-existing sysno, HTML output"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?sysno=000289446CERRRR',
expected_text="Requested record does not seem to exist."))
def test_nonexisting_sysno_id_output(self):
"""websearch - external sysno query, non-existing sysno, ID output"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?sysno=000289446CERRRR&of=id',
expected_text="[]"))
class WebSearchResultsRecordGroupingTest(InvenioTestCase):
"""Test search results page record grouping (rg)."""
def test_search_results_rg_guest(self):
"""websearch - search results, records in groups of, guest"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?rg=17',
expected_text="1 - 17"))
def test_search_results_rg_nonguest(self):
"""websearch - search results, records in groups of, non-guest"""
        # This test used to fail because the saved user preference for
        # records-in-groups-of was not overridden by the URL rg argument.
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?rg=17',
username='admin',
expected_text="1 - 17"))
class WebSearchSpecialTermsQueryTest(InvenioTestCase):
"""Test of the search results for queries containing special terms."""
def test_special_terms_u1(self):
"""websearch - query for special terms, U(1)"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=U%281%29',
expected_text="[88, 80, 79, 57]"))
def test_special_terms_u1_and_sl(self):
"""websearch - query for special terms, U(1) SL(2,Z)"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=U%281%29+SL%282%2CZ%29',
expected_text="[88]"))
def test_special_terms_u1_and_sl_or(self):
"""websearch - query for special terms, U(1) OR SL(2,Z)"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=U%281%29+OR+SL%282%2CZ%29',
expected_text="[88, 80, 79, 57]"))
@nottest
def FIXME_TICKET_453_test_special_terms_u1_and_sl_or_parens(self):
"""websearch - query for special terms, (U(1) OR SL(2,Z))"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=%28U%281%29+OR+SL%282%2CZ%29%29',
expected_text="[57, 79, 80, 88]"))
def test_special_terms_u1_and_sl_in_quotes(self):
"""websearch - query for special terms, ('SL(2,Z)' OR 'U(1)')"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + "/search?of=id&p=%28%27SL%282%2CZ%29%27+OR+%27U%281%29%27%29",
expected_text="[96, 88, 80, 79, 57]"))
class WebSearchJournalQueryTest(InvenioTestCase):
"""Test of the search results for journal pubinfo queries."""
def test_query_journal_title_only(self):
"""websearch - journal publication info query, title only"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&f=journal&p=Phys.+Lett.+B&so=a',
expected_text="[78, 85, 87]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&f=journal&p=Phys.+Lett.+B&so=a',
username='admin',
expected_text="[77, 78, 85, 87]"))
def test_query_journal_full_pubinfo(self):
"""websearch - journal publication info query, full reference"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&f=journal&p=Phys.+Lett.+B+531+%282002%29+301',
expected_text="[78]"))
class WebSearchStemmedIndexQueryTest(InvenioTestCase):
"""Test of the search results for queries using stemmed indexes."""
def test_query_stemmed_lowercase(self):
"""websearch - stemmed index query, lowercase"""
# note that dasse/Dasse is stemmed into dass/Dass, as expected
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=dasse',
expected_text="[26, 25]"))
def test_query_stemmed_uppercase(self):
"""websearch - stemmed index query, uppercase"""
# ... but note also that DASSE is stemmed into DASSE(!); so
        # the test would fail if the search engine did not lowercase the
# query term. (Something that is not necessary for
# non-stemmed indexes.)
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?of=id&p=DASSE',
expected_text="[26, 25]"))
class WebSearchSummarizerTest(InvenioTestCase):
"""Test of the search results summarizer functions."""
def test_most_popular_field_values_singletag(self):
"""websearch - most popular field values, simple tag"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('PREPRINT', 37), ('ARTICLE', 28), ('BOOK', 14), ('THESIS', 8), ('PICTURE', 7),
('DRAFT', 2), ('POETRY', 2), ('REPORT', 2), ('ALEPHPAPER', 1), ('ATLANTISTIMESNEWS', 1),
('ISOLDEPAPER', 1)],
get_most_popular_field_values(range(0,100), '980__a'))
def test_most_popular_field_values_singletag_multiexclusion(self):
"""websearch - most popular field values, simple tag, multiple exclusions"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('PREPRINT', 37), ('ARTICLE', 28), ('BOOK', 14), ('DRAFT', 2), ('REPORT', 2),
('ALEPHPAPER', 1), ('ATLANTISTIMESNEWS', 1), ('ISOLDEPAPER', 1)],
get_most_popular_field_values(range(0,100), '980__a', ('THESIS', 'PICTURE', 'POETRY')))
def test_most_popular_field_values_multitag(self):
"""websearch - most popular field values, multiple tags"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('Ellis, J', 3), ('Enqvist, K', 1), ('Ibanez, L E', 1), ('Nanopoulos, D V', 1), ('Ross, G G', 1)],
get_most_popular_field_values((9, 14, 18), ('100__a', '700__a')))
def test_most_popular_field_values_multitag_singleexclusion(self):
"""websearch - most popular field values, multiple tags, single exclusion"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('Enqvist, K', 1), ('Ibanez, L E', 1), ('Nanopoulos, D V', 1), ('Ross, G G', 1)],
get_most_popular_field_values((9, 14, 18), ('100__a', '700__a'), ('Ellis, J')))
def test_most_popular_field_values_multitag_countrepetitive(self):
"""websearch - most popular field values, multiple tags, counting repetitive occurrences"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('THESIS', 2), ('REPORT', 1)],
get_most_popular_field_values((41,), ('690C_a', '980__a'), count_repetitive_values=True))
self.assertEqual([('REPORT', 1), ('THESIS', 1)],
get_most_popular_field_values((41,), ('690C_a', '980__a'), count_repetitive_values=False))
def test_ellis_citation_summary(self):
"""websearch - query ellis, citation summary output format"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=hcs',
expected_text="Less known papers (1-9)",
expected_link_target=CFG_BASE_URL+"/search?p=ellis%20AND%20cited%3A1-%3E9",
expected_link_label='1'))
def test_ellis_not_quark_citation_summary_advanced(self):
"""websearch - ellis and not quark, citation summary format advanced"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&as=1&m1=a&p1=ellis&f1=author&op1=n&m2=a&p2=quark&f2=&op2=a&m3=a&p3=&f3=&action_search=Search&sf=&so=a&rm=&rg=10&sc=1&of=hcs',
expected_text="Less known papers (1-9)",
expected_link_target=CFG_BASE_URL+'/search?p=author%3Aellis%20and%20not%20quark%20AND%20cited%3A1-%3E9',
expected_link_label='1'))
def test_ellis_not_quark_citation_summary_regular(self):
"""websearch - ellis and not quark, citation summary format advanced"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=author%3Aellis+and+not+quark&f=&action_search=Search&sf=&so=d&rm=&rg=10&sc=0&of=hcs',
expected_text="Less known papers (1-9)",
expected_link_target=CFG_BASE_URL+'/search?p=author%3Aellis%20and%20not%20quark%20AND%20cited%3A1-%3E9',
expected_link_label='1'))
class WebSearchRecordCollectionGuessTest(InvenioTestCase):
"""Primary collection guessing tests."""
def test_guess_primary_collection_of_a_record(self):
"""websearch - guess_primary_collection_of_a_record"""
self.assertEqual(guess_primary_collection_of_a_record(96), 'Articles')
def test_guess_collection_of_a_record(self):
"""websearch - guess_collection_of_a_record"""
self.assertEqual(guess_collection_of_a_record(96), 'Articles')
self.assertEqual(guess_collection_of_a_record(96, '%s/collection/Theoretical Physics (TH)?ln=en' % CFG_SITE_URL), 'Articles')
self.assertEqual(guess_collection_of_a_record(12, '%s/collection/Theoretical Physics (TH)?ln=en' % CFG_SITE_URL), 'Theoretical Physics (TH)')
self.assertEqual(guess_collection_of_a_record(12, '%s/collection/Theoretical%%20Physics%%20%%28TH%%29?ln=en' % CFG_SITE_URL), 'Theoretical Physics (TH)')
class WebSearchGetFieldValuesTest(InvenioTestCase):
"""Testing get_fieldvalues() function."""
def test_get_fieldvalues_001(self):
"""websearch - get_fieldvalues() for bibxxx-agnostic tags"""
self.assertEqual(get_fieldvalues(10, '001___'), ['10'])
def test_get_fieldvalues_980(self):
"""websearch - get_fieldvalues() for bibxxx-powered tags"""
self.assertEqual(get_fieldvalues(18, '700__a'), ['Enqvist, K', 'Nanopoulos, D V'])
self.assertEqual(get_fieldvalues(18, '909C1u'), ['CERN'])
def test_get_fieldvalues_wildcard(self):
"""websearch - get_fieldvalues() for tag wildcards"""
self.assertEqual(get_fieldvalues(18, '%'), [])
self.assertEqual(get_fieldvalues(18, '7%'), [])
self.assertEqual(get_fieldvalues(18, '700%'), ['Enqvist, K', 'Nanopoulos, D V'])
self.assertEqual(get_fieldvalues(18, '909C0%'), ['1985', '13','TH'])
def test_get_fieldvalues_recIDs(self):
"""websearch - get_fieldvalues() for list of recIDs"""
self.assertEqual(get_fieldvalues([], '001___'), [])
self.assertEqual(get_fieldvalues([], '700__a'), [])
self.assertEqual(get_fieldvalues([10, 13], '001___'), ['10', '13'])
self.assertEqual(get_fieldvalues([18, 13], '700__a'),
['Dawson, S', 'Ellis, R K', 'Enqvist, K', 'Nanopoulos, D V'])
def test_get_fieldvalues_repetitive(self):
"""websearch - get_fieldvalues() for repetitive values"""
self.assertEqual(get_fieldvalues([17, 18], '909C1u'),
['CERN', 'CERN'])
self.assertEqual(get_fieldvalues([17, 18], '909C1u', repetitive_values=True),
['CERN', 'CERN'])
self.assertEqual(get_fieldvalues([17, 18], '909C1u', repetitive_values=False),
['CERN'])
class WebSearchAddToBasketTest(InvenioTestCase):
"""Test of the add-to-basket presence depending on user rights."""
def test_add_to_basket_guest(self):
"""websearch - add-to-basket facility allowed for guests"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
expected_text='Add to basket'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
expected_text='<input name="recid" type="checkbox" value="10" />'))
def test_add_to_basket_jekyll(self):
"""websearch - add-to-basket facility allowed for Dr. Jekyll"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
expected_text='Add to basket',
username='jekyll',
password='j123ekyll'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
expected_text='<input name="recid" type="checkbox" value="10" />',
username='jekyll',
password='j123ekyll'))
def test_add_to_basket_hyde(self):
"""websearch - add-to-basket facility denied to Mr. Hyde"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
unexpected_text='Add to basket',
username='hyde',
password='h123yde'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
unexpected_text='<input name="recid" type="checkbox" value="10" />',
username='hyde',
password='h123yde'))
class WebSearchAlertTeaserTest(InvenioTestCase):
"""Test of the alert teaser presence depending on user rights."""
def test_alert_teaser_guest(self):
"""websearch - alert teaser allowed for guests"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
expected_link_label='email alert'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
expected_text='RSS feed'))
def test_alert_teaser_jekyll(self):
"""websearch - alert teaser allowed for Dr. Jekyll"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
expected_text='email alert',
username='jekyll',
password='j123ekyll'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
expected_text='RSS feed',
username='jekyll',
password='j123ekyll'))
def test_alert_teaser_hyde(self):
"""websearch - alert teaser allowed for Mr. Hyde"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
expected_text='email alert',
username='hyde',
password='h123yde'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
expected_text='RSS feed',
username='hyde',
password='h123yde'))
class WebSearchSpanQueryTest(InvenioTestCase):
"""Test of span queries."""
def test_span_in_word_index(self):
"""websearch - span query in a word index"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=year%3A1992-%3E1996&of=id&ap=0',
expected_text='[71, 69, 66, 17]'))
def test_span_in_phrase_index(self):
"""websearch - span query in a phrase index"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=year%3A%221992%22-%3E%221996%22&of=id&ap=0',
expected_text='[71, 69, 66, 17]'))
def test_span_in_bibxxx(self):
"""websearch - span query in MARC tables"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=909C0y%3A%221992%22-%3E%221996%22&of=id&ap=0',
expected_text='[71, 69, 66, 17]'))
def test_span_with_spaces(self):
"""websearch - no span query when a space is around"""
# useful for reaction search
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=title%3A%27mu%20--%3E%20e%27&of=id&ap=0',
expected_text='[67]'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=245%3A%27mu%20--%3E%20e%27&of=id&ap=0',
expected_text='[67]'))
def test_span_in_author(self):
"""websearch - span query in special author index"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%22Ellis,%20K%22-%3E%22Ellis,%20RZ%22&of=id&ap=0',
expected_text='[47, 18, 17, 14, 13, 12, 11, 9, 8]'))
class WebSearchReferstoCitedbyTest(InvenioTestCase):
"""Test of refersto/citedby search operators."""
def test_refersto_recid(self):
'websearch - refersto:recid:84'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Arecid%3A84&of=id&ap=0&so=a',
expected_text='[85, 88, 91]'))
def test_refersto_repno(self):
'websearch - refersto:reportnumber:hep-th/0205061'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Areportnumber%3Ahep-th/0205061&of=id&ap=0',
expected_text='[91]'))
def test_refersto_author_word(self):
'websearch - refersto:author:klebanov'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Aauthor%3Aklebanov&of=id&ap=0&so=a',
expected_text='[85, 86, 88, 91]'))
def test_refersto_author_phrase(self):
'websearch - refersto:author:"Klebanov, I"'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Aauthor%3A%22Klebanov,%20I%22&of=id&ap=0&so=a',
expected_text='[85, 86, 88, 91]'))
def test_citedby_recid(self):
'websearch - citedby:recid:92'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Arecid%3A92&of=id&ap=0&so=a',
expected_text='[74, 91]'))
def test_citedby_repno(self):
'websearch - citedby:reportnumber:hep-th/0205061'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Areportnumber%3Ahep-th/0205061&of=id&ap=0',
expected_text='[78]'))
def test_citedby_author_word(self):
'websearch - citedby:author:klebanov'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Aauthor%3Aklebanov&of=id&ap=0',
expected_text='[95]'))
def test_citedby_author_phrase(self):
'websearch - citedby:author:"Klebanov, I"'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Aauthor%3A%22Klebanov,%20I%22&of=id&ap=0',
expected_text='[95]'))
def test_refersto_bad_query(self):
'websearch - refersto:title:'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Atitle%3A',
expected_text='There are no records referring to title:.'))
def test_citedby_bad_query(self):
'websearch - citedby:title:'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Atitle%3A',
expected_text='There are no records cited by title:.'))
class WebSearchSPIRESSyntaxTest(InvenioTestCase):
"""Test of SPIRES syntax issues"""
if CFG_WEBSEARCH_SPIRES_SYNTAX > 0:
def test_and_not_parens(self):
'websearch - find a ellis, j and not a enqvist'
self.assertEqual([],
test_web_page_content(CFG_SITE_URL +'/search?p=find+a+ellis%2C+j+and+not+a+enqvist&of=id&ap=0&so=a',
expected_text='[9, 12, 14, 47]'))
if DATEUTIL_AVAILABLE:
def test_dadd_search(self):
'websearch - find da > today - 3650'
# XXX: assumes we've reinstalled our site in the last 10 years
# should return every document in the system
self.assertEqual([],
test_web_page_content(CFG_SITE_URL +'/search?ln=en&p=find+da+%3E+today+-+3650&f=&of=id&so=a&rg=0',
expected_text='[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 107, 108, 113, 127, 128]'))
class WebSearchDateQueryTest(InvenioTestCase):
"""Test various date queries."""
def setUp(self):
"""Establish variables we plan to re-use"""
self.empty = intbitset()
def test_search_unit_hits_for_datecreated_previous_millenia(self):
"""websearch - search_unit with datecreated returns >0 hits for docs in the last 1000 years"""
self.assertNotEqual(self.empty, search_unit('1000-01-01->9999-12-31', 'datecreated'))
def test_search_unit_hits_for_datemodified_previous_millenia(self):
"""websearch - search_unit with datemodified returns >0 hits for docs in the last 1000 years"""
self.assertNotEqual(self.empty, search_unit('1000-01-01->9999-12-31', 'datemodified'))
def test_search_unit_in_bibrec_for_datecreated_previous_millenia(self):
"""websearch - search_unit_in_bibrec with creationdate gets >0 hits for past 1000 years"""
self.assertNotEqual(self.empty, search_unit_in_bibrec("1000-01-01", "9999-12-31", 'creationdate'))
def test_search_unit_in_bibrec_for_datecreated_next_millenia(self):
"""websearch - search_unit_in_bibrec with creationdate gets 0 hits for after year 3000"""
self.assertEqual(self.empty, search_unit_in_bibrec("3000-01-01", "9999-12-31", 'creationdate'))
class WebSearchSynonymQueryTest(InvenioTestCase):
"""Test of queries using synonyms."""
def test_journal_phrvd(self):
"""websearch - search-time synonym search, journal title"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=PHRVD&f=journal&of=id',
expected_text="[72, 66]"))
def test_journal_phrvd_54_1996_4234(self):
"""websearch - search-time synonym search, journal article"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=PHRVD%2054%20%281996%29%204234&f=journal&of=id',
expected_text="[66]"))
def test_journal_beta_decay_title(self):
"""websearch - index-time synonym search, beta decay in title"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=beta+decay&f=title&of=id',
expected_text="[59]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%CE%B2+decay&f=title&of=id',
expected_text="[59]"))
def test_journal_beta_decay_global(self):
"""websearch - index-time synonym search, beta decay in any field"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=beta+decay&of=id',
expected_text="[59, 52]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%CE%B2+decay&of=id',
expected_text="[59]"))
def test_journal_beta_title(self):
"""websearch - index-time synonym search, beta in title"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=beta&f=title&of=id',
expected_text="[59]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%CE%B2&f=title&of=id',
expected_text="[59]"))
def test_journal_beta_global(self):
"""websearch - index-time synonym search, beta in any field"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=beta&of=id',
expected_text="[59, 52]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%CE%B2&of=id',
expected_text="[59]"))
class WebSearchWashCollectionsTest(InvenioTestCase):
"""Test if the collection argument is washed correctly"""
def test_wash_coll_when_coll_restricted(self):
"""websearch - washing of restricted daughter collections"""
self.assertEqual(
sorted(wash_colls(cc='', c=['Books & Reports', 'Theses'])[1]),
['Books & Reports', 'Theses'])
self.assertEqual(
sorted(wash_colls(cc='', c=['Books & Reports', 'Theses'])[2]),
['Books & Reports', 'Theses'])
class WebSearchAuthorCountQueryTest(InvenioTestCase):
"""Test of queries using authorcount fields."""
def test_journal_authorcount_word(self):
"""websearch - author count, word query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=4&f=authorcount&of=id&so=a',
expected_text="[51, 54, 59, 66, 92, 96]"))
def test_journal_authorcount_phrase(self):
"""websearch - author count, phrase query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%224%22&f=authorcount&of=id&so=a',
expected_text="[51, 54, 59, 66, 92, 96]"))
def test_journal_authorcount_span(self):
"""websearch - author count, span query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=authorcount%3A9-%3E16&of=id&so=a',
expected_text="[69, 71, 127]"))
def test_journal_authorcount_plus(self):
"""websearch - author count, plus query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=50%2B&f=authorcount&of=id&so=a',
expected_text="[10, 17]"))
class WebSearchItemCountQueryTest(InvenioTestCase):
"""Test of queries using itemcount field/index"""
def test_itemcount_plus(self):
"""websearch - item count, search for more than one item, using 'plus'"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=2%2B&f=itemcount&of=id&so=a',
expected_text="[31, 32, 34]"))
def test_itemcount_span(self):
"""websearch - item count, search for more than one item, using 'span'"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=2->10&f=itemcount&of=id&so=a',
expected_text="[31, 32, 34]"))
def test_itemcount_phrase(self):
"""websearch - item count, search for records with exactly two items, phrase"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=%222%22&f=itemcount&of=id&so=a',
expected_text="[31, 34]"))
def test_itemcount_records_with_two_items(self):
"""websearch - item count, search for records with exactly two items"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=2&f=itemcount&of=id&so=a',
expected_text="[31, 34]"))
class WebSearchFiletypeQueryTest(InvenioTestCase):
"""Test of queries using filetype fields."""
def test_mpg_filetype(self):
"""websearch - file type, query for tif extension"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=mpg&f=filetype&of=id',
expected_text="[113]"))
def test_tif_filetype_and_word_study(self):
"""websearch - file type, query for tif extension and word 'study'"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=study+filetype%3Atif&of=id',
expected_text="[71]"))
def test_pdf_filetype_and_phrase(self):
"""websearch - file type, query for pdf extension and phrase 'parameter test'"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=filetype%3Apdf+parameter+test&of=id&so=a',
expected_text="[50, 93]"))
class WebSearchFilenameQueryTest(InvenioTestCase):
"""Test of queries using filename fields."""
def test_search_for_main_file_name(self):
"""websearch - file name, query for name without extension"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=0402130&f=filename&of=id',
expected_text="[89]"))
def test_search_for_file_name_with_extension(self):
"""websearch - file name, query for name with extension"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=0210075.ps.gz&f=filename&of=id',
expected_text="[83]"))
def test_search_for_file_name_with_part_of_extension(self):
"""websearch - file name, query for name and part of extension"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=filename:0212138.ps&of=id&so=a',
expected_text="[84]"))
def test_search_for_file_name_with_wildcard(self):
"""websearch - file name, query with wildcard"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=filename:convert*&of=id&so=a',
expected_text="[66, 71, 97]"))
def test_search_for_file_name_with_span(self):
"""websearch - file name, query with span"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=filename:6->7&of=id&so=a',
expected_text="[3, 6]"))
def make_fake_request(admin_user=True):
environ = {'wsgi.errors': cStringIO.StringIO(),
'QUERY_STRING': '',
'PATH_INFO': ''}
if admin_user:
user_info = {'uid': 1, 'guest': '0', 'email': '', 'nickname': ''}
else:
user_info = {'uid': 2, 'guest': '1', 'email': '', 'nickname': ''}
user_info['precached_permitted_restricted_collections'] = get_permitted_restricted_collections(user_info)
buf = cStringIO.StringIO()
def start_response(status, response_headers, exc_info=None):
return buf.write
req = SimulatedModPythonRequest(environ, start_response)
req._user_info = user_info
req.test_output_buffer = buf
return req
class WebSearchPerformRequestSearchRefactoringTest(InvenioTestCase):
"""Tests the perform request search API after refactoring."""
def _run_test(self, test_args, expected_results):
params = {}
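        # test_args is a 'key=value;key=value' string: split it into a params
        # dict, turning plain comma-separated values (without ', ') into lists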
params.update(map(lambda y: (y[0], ',' in y[1] and ', ' not in y[1] and y[1].split(',') or y[1]), map(lambda x: x.split('=', 1), test_args.split(';'))))
if isinstance(expected_results, str):
req = make_fake_request()
params['req'] = req
recs = perform_request_search(**params)
if isinstance(expected_results, str):
recs = req.test_output_buffer.getvalue()
        # this is just used to generate the results from the search engine before refactoring
#if recs != expected_results:
# print test_args
# print params
# print recs
self.assertEqual(recs, expected_results, "Error, we expect: %s, and we received: %s" % (expected_results, recs))
def test_queries(self):
"""websearch - testing p_r_s standard arguments and their combinations"""
self._run_test('p=ellis;f=author;action=Search', [8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 47])
self._run_test('p=ellis;f=author;sf=title;action=Search', [8, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
self._run_test('p=ellis;f=author;sf=title;wl=5;action=Search', [8, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
self._run_test('p=ellis;f=author;sf=title;wl=5;so=a', [8, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
self._run_test('p=ellis;f=author;sf=title;wl=5;so=d', [13, 47, 10, 12, 18, 17, 11, 9, 14, 16, 8])
self._run_test('p=ell*;sf=title;wl=5', [8, 15, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
self._run_test('p=ell*;sf=title;wl=1', [10])
self._run_test('p=ell*;sf=title;wl=100', [8, 15, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
self._run_test('p=muon OR kaon;f=author;sf=title;wl=5;action=Search', [])
self._run_test('p=muon OR kaon;sf=title;wl=5;action=Search', [67, 12])
self._run_test('p=muon OR kaon;sf=title;wl=5;c=Articles,Preprints', [67, 12])
self._run_test('p=muon OR kaon;sf=title;wl=5;c=Articles', [67])
self._run_test('p=muon OR kaon;sf=title;wl=5;c=Preprints', [12])
self._run_test('p=el*;rm=citation;so=a', [2, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 23, 30, 32, 34, 47, 48, 51, 52, 54, 56, 58, 59, 92, 97, 100, 103, 109, 127, 128, 18, 74, 91, 94, 81])
self._run_test('p=el*;rm=citation;so=d', list(reversed([2, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 23, 30, 32, 34, 47, 48, 51, 52, 54, 56, 58, 59, 92, 97, 100, 103, 109, 127, 128, 18, 74, 91, 94, 81])))
if not get_external_word_similarity_ranker():
self._run_test('p=el*;rm=wrd', [2, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 23, 30, 32, 34, 47, 48, 51, 52, 54, 56, 58, 59, 74, 81, 91, 92, 94, 97, 100, 103, 109, 127, 128])
self._run_test('p=el*;sf=title', [100, 32, 8, 15, 16, 81, 97, 34, 23, 127, 58, 2, 14, 9, 128, 11, 30, 109, 52, 48, 94, 17, 56, 18, 91, 59, 12, 92, 74, 54, 103, 10, 51, 47, 13])
self._run_test('p=boson;rm=citation', [1, 47, 50, 107, 108, 77, 95])
if not get_external_word_similarity_ranker():
self._run_test('p=boson;rm=wrd', [47, 50, 77, 95, 108, 1, 107])
self._run_test('p1=ellis;f1=author;m1=a;op1=a;p2=john;f2=author;m2=a', [9, 12, 14, 18])
self._run_test('p1=ellis;f1=author;m1=o;op1=a;p2=john;f2=author;m2=o', [9, 12, 14, 18])
self._run_test('p1=ellis;f1=author;m1=e;op1=a;p2=john;f2=author;m2=e', [])
self._run_test('p1=ellis;f1=author;m1=a;op1=o;p2=john;f2=author;m2=a', [8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 47])
self._run_test('p1=ellis;f1=author;m1=o;op1=o;p2=john;f2=author;m2=o', [8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 47])
self._run_test('p1=ellis;f1=author;m1=e;op1=o;p2=john;f2=author;m2=e', [])
self._run_test('p1=ellis;f1=author;m1=a;op1=n;p2=john;f2=author;m2=a', [8, 10, 11, 13, 16, 17, 47])
self._run_test('p1=ellis;f1=author;m1=o;op1=n;p2=john;f2=author;m2=o', [8, 10, 11, 13, 16, 17, 47])
self._run_test('p1=ellis;f1=author;m1=e;op1=n;p2=john;f2=author;m2=e', [])
self._run_test('p=Ellis, J;ap=1', [9, 10, 11, 12, 14, 17, 18, 47])
self._run_test('p=Ellis, J;ap=0', [9, 10, 11, 12, 14, 17, 18, 47])
self._run_test('p=recid:148x', [])
self._run_test('p=recid:148x;of=xm;rg=200', "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<collection xmlns=\"http://www.loc.gov/MARC21/slim\">\n\n</collection>")
class WebSearchDOIQueryTest(InvenioTestCase):
"""Tests queries using doi field."""
def test_span_doi_search(self):
"""websearch - doi, span query 1->9"""
errors = test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=doi%3A1->9&of=id',
expected_text="[128, 127, 96]")
self.assertEqual(True, errors == [])
def test_doi_wildcard(self):
"""websearch - doi, query for '10.1063%'"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=doi%3A10.1063%25&of=id',
expected_text="[127]"))
def test_doi_negative_search(self):
"""websearch - doi, query for 'VDB:88636' """
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=VDB%3A88636&f=doi&of=id',
expected_text="[]"))
class WebSearchGetRecordTests(InvenioTestCase):
def setUp(self):
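        # create a bare bibliographic record directly in the database;
        # it is deleted again in tearDown()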
self.recid = run_sql("INSERT INTO bibrec(creation_date, modification_date) VALUES(NOW(), NOW())")
def tearDown(self):
run_sql("DELETE FROM bibrec WHERE id=%s", (self.recid,))
def test_get_record(self):
"""bibformat - test print_record and get_record of empty record"""
from invenio.search_engine import print_record, get_record
self.assertEqual(print_record(self.recid, 'xm'), ' <record>\n <controlfield tag="001">%s</controlfield>\n </record>\n\n ' % self.recid)
self.assertEqual(get_record(self.recid), {'001': [([], ' ', ' ', str(self.recid), 1)]})
class WebSearchExactTitleIndexTest(InvenioTestCase):
"""Checks if exact title index works correctly """
def test_exacttitle_query_solves_problems(self):
"""websearch - check exacttitle query solves problems"""
error_messages = []
error_messages.extend(test_web_page_content(CFG_SITE_URL + "/search?ln=en&p=exacttitle%3A'solves+problems'&f=&action_search=Search",
expected_text = "Non-compact supergravity solves problems"))
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_exacttitle_query_solve_problems(self):
"""websearch - check exacttitle query solve problems"""
error_messages = []
error_messages.extend(test_web_page_content(CFG_SITE_URL + "/search?ln=en&p=exacttitle%3A'solve+problems'&f=&action_search=Search",
expected_text = ['Search term', 'solve problems', 'did not match']))
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_exacttitle_query_photon_beam(self):
"""websearch - check exacttitle search photon beam"""
error_messages = []
error_messages.extend(test_web_page_content(CFG_SITE_URL + "/search?ln=en&p=exacttitle%3A'photon+beam'&f=&action_search=Search",
expected_text = "Development of photon beam diagnostics"))
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_exacttitle_query_photons_beam(self):
"""websearch - check exacttitle search photons beam"""
error_messages = []
error_messages.extend(test_web_page_content(CFG_SITE_URL + "/search?ln=en&p=exacttitle%3A'photons+beam'&f=&action_search=Search",
expected_text = ['Search term', 'photons beam', 'did not match']))
if error_messages:
self.fail(merge_error_messages(error_messages))
class WebSearchCustomCollectionBoxesName(InvenioTestCase):
"""Test if the custom collection box labels are correctly displayed"""
def test_custom_latest_additions_box_name(self):
"""websearch - test custom name for 'Latest additions' box in 'Videos' collection"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/collection/Videos?ln=en',
expected_text='Latest videos:'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/collection/Videos?ln=fr',
expected_text='Dernières vidéos:'))
# There is currently no translation for that box in Afrikaans:
# we must fall back to CFG_SITE_LANG
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/collection/Videos?ln=af',
expected_text='Latest videos:'))
def test_custom_narrow_by_box_name(self):
"""websearch - test custom name for 'Narrow by' box in 'CERN Divisions' collection"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/collection/CERN%20Divisions?ln=en',
expected_text='Browse by division:'))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/collection/CERN%20Divisions?ln=fr',
expected_text='Naviguer par division:'))
# There is currently no translation for that box in Afrikaans:
# we must fall back to CFG_SITE_LANG
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/collection/CERN%20Divisions?ln=af',
expected_text='Browse by division:'))
class WebSearchDetailedRecordTabsTest(InvenioTestCase):
def test_detailed_record(self):
"""websearch - check detailed record main tab"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/81',
expected_text='Decapitating Tadpoles',
unexpected_text='The server encountered an error'))
def test_detailed_record_references_tab(self):
"""websearch - check detailed record references tab"""
expected_refs = [
'References (37)',
'W. Fischler and L. Susskind, "Dilaton Tadpoles, String Condensates And Scale In-variance,"',
'A. Adams, O. Aharony, J. McGreevy, E. Silverstein,..., work in progress',
]
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/81/references',
expected_text=expected_refs))
def test_detailed_record_citations_tab(self):
"""websearch - check detailed record citations tab"""
expected_cites = [
'Filtering Gravity: Modification at Large Distances?',
]
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/record/81/citations',
expected_text=expected_cites,
unexpected_text='The server encountered an error'))
def test_detailed_record_keywords_tab(self):
"""websearch - check detailed record keywords tab"""
self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/record/81/keywords',
expected_text='Keywords',
unexpected_text='The server encountered an error'))
def test_detailed_record_comments_tab(self):
"""websearch - check detailed record comments tab"""
self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/record/81/comments',
expected_text='Comments',
unexpected_text='The server encountered an error'))
def test_detailed_record_usage_tab(self):
"""websearch - check detailed record usage tab"""
self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/record/81/usage',
expected_text='Usage statistics',
unexpected_text='The server encountered an error'))
def test_detailed_record_files_tab(self):
"""websearch - check detailed record files tab"""
self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/record/81/files',
expected_text='Files',
unexpected_text='The server encountered an error'))
def test_detailed_record_plots_tab(self):
"""websearch - check detailed record plots tab"""
self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/record/81/linkbacks',
expected_text='Plots',
unexpected_text='The server encountered an error'))
def test_detailed_record_holdings_tab(self):
"""websearch - check detailed record holdings tab"""
self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/record/81/linkbacks',
expected_text='Holdings',
unexpected_text='The server encountered an error'))
def test_detailed_record_linkback_tab(self):
"""websearch - check detailed record linkback tab"""
self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/record/81/linkbacks',
expected_text='Linkbacks',
unexpected_text='The server encountered an error'))
TEST_SUITE = make_test_suite(WebSearchWebPagesAvailabilityTest,
WebSearchTestSearch,
WebSearchTestBrowse,
WebSearchTestOpenURL,
WebSearchTestCollections,
WebSearchTestRecord,
WebSearchTestLegacyURLs,
WebSearchNearestTermsTest,
WebSearchBooleanQueryTest,
WebSearchAuthorQueryTest,
WebSearchSearchEnginePythonAPITest,
WebSearchSearchEngineWebAPITest,
WebSearchRecordWebAPITest,
WebSearchRestrictedCollectionTest,
WebSearchRestrictedCollectionHandlingTest,
WebSearchRestrictedPicturesTest,
WebSearchRestrictedWebJournalFilesTest,
WebSearchRSSFeedServiceTest,
WebSearchXSSVulnerabilityTest,
WebSearchResultsOverview,
WebSearchSortResultsTest,
WebSearchSearchResultsXML,
WebSearchUnicodeQueryTest,
WebSearchMARCQueryTest,
WebSearchExtSysnoQueryTest,
WebSearchResultsRecordGroupingTest,
WebSearchSpecialTermsQueryTest,
WebSearchJournalQueryTest,
WebSearchStemmedIndexQueryTest,
WebSearchSummarizerTest,
WebSearchRecordCollectionGuessTest,
WebSearchGetFieldValuesTest,
WebSearchAddToBasketTest,
WebSearchAlertTeaserTest,
WebSearchSpanQueryTest,
WebSearchReferstoCitedbyTest,
WebSearchSPIRESSyntaxTest,
WebSearchDateQueryTest,
WebSearchTestWildcardLimit,
WebSearchSynonymQueryTest,
WebSearchWashCollectionsTest,
WebSearchAuthorCountQueryTest,
WebSearchFiletypeQueryTest,
WebSearchFilenameQueryTest,
WebSearchDOIQueryTest,
WebSearchPerformRequestSearchRefactoringTest,
WebSearchGetRecordTests,
WebSearchExactTitleIndexTest,
WebSearchCJKTokenizedSearchTest,
WebSearchItemCountQueryTest,
WebSearchCustomCollectionBoxesName,
WebSearchDetailedRecordTabsTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
| chokribr/inveniotest | modules/websearch/lib/websearch_regression_tests.py | Python | gpl-2.0 | 256,725 |
#=======================================================================
#
# Python Lexical Analyser
#
# Exception classes
#
#=======================================================================
import exceptions
class PlexError(exceptions.Exception):
message = ""
class PlexTypeError(PlexError, TypeError):
pass
class PlexValueError(PlexError, ValueError):
pass
class InvalidRegex(PlexError):
pass
class InvalidToken(PlexError):
def __init__(self, token_number, message):
PlexError.__init__(self, "Token number %d: %s" % (token_number, message))
class InvalidScanner(PlexError):
pass
class AmbiguousAction(PlexError):
message = "Two tokens with different actions can match the same string"
def __init__(self):
pass
class UnrecognizedInput(PlexError):
scanner = None
position = None
state_name = None
def __init__(self, scanner, state_name):
self.scanner = scanner
self.position = scanner.position()
self.state_name = state_name
def __str__(self):
return ("'%s', line %d, char %d: Token not recognised in state %s"
% (self.position + (repr(self.state_name),)))
| onoga/wm | src/gnue/common/external/plex/Errors.py | Python | gpl-2.0 | 1,109 |
from miasm2.core.asmblock import disasmEngine
from miasm2.arch.msp430.arch import mn_msp430
class dis_msp430(disasmEngine):
def __init__(self, bs=None, **kwargs):
super(dis_msp430, self).__init__(mn_msp430, None, bs, **kwargs)
| stephengroat/miasm | miasm2/arch/msp430/disasm.py | Python | gpl-2.0 | 242 |
# -*- encoding: utf-8 -*-
from django.contrib import admin
from .models import ZoteroExtractorLog
### ZoteroExtractorLogAdmin
####################################################################################################
class ZoteroExtractorLogAdmin(admin.ModelAdmin):
model = ZoteroExtractorLog
list_display = ['item_key', 'version', 'timestamp', 'publication']
search_fields = ['item_key', 'version', 'publication__title', 'publication__slug']
####################################################################################################
####################################################################################################
### Register classes
####################################################################################################
####################################################################################################
admin.site.register(ZoteroExtractorLog, ZoteroExtractorLogAdmin)
| morelab/labman_ud | labman_ud/extractors/zotero/admin.py | Python | gpl-3.0 | 970 |
__VERSION__="ete2-2.2rev1026"
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
from clustertree import *
__all__ = clustertree.__all__
| zhangjiajie/tax_benchmark | script/ete2/clustering/__init__.py | Python | gpl-3.0 | 1,549 |
# Purpose: ac1009 table entries
# Created: 16.03.2011
# Copyright (C) 2011, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <[email protected]>"
from ..entity import GenericWrapper
from ..tags import DXFTag
from ..classifiedtags import ClassifiedTags
from ..dxfattr import DXFAttr, DXFAttributes, DefSubclass
_LAYERTEMPLATE = """ 0
LAYER
5
0
2
LAYERNAME
70
0
62
7
6
CONTINUOUS
"""
# noinspection PyAugmentAssignment,PyUnresolvedReferences
class Layer(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_LAYERTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'color': DXFAttr(62, None), # dxf color index, if < 0 layer is off
'linetype': DXFAttr(6, None),
}))
LOCK = 0b00000100
UNLOCK = 0b11111011
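    # Flag value 4 in the layer flags (group code 70) marks the layer as
    # locked: lock() sets the bit, unlock() clears it with the inverse mask.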
def is_locked(self):
return self.dxf.flags & Layer.LOCK > 0
def lock(self):
self.dxf.flags = self.dxf.flags | Layer.LOCK
def unlock(self):
self.dxf.flags = self.dxf.flags & Layer.UNLOCK
def is_off(self):
return self.dxf.color < 0
def is_on(self):
return not self.is_off()
def on(self):
self.dxf.color = abs(self.dxf.color)
def off(self):
self.dxf.color = -abs(self.dxf.color)
def get_color(self):
return abs(self.dxf.color)
def set_color(self, color):
color = abs(color) if self.is_on() else -abs(color)
self.dxf.color = color
_STYLETEMPLATE = """ 0
STYLE
5
0
2
STYLENAME
70
0
40
0.0
41
1.0
50
0.0
71
0
42
1.0
3
arial.ttf
4
"""
class Style(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_STYLETEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'height': DXFAttr(40, None), # fixed height, 0 if not fixed
'width': DXFAttr(41, None), # width factor
'oblique': DXFAttr(50, None), # oblique angle in degree, 0 = vertical
'text_generation_flags': DXFAttr(71, None), # 2 = backward, 4 = mirrored in Y
'last_height': DXFAttr(42, None), # last height used
'font': DXFAttr(3, None), # primary font file name
'bigfont': DXFAttr(4, None), # big font name, blank if none
}))
_LTYPETEMPLATE = """ 0
LTYPE
5
0
2
LTYPENAME
70
0
3
LTYPEDESCRIPTION
72
65
"""
class Linetype(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_LTYPETEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'description': DXFAttr(3, None),
'length': DXFAttr(40, None),
'items': DXFAttr(73, None),
}))
@classmethod
def new(cls, handle, dxfattribs=None, dxffactory=None):
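        # 'pattern' is not a plain DXF attribute: pop it from dxfattribs and
        # expand it into linetype tags in _setup_pattern() below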
if dxfattribs is not None:
pattern = dxfattribs.pop('pattern', [0.0])
else:
pattern = [0.0]
entity = super(Linetype, cls).new(handle, dxfattribs, dxffactory)
entity._setup_pattern(pattern)
return entity
def _setup_pattern(self, pattern):
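        # DXF linetype pattern encoding: group code 73 holds the number of
        # dash/dot items, 40 the total pattern length (pattern[0]) and one
        # 49 tag per element for the individual dash/dot/space lengths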
self.tags.noclass.append(DXFTag(73, len(pattern) - 1))
self.tags.noclass.append(DXFTag(40, float(pattern[0])))
self.tags.noclass.extend((DXFTag(49, float(p)) for p in pattern[1:]))
_VPORTTEMPLATE = """ 0
VPORT
5
0
2
VPORTNAME
70
0
10
0.0
20
0.0
11
1.0
21
1.0
12
70.0
22
50.0
13
0.0
23
0.0
14
0.5
24
0.5
15
0.5
25
0.5
16
0.0
26
0.0
36
1.0
17
0.0
27
0.0
37
0.0
40
70.
41
1.34
42
50.0
43
0.0
44
0.0
50
0.0
51
0.0
71
0
72
1000
73
1
74
3
75
0
76
0
77
0
78
0
"""
class Viewport(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_VPORTTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'lower_left': DXFAttr(10, 'Point2D'),
'upper_right': DXFAttr(11, 'Point2D'),
'center_point': DXFAttr(12, 'Point2D'),
'snap_base': DXFAttr(13, 'Point2D'),
'snap_spacing': DXFAttr(14, 'Point2D'),
'grid_spacing': DXFAttr(15, 'Point2D'),
'direction_point': DXFAttr(16, 'Point3D'),
'target_point': DXFAttr(17, 'Point3D'),
'height': DXFAttr(40, None),
'aspect_ratio': DXFAttr(41, None),
'lens_length': DXFAttr(42, None),
'front_clipping': DXFAttr(43, None),
'back_clipping': DXFAttr(44, None),
'snap_rotation': DXFAttr(50, None),
'view_twist': DXFAttr(51, None),
'status': DXFAttr(68, None),
'id': DXFAttr(69, None),
'view_mode': DXFAttr(71, None),
'circle_zoom': DXFAttr(72, None),
'fast_zoom': DXFAttr(73, None),
'ucs_icon': DXFAttr(74, None),
'snap_on': DXFAttr(75, None),
'grid_on': DXFAttr(76, None),
'snap_style': DXFAttr(77, None),
'snap_isopair': DXFAttr(78, None),
}))
_UCSTEMPLATE = """ 0
UCS
5
0
2
UCSNAME
70
0
10
0.0
20
0.0
30
0.0
11
1.0
21
0.0
31
0.0
12
0.0
22
1.0
32
0.0
"""
class UCS(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_UCSTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'origin': DXFAttr(10, 'Point3D'),
'xaxis': DXFAttr(11, 'Point3D'),
'yaxis': DXFAttr(12, 'Point3D'),
}))
_APPIDTEMPLATE = """ 0
APPID
5
0
2
APPNAME
70
0
"""
class AppID(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_APPIDTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
}))
_VIEWTEMPLATE = """ 0
VIEW
5
0
2
VIEWNAME
70
0
10
0.0
20
0.0
11
1.0
21
1.0
31
1.0
12
0.0
22
0.0
32
0.0
40
70.
41
1.0
42
50.0
43
0.0
44
0.0
50
0.0
71
0
"""
class View(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_VIEWTEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(5, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'height': DXFAttr(40, None),
'width': DXFAttr(41, None),
'center_point': DXFAttr(10, 'Point2D'),
'direction_point': DXFAttr(11, 'Point3D'),
'target_point': DXFAttr(12, 'Point3D'),
'lens_length': DXFAttr(42, None),
'front_clipping': DXFAttr(43, None),
'back_clipping': DXFAttr(44, None),
'view_twist': DXFAttr(50, None),
'view_mode': DXFAttr(71, None),
}))
_DIMSTYLETEMPLATE = """ 0
DIMSTYLE
105
0
2
DIMSTYLENAME
70
0
3
4
5
6
7
40
1.0
41
3.0
42
2.0
43
9.0
44
5.0
45
0.0
46
0.0
47
0.0
48
0.0
140
3.0
141
2.0
142
0.0
143
25.399999999999999
144
1.0
145
0.0
146
1.0
147
2.0
71
0
72
0
73
1
74
1
75
0
76
0
77
0
78
0
170
0
171
2
172
0
173
0
174
0
175
0
176
0
177
0
178
0
"""
class DimStyle(GenericWrapper):
TEMPLATE = ClassifiedTags.from_text(_DIMSTYLETEMPLATE)
DXFATTRIBS = DXFAttributes(DefSubclass(None, {
'handle': DXFAttr(105, None),
'name': DXFAttr(2, None),
'flags': DXFAttr(70, None),
'dimpost': DXFAttr(3, None),
'dimapost': DXFAttr(4, None),
'dimblk': DXFAttr(5, None),
'dimblk1': DXFAttr(6, None),
'dimblk2': DXFAttr(7, None),
'dimscale': DXFAttr(40, None),
'dimasz': DXFAttr(41, None),
'dimexo': DXFAttr(42, None),
'dimdli': DXFAttr(43, None),
'dimexe': DXFAttr(44, None),
'dimrnd': DXFAttr(45, None),
'dimdle': DXFAttr(46, None),
'dimtp': DXFAttr(47, None),
'dimtm': DXFAttr(48, None),
'dimtxt': DXFAttr(140, None),
'dimcen': DXFAttr(141, None),
'dimtsz': DXFAttr(142, None),
'dimaltf': DXFAttr(143, None),
'dimlfac': DXFAttr(144, None),
'dimtvp': DXFAttr(145, None),
'dimtfac': DXFAttr(146, None),
'dimgap': DXFAttr(147, None),
'dimtol': DXFAttr(71, None),
'dimlim': DXFAttr(72, None),
'dimtih': DXFAttr(73, None),
'dimtoh': DXFAttr(74, None),
'dimse1': DXFAttr(75, None),
'dimse2': DXFAttr(76, None),
'dimtad': DXFAttr(77, None),
'dimzin': DXFAttr(78, None),
'dimalt': DXFAttr(170, None),
'dimaltd': DXFAttr(171, None),
'dimtofl': DXFAttr(172, None),
'dimsah': DXFAttr(173, None),
'dimtix': DXFAttr(174, None),
'dimsoxd': DXFAttr(175, None),
'dimclrd': DXFAttr(176, None),
'dimclre': DXFAttr(177, None),
'dimclrt': DXFAttr(178, None),
}))
| lautr3k/RepRap-iTopie | odmt/ezdxf/ac1009/tableentries.py | Python | gpl-3.0 | 8,896 |
#!/usr/bin/python
# python code for interfacing to VC0706 cameras and grabbing a photo
# pretty basic stuff
# written by ladyada. MIT license
# revisions for Raspberrry Pi by Gordon Rush
import serial
BAUD = 38400
# this is the port on the Raspberry Pi; it will be different for serial ports on other systems.
PORT = "/dev/ttyAMA0"
TIMEOUT = 0.5 # I needed a longer timeout than ladyada's 0.2 value
SERIALNUM = 0 # start with 0, each camera should have a unique ID.
COMMANDSEND = 0x56
COMMANDREPLY = 0x76
COMMANDEND = 0x00
CMD_GETVERSION = 0x11
CMD_RESET = 0x26
CMD_TAKEPHOTO = 0x36
CMD_READBUFF = 0x32
CMD_GETBUFFLEN = 0x34
FBUF_CURRENTFRAME = 0x00
FBUF_NEXTFRAME = 0x01
FBUF_STOPCURRENTFRAME = 0x00
getversioncommand = [COMMANDSEND, SERIALNUM, CMD_GETVERSION, COMMANDEND]
resetcommand = [COMMANDSEND, SERIALNUM, CMD_RESET, COMMANDEND]
takephotocommand = [COMMANDSEND, SERIALNUM, CMD_TAKEPHOTO, 0x01, FBUF_STOPCURRENTFRAME]
getbufflencommand = [COMMANDSEND, SERIALNUM, CMD_GETBUFFLEN, 0x01, FBUF_CURRENTFRAME]
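# Every VC0706 reply starts with a 4-byte header: the reply marker (0x76),
# the camera serial number, the echoed command byte and a status byte,
# where 0x00 means success.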
def checkreply(r, b):
r = map( ord, r )
if( r[0] == COMMANDREPLY and r[1] == SERIALNUM and r[2] == b and r[3] == 0x00):
return True
return False
def reset():
cmd = ''.join( map( chr, resetcommand ) )
s.write(cmd)
reply = s.read(100)
r = list(reply)
if checkreply( r, CMD_RESET ):
return True
return False
def getversion():
cmd = ''.join( map( chr, getversioncommand ))
s.write(cmd)
reply = s.read(16)
r = list(reply)
# print r
if checkreply( r, CMD_GETVERSION ):
print r
return True
return False
def takephoto():
cmd = ''.join( map( chr, takephotocommand ))
s.write(cmd)
reply = s.read(5)
r = list(reply)
# print r
if( checkreply( r, CMD_TAKEPHOTO) and r[3] == chr(0x0)):
return True
return False
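# The get-buffer-length reply carries the JPEG size as four big-endian bytes
# (r[5]..r[8]); getbufferlength() reassembles them into a 32-bit value.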
def getbufferlength():
cmd = ''.join( map( chr, getbufflencommand ))
s.write(cmd)
reply = s.read(9)
r = list(reply)
if( checkreply( r, CMD_GETBUFFLEN) and r[4] == chr(0x4)):
l = ord(r[5])
l <<= 8
l += ord(r[6])
l <<= 8
l += ord(r[7])
l <<= 8
l += ord(r[8])
return l
return 0
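# Read-buffer command header: 0x0c announces that 12 argument bytes follow
# (frame buffer type, a mode byte 0x0a, 4-byte offset, 4-byte length and a
# 2-byte delay); readbuffer() appends those arguments below.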
readphotocommand = [COMMANDSEND, SERIALNUM, CMD_READBUFF, 0x0c, FBUF_CURRENTFRAME, 0x0a]
def readbuffer(bytes):
addr = 0 # the initial offset into the frame buffer
photo = []
	# bytes to read each time (must be a multiple of 4)
inc = 8192
while( addr < bytes ):
# on the last read, we may need to read fewer bytes.
chunk = min( bytes-addr, inc );
# append 4 bytes that specify the offset into the frame buffer
command = readphotocommand + [(addr >> 24) & 0xff,
(addr>>16) & 0xff,
(addr>>8 ) & 0xff,
addr & 0xff]
# append 4 bytes that specify the data length to read
command += [(chunk >> 24) & 0xff,
(chunk>>16) & 0xff,
(chunk>>8 ) & 0xff,
chunk & 0xff]
# append the delay
command += [1,0]
# print map(hex, command)
print "Reading", chunk, "bytes at", addr
# make a string out of the command bytes.
cmd = ''.join(map(chr, command))
s.write(cmd)
# the reply is a 5-byte header, followed by the image data
# followed by the 5-byte header again.
reply = s.read(5+chunk+5)
# convert the tuple reply into a list
r = list(reply)
if( len(r) != 5+chunk+5 ):
# retry the read if we didn't get enough bytes back.
print "Read", len(r), "Retrying."
continue
if( not checkreply(r, CMD_READBUFF)):
print "ERROR READING PHOTO"
return
# append the data between the header data to photo
photo += r[5:chunk+5]
# advance the offset into the frame buffer
addr += chunk
print addr, "Bytes written"
return photo
######## main
s = serial.Serial( PORT, baudrate=BAUD, timeout = TIMEOUT )
reset()
if( not getversion() ):
print "Camera not found"
exit(0)
print "VC0706 Camera found"
if takephoto():
print "Snap!"
bytes = getbufferlength()
print bytes, "bytes to read"
photo = readbuffer( bytes )
f = open( "photo.jpg", 'w' )
photodata = ''.join( photo )
f.write( photodata )
f.close()
| felipehfj/Arduino | libraries/Adafruit_VC0706_Serial_Camera_Library/raspi_camera.py | Python | gpl-3.0 | 3,977 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-07 02:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='produce',
name='plu',
field=models.IntegerField(unique=True),
),
]
| kkoci/orthosie | inventory/migrations/0002_auto_20151206_2111.py | Python | gpl-3.0 | 438 |
from netfields import InetAddressField, CidrAddressField
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from nodeshot.core.base.models import BaseAccessLevel
from ..managers import NetAccessLevelManager
from choices import IP_PROTOCOLS
class Ip(BaseAccessLevel):
""" IP Address Model """
interface = models.ForeignKey('net.Interface', verbose_name=_('interface'))
address = InetAddressField(verbose_name=_('ip address'), unique=True, db_index=True)
protocol = models.CharField(_('IP Protocol Version'), max_length=4, choices=IP_PROTOCOLS, default=IP_PROTOCOLS[0][0], blank=True)
netmask = CidrAddressField(_('netmask (CIDR, eg: 10.40.0.0/24)'), blank=True, null=True)
objects = NetAccessLevelManager()
class Meta:
app_label = 'net'
permissions = (('can_view_ip', 'Can view ip'),)
verbose_name = _('ip address')
verbose_name_plural = _('ip addresses')
def __unicode__(self):
return '%s: %s' % (self.protocol, self.address)
def clean(self, *args, **kwargs):
""" TODO """
# netaddr.IPAddress('10.40.2.1') in netaddr.IPNetwork('10.40.0.0/24')
pass
def save(self, *args, **kwargs):
"""
Determines ip protocol version automatically.
Stores address in interface shortcuts for convenience.
"""
self.protocol = 'ipv%d' % self.address.version
# save
super(Ip, self).save(*args, **kwargs)
# TODO: do we really need this?
# save shortcut on interfaces
#ip_cached_list = self.interface.ip_addresses
## if not present in interface shorctus add it to the list
#if str(self.address) not in ip_cached_list:
# # recalculate cached_ip_list
# recalculated_ip_cached_list = []
# for ip in self.interface.ip_set.all():
# recalculated_ip_cached_list.append(str(ip.address))
# # rebuild string in format "<ip_1>, <ip_2>"
# self.interface.data['ip_addresses'] = recalculated_ip_cached_list
# self.interface.save()
@property
def owner(self):
return self.interface.owner
if 'grappelli' in settings.INSTALLED_APPS:
@staticmethod
def autocomplete_search_fields():
return ('address__icontains',)
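# Illustrative usage sketch (not part of the original model): ``iface`` below
# is assumed to be an existing net.Interface instance; ``protocol`` is filled
# in automatically by save() based on the address version.
#
#   ip = Ip(interface=iface, address='10.40.0.21', netmask='10.40.0.0/24')
#   ip.full_clean()
#   ip.save()
#   assert ip.protocol == 'ipv4'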
| sephiroth6/nodeshot | nodeshot/networking/net/models/ip.py | Python | gpl-3.0 | 2,430 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-12 15:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Malware',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alert_id', models.CharField(max_length=90)),
('alert_type', models.CharField(max_length=80)),
('file_name', models.CharField(max_length=80)),
('computer', models.CharField(max_length=80)),
('contact_group', models.CharField(max_length=80)),
('virus', models.CharField(max_length=80)),
('actual_action', models.CharField(max_length=80)),
('comment', models.CharField(max_length=100)),
('numeric_ip', models.GenericIPAddressField(default='0.0.0.0', protocol='ipv4')),
],
),
]
| brj424/nector | malware/migrations/0001_initial.py | Python | gpl-3.0 | 1,104 |
# coding=utf-8
"""
Class used for representing tIntermediateCatchEvent of BPMN 2.0 graph
"""
import graph.classes.events.catch_event_type as catch_event
class IntermediateCatchEvent(catch_event.CatchEvent):
"""
Class used for representing tIntermediateCatchEvent of BPMN 2.0 graph
"""
def __init__(self):
"""
Default constructor, initializes object fields with new instances.
"""
super(IntermediateCatchEvent, self).__init__()
| krisss2121/bpmn-python | bpmn_python/graph/classes/events/intermediate_catch_event_type.py | Python | gpl-3.0 | 478 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <[email protected]>
# Copyright 2015-2018 Alexander Cogneau (acogneau) <[email protected]>:
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.misc.autoupdate."""
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.misc import autoupdate, httpclient
INVALID_JSON = ['{"invalid": { "json"}', '{"wrong": "keys"}']
class HTTPGetStub(httpclient.HTTPClient):
"""A stub class for HTTPClient.
Attributes:
url: the last url used by get()
_success: Whether get() will emit a success signal.
"""
def __init__(self, success=True, json=None):
super().__init__()
self.url = None
self._success = success
if json:
self._json = json
else:
self._json = '{"info": {"version": "test"}}'
def get(self, url):
self.url = url
if self._success:
self.success.emit(self._json)
else:
self.error.emit("error")
def test_constructor(qapp):
client = autoupdate.PyPIVersionClient()
assert isinstance(client._client, httpclient.HTTPClient)
def test_get_version_success(qtbot):
"""Test get_version() when success is emitted."""
http_stub = HTTPGetStub(success=True)
client = autoupdate.PyPIVersionClient(client=http_stub)
with qtbot.assertNotEmitted(client.error):
with qtbot.waitSignal(client.success):
client.get_version('test')
assert http_stub.url == QUrl(client.API_URL.format('test'))
def test_get_version_error(qtbot):
"""Test get_version() when error is emitted."""
http_stub = HTTPGetStub(success=False)
client = autoupdate.PyPIVersionClient(client=http_stub)
with qtbot.assertNotEmitted(client.success):
with qtbot.waitSignal(client.error):
client.get_version('test')
@pytest.mark.parametrize('json', INVALID_JSON)
def test_invalid_json(qtbot, json):
"""Test on_client_success() with invalid JSON."""
http_stub = HTTPGetStub(json=json)
client = autoupdate.PyPIVersionClient(client=http_stub)
client.get_version('test')
with qtbot.assertNotEmitted(client.success):
with qtbot.waitSignal(client.error):
client.get_version('test')
| t-wissmann/qutebrowser | tests/unit/misc/test_autoupdate.py | Python | gpl-3.0 | 2,969 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
JWT tokens (for web interface, mostly, as all peer operations function on
public key cryptography)
JWT tokens can be one of:
* Good
* Expired
* Invalid
Granting them should not require database access: they exist to determine
whether a user is authenticated without querying the database.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from ...utils.timing import TimedTestCase
from ..token import token, jwt_get, jwt_use
class test_token(TimedTestCase):
def test_good_token(self):
"""Valid JWT Token"""
self.threshold = .32
bob = token(u'bob')
example = bob.make(u'print')
bob.check(example)
def test_expired_token(self):
"""Expire a token..."""
self.threshold = .1
a = datetime.datetime.now()
        assert a is not None
def test_invalid_token(self):
"""Invalid Tokens"""
self.threshold = .1
fred = token(u'fred')
alice = token(u'alice')
wrong = fred.make(u'well then')
alice.check(wrong)
class test_jwt(TimedTestCase):
def test_routes(self):
self.threshold = .1
tok = jwt_get(u'ten')
res = jwt_use(tok)
print(res)
| Thetoxicarcade/ac | congredi/auth/test/test_token.py | Python | gpl-3.0 | 1,288 |
"""
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# example summary provider for NSMachPort
# the real summary is now C++ code built into LLDB
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but the port number of an NSMachPort, so they need not
# obey the interface specification for synthetic children providers
class NSMachPortKnown_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj;
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
self.update();
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture();
# one pointer is the ISA
# then we have one other internal pointer, plus
# 4 bytes worth of flags. hence, these values
def offset(self):
logger = lldb.formatters.Logger.Logger()
if self.sys_params.is_64_bit:
return 20
else:
return 12
def port(self):
logger = lldb.formatters.Logger.Logger()
vport = self.valobj.CreateChildAtOffset("port",
self.offset(),
self.sys_params.types_cache.NSUInteger)
return vport.GetValueAsUnsigned(0)
class NSMachPortUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj;
self.sys_params = params
self.update();
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture();
def port(self):
logger = lldb.formatters.Logger.Logger()
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
num_children_vo = self.valobj.CreateValueFromExpression("port","(int)[" + stream.GetData() + " machPort]")
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return '<variable is not NSMachPort>'
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data,wrapper =lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(valobj,statistics)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSMachPort':
wrapper = NSMachPortKnown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun',valobj)
else:
wrapper = NSMachPortUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('unknown_class',valobj.GetName() + " seen as " + name_string)
return wrapper;
def NSMachPort_SummaryProvider (valobj,dict):
logger = lldb.formatters.Logger.Logger()
provider = GetSummary_Impl(valobj);
if provider != None:
if isinstance(provider,lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
return provider.message()
try:
summary = provider.port();
except:
summary = None
logger >> "got summary " + str(summary)
if summary == None:
summary = '<variable is not NSMachPort>'
if isinstance(summary, basestring):
      return summary
return 'mach port: ' + str(summary)
return 'Summary Unavailable'
def __lldb_init_module(debugger,dict):
debugger.HandleCommand("type summary add -F NSMachPort.NSMachPort_SummaryProvider NSMachPort")
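# Illustrative note (not part of the original script): besides the automatic
# __lldb_init_module hook, the summary can be loaded manually from an LLDB
# session with
#   (lldb) command script import NSMachPort.py
# after which NSMachPort variables are shown as 'mach port: <port number>'.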
| s20121035/rk3288_android5.1_repo | external/lldb/examples/summaries/cocoa/NSMachPort.py | Python | gpl-3.0 | 3,980 |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/__init__.py
__version__=''' $Id$ '''
__doc__="""The Reportlab PDF generation library."""
Version = "2.7"
import sys
if sys.version_info[0:2] < (2, 7):
warning = """The trunk of reportlab currently requires Python 2.7 or higher.
This is being done to let us move forwards with 2.7/3.x compatibility
with the minimum of baggage.
    ReportLab 2.7 was the last packaged version to support Python 2.5 and 2.6.
Python 2.3 users may still use ReportLab 2.4 or any other bugfixes
derived from it, and Python 2.4 users may use ReportLab 2.5.
Python 2.2 and below need to use released versions beginning with
1.x (e.g. 1.21), or snapshots or checkouts from our 'version1' branch.
Our current plan is to remove Python 2.5 compatibility on our next release,
allowing us to use the 2to3 tool and work on Python 3.0 compatibility.
If you have a choice, Python 2.7.x is best long term version to use.
"""
    raise ImportError("reportlab needs Python 2.7 or higher", warning)
def getStory(context):
"This is a helper for our old autogenerated documentation system"
if context.target == 'UserGuide':
# parse some local file
import os
myDir = os.path.split(__file__)[0]
import yaml
return yaml.parseFile(myDir + os.sep + 'mydocs.yaml')
else:
# this signals that it should revert to default processing
return None
def getMonitor():
import reportlab.monitor
mon = reportlab.monitor.ReportLabToolkitMonitor()
return mon
| TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/thirdparty/src/reportlab/__init__.py | Python | gpl-3.0 | 1,715 |
../../../../../../share/pyshared/ubuntuone-storage-protocol/ubuntuone/storageprotocol/delta.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/ubuntuone-storage-protocol/ubuntuone/storageprotocol/delta.py | Python | gpl-3.0 | 94 |
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
import ROOT
from . import log; log = log[__name__]
from .. import QROOT, asrootpy
from ..base import NamedObject
from ..extern.six import string_types
__all__ = [
'DataSet',
]
class DataSet(NamedObject, QROOT.RooDataSet):
_ROOT = QROOT.RooDataSet
class Entry(object):
def __init__(self, idx, dataset):
self.idx_ = idx
self.dataset_ = dataset
@property
def fields(self):
return asrootpy(self.dataset_.get(self.idx_))
@property
def weight(self):
self.dataset_.get(self.idx_) #set current event
return self.dataset_.weight()
def __len__(self):
return self.numEntries()
def __getitem__(self, idx):
return DataSet.Entry(idx, self)
def __iter__(self):
for idx in range(len(self)):
yield DataSet.Entry(idx, self)
def createHistogram(self, *args, **kwargs):
if args and isinstance(args[0], string_types):
return ROOT.RooAbsData.createHistogram(self, *args, **kwargs)
return super(DataSet, self).createHistogram(*args, **kwargs)
def reduce(self, *args, **kwargs):
return asrootpy(super(DataSet, self).reduce(*args, **kwargs))
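# Illustrative iteration sketch (assumption: ``dataset`` is an existing
# RooDataSet already wrapped by rootpy, e.g. via asrootpy):
#
#   for entry in dataset:
#       observables = entry.fields   # RooArgSet of observables for this entry
#       weight = entry.weight        # per-entry weight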
| kreczko/rootpy | rootpy/stats/dataset.py | Python | gpl-3.0 | 1,394 |
from __future__ import absolute_import, unicode_literals
from django.conf import settings as django_settings
SLUG = 'macros'
APP_LABEL = 'wiki'
METHODS = getattr(
django_settings,
'WIKI_PLUGINS_METHODS',
('article_list',
'toc',
))
| NablaWebkom/django-wiki | wiki/plugins/macros/settings.py | Python | gpl-3.0 | 256 |
if __name__ == "__main__":
try:
from mvc.ui.widgets import Application
except ImportError:
from mvc.ui.console import Application
from mvc.widgets import app
from mvc.widgets import initialize
app.widgetapp = Application()
initialize(app.widgetapp)
| pculture/mirovideoconverter3 | mvc/__main__.py | Python | gpl-3.0 | 289 |
'''
pysplat is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pysplat is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PySplat. If not, see < http://www.gnu.org/licenses/ >.
(C) 2016 by Thomas Pointhuber, <[email protected]>
''' | pointhi/PySplat | PySplat/util/__init__.py | Python | gpl-3.0 | 682 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.contrib.auth.models import BaseUserManager
from django.db.models import Q
from django.utils import timezone
from django.utils.lru_cache import lru_cache
from pootle_app.models.permissions import check_user_permission
from pootle_translationproject.models import TranslationProject
from . import utils
__all__ = ('UserManager', )
class UserManager(BaseUserManager):
"""Pootle User manager.
This manager hides the 'nobody' and 'default' users for normal
queries, since they are special users. Code that needs access to these
users should use the methods get_default_user and get_nobody_user.
"""
PERMISSION_USERS = ('default', 'nobody')
META_USERS = ('default', 'nobody', 'system')
def _create_user(self, username, email, password, is_superuser,
**extra_fields):
"""Creates and saves a User with the given username, email,
password and superuser status.
Adapted from the core ``auth.User`` model's ``UserManager``: we
have no use for the ``is_staff`` field.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
utils.validate_email_unique(email)
user = self.model(username=username, email=email,
is_active=True, is_superuser=is_superuser,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
return self._create_user(username, email, password, False,
**extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
return self._create_user(username, email, password, True,
**extra_fields)
@lru_cache()
def get_default_user(self):
return self.get_queryset().get(username='default')
@lru_cache()
def get_nobody_user(self):
return self.get_queryset().get(username='nobody')
@lru_cache()
def get_system_user(self):
return self.get_queryset().get(username='system')
def hide_permission_users(self):
return self.get_queryset().exclude(username__in=self.PERMISSION_USERS)
def hide_meta(self):
return self.get_queryset().exclude(username__in=self.META_USERS)
def meta_users(self):
return self.get_queryset().filter(username__in=self.META_USERS)
def get_users_with_permission(self, permission_code, project, language):
default = self.get_default_user()
directory = TranslationProject.objects.get(
project=project,
language=language
).directory
if check_user_permission(default, permission_code, directory):
return self.hide_meta().filter(is_active=True)
user_filter = Q(
permissionset__positive_permissions__codename=permission_code
)
language_path = language.directory.pootle_path
project_path = project.directory.pootle_path
user_filter &= (
Q(permissionset__directory__pootle_path=directory.pootle_path)
| Q(permissionset__directory__pootle_path=language_path)
| Q(permissionset__directory__pootle_path=project_path)
)
user_filter |= Q(is_superuser=True)
return self.get_queryset().filter(user_filter).distinct()
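# Illustrative sketch (not part of the original manager): assuming ``User`` is
# the configured user model using this manager and ``project``/``language``
# are existing Pootle objects:
#
#   translators = User.objects.get_users_with_permission(
#       'translate', project, language)
#   real_users = User.objects.hide_meta().filter(is_active=True)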
| Finntack/pootle | pootle/apps/accounts/managers.py | Python | gpl-3.0 | 3,836 |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.xmlstream import ElementBase, StanzaBase, ET
from sleekxmpp.xmlstream import register_stanza_plugin
class Failure(StanzaBase):
"""
"""
name = 'failure'
namespace = 'urn:ietf:params:xml:ns:xmpp-sasl'
interfaces = set(('condition', 'text'))
plugin_attrib = name
sub_interfaces = set(('text',))
conditions = set(('aborted', 'account-disabled', 'credentials-expired',
'encryption-required', 'incorrect-encoding', 'invalid-authzid',
        'invalid-mechanism', 'malformed-request', 'mechanism-too-weak',
'not-authorized', 'temporary-auth-failure'))
def setup(self, xml=None):
"""
Populate the stanza object using an optional XML object.
Overrides ElementBase.setup.
Sets a default error type and condition, and changes the
parent stanza's type to 'error'.
Arguments:
xml -- Use an existing XML object for the stanza's values.
"""
# StanzaBase overrides self.namespace
self.namespace = Failure.namespace
if StanzaBase.setup(self, xml):
#If we had to generate XML then set default values.
self['condition'] = 'not-authorized'
self.xml.tag = self.tag_name()
def get_condition(self):
"""Return the condition element's name."""
for child in self.xml.getchildren():
if "{%s}" % self.namespace in child.tag:
cond = child.tag.split('}', 1)[-1]
if cond in self.conditions:
return cond
return 'not-authorized'
def set_condition(self, value):
"""
Set the tag name of the condition element.
Arguments:
value -- The tag name of the condition element.
"""
if value in self.conditions:
del self['condition']
self.xml.append(ET.Element("{%s}%s" % (self.namespace, value)))
return self
def del_condition(self):
"""Remove the condition element."""
for child in self.xml.getchildren():
            if "{%s}" % self.namespace in child.tag:
tag = child.tag.split('}', 1)[-1]
if tag in self.conditions:
self.xml.remove(child)
return self
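# Minimal standalone sketch (illustrative; in practice the stanza is built by
# a live XML stream). Setting 'condition' appends the matching child element,
# and 'text' fills the <text/> sub-element:
#
#   failure = Failure()
#   failure['condition'] = 'credentials-expired'
#   failure['text'] = 'password expired'
#   print("%s" % failure)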
| TheGurke/Progenitus | sleekxmpp/features/feature_mechanisms/stanza/failure.py | Python | gpl-3.0 | 2,500 |
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
import random
from io import StringIO
import struct
import dns.exception
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.set
from ._compat import string_types
# define SimpleSet here for backwards compatibility
SimpleSet = dns.set.Set
class DifferingCovers(dns.exception.DNSException):
"""An attempt was made to add a DNS SIG/RRSIG whose covered type
is not the same as that of the other rdatas in the rdataset."""
class IncompatibleTypes(dns.exception.DNSException):
"""An attempt was made to add DNS RR data of an incompatible type."""
class Rdataset(dns.set.Set):
"""A DNS rdataset."""
__slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE, ttl=0):
"""Create a new rdataset of the specified class and type.
*rdclass*, an ``int``, the rdataclass.
*rdtype*, an ``int``, the rdatatype.
*covers*, an ``int``, the covered rdatatype.
*ttl*, an ``int``, the TTL.
"""
super(Rdataset, self).__init__()
self.rdclass = rdclass
self.rdtype = rdtype
self.covers = covers
self.ttl = ttl
def _clone(self):
obj = super(Rdataset, self)._clone()
obj.rdclass = self.rdclass
obj.rdtype = self.rdtype
obj.covers = self.covers
obj.ttl = self.ttl
return obj
def update_ttl(self, ttl):
"""Perform TTL minimization.
Set the TTL of the rdataset to be the lesser of the set's current
TTL or the specified TTL. If the set contains no rdatas, set the TTL
to the specified TTL.
*ttl*, an ``int``.
"""
if len(self) == 0:
self.ttl = ttl
elif ttl < self.ttl:
self.ttl = ttl
def add(self, rd, ttl=None):
"""Add the specified rdata to the rdataset.
If the optional *ttl* parameter is supplied, then
``self.update_ttl(ttl)`` will be called prior to adding the rdata.
*rd*, a ``dns.rdata.Rdata``, the rdata
*ttl*, an ``int``, the TTL.
Raises ``dns.rdataset.IncompatibleTypes`` if the type and class
do not match the type and class of the rdataset.
Raises ``dns.rdataset.DifferingCovers`` if the type is a signature
type and the covered type does not match that of the rdataset.
"""
#
# If we're adding a signature, do some special handling to
# check that the signature covers the same type as the
# other rdatas in this rdataset. If this is the first rdata
# in the set, initialize the covers field.
#
if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
raise IncompatibleTypes
if ttl is not None:
self.update_ttl(ttl)
if self.rdtype == dns.rdatatype.RRSIG or \
self.rdtype == dns.rdatatype.SIG:
covers = rd.covers()
if len(self) == 0 and self.covers == dns.rdatatype.NONE:
self.covers = covers
elif self.covers != covers:
raise DifferingCovers
if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
self.clear()
super(Rdataset, self).add(rd)
def union_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).union_update(other)
def intersection_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).intersection_update(other)
def update(self, other):
"""Add all rdatas in other to self.
*other*, a ``dns.rdataset.Rdataset``, the rdataset from which
to update.
"""
self.update_ttl(other.ttl)
super(Rdataset, self).update(other)
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
if not isinstance(other, Rdataset):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype or \
self.covers != other.covers:
return False
return super(Rdataset, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def to_text(self, name=None, origin=None, relativize=True,
override_rdclass=None, **kw):
"""Convert the rdataset into DNS master file format.
See ``dns.name.Name.choose_relativity`` for more information
on how *origin* and *relativize* determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
``to_text()`` method.
*name*, a ``dns.name.Name``. If name is not ``None``, emit RRs with
*name* as the owner name.
*origin*, a ``dns.name.Name`` or ``None``, the origin for relative
names.
*relativize*, a ``bool``. If ``True``, names will be relativized
to *origin*.
"""
if name is not None:
name = name.choose_relativity(origin, relativize)
ntext = str(name)
pad = ' '
else:
ntext = ''
pad = ''
s = StringIO()
if override_rdclass is not None:
rdclass = override_rdclass
else:
rdclass = self.rdclass
if len(self) == 0:
#
# Empty rdatasets are used for the question section, and in
# some dynamic updates, so we don't need to print out the TTL
# (which is meaningless anyway).
#
s.write(u'%s%s%s %s\n' % (ntext, pad,
dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype)))
else:
for rd in self:
s.write(u'%s%s%d %s %s %s\n' %
(ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype),
rd.to_text(origin=origin, relativize=relativize,
**kw)))
#
# We strip off the final \n for the caller's convenience in printing
#
return s.getvalue()[:-1]
def to_wire(self, name, file, compress=None, origin=None,
override_rdclass=None, want_shuffle=True):
"""Convert the rdataset to wire format.
*name*, a ``dns.name.Name`` is the owner name to use.
*file* is the file where the name is emitted (typically a
BytesIO file).
*compress*, a ``dict``, is the compression table to use. If
``None`` (the default), names will not be compressed.
*origin* is a ``dns.name.Name`` or ``None``. If the name is
relative and origin is not ``None``, then *origin* will be appended
to it.
*override_rdclass*, an ``int``, is used as the class instead of the
class of the rdataset. This is useful when rendering rdatasets
associated with dynamic updates.
*want_shuffle*, a ``bool``. If ``True``, then the order of the
Rdatas within the Rdataset will be shuffled before rendering.
Returns an ``int``, the number of records emitted.
"""
if override_rdclass is not None:
rdclass = override_rdclass
want_shuffle = False
else:
rdclass = self.rdclass
file.seek(0, 2)
if len(self) == 0:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
file.write(stuff)
return 1
else:
if want_shuffle:
l = list(self)
random.shuffle(l)
else:
l = self
for rd in l:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass,
self.ttl, 0)
file.write(stuff)
start = file.tell()
rd.to_wire(file, compress, origin)
end = file.tell()
assert end - start < 65536
file.seek(start - 2)
stuff = struct.pack("!H", end - start)
file.write(stuff)
file.seek(0, 2)
return len(self)
def match(self, rdclass, rdtype, covers):
"""Returns ``True`` if this rdataset matches the specified class,
type, and covers.
"""
if self.rdclass == rdclass and \
self.rdtype == rdtype and \
self.covers == covers:
return True
return False
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified list of rdatas in text format.
Returns a ``dns.rdataset.Rdataset`` object.
"""
if isinstance(rdclass, string_types):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
r = Rdataset(rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(rdclass, rdtype, ttl, *text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified rdatas in text format.
Returns a ``dns.rdataset.Rdataset`` object.
"""
return from_text_list(rdclass, rdtype, ttl, text_rdatas)
def from_rdata_list(ttl, rdatas):
"""Create an rdataset with the specified TTL, and with
the specified list of rdata objects.
Returns a ``dns.rdataset.Rdataset`` object.
"""
if len(rdatas) == 0:
raise ValueError("rdata list must not be empty")
r = None
for rd in rdatas:
if r is None:
r = Rdataset(rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
r.add(rd)
return r
def from_rdata(ttl, *rdatas):
"""Create an rdataset with the specified TTL, and with
the specified rdata objects.
Returns a ``dns.rdataset.Rdataset`` object.
"""
return from_rdata_list(ttl, rdatas)
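if __name__ == '__main__':
    # Minimal self-check (illustrative, not part of the library): build an A
    # rdataset from text values and render it back to master-file format.
    rds = from_text('IN', 'A', 300, '10.0.0.1', '10.0.0.2')
    print(rds.to_text())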
| pbaesse/Sissens | lib/python2.7/site-packages/eventlet/support/dns/rdataset.py | Python | gpl-3.0 | 11,374 |
"""Update vulnerability sources."""
from selinon import StoragePool
from f8a_worker.base import BaseTask
from f8a_worker.enums import EcosystemBackend
from f8a_worker.models import Ecosystem
from f8a_worker.solver import get_ecosystem_solver, OSSIndexDependencyParser
from f8a_worker.workers import CVEcheckerTask
class CVEDBSyncTask(BaseTask):
"""Update vulnerability sources."""
def components_to_scan(self, previous_sync_timestamp, only_already_scanned):
"""Get EPV that were recently updated in OSS Index, so they can contain new vulnerabilities.
Get components (e:p:v) that were recently (since previous_sync_timestamp) updated
in OSS Index, which means that they can contain new vulnerabilities.
:param previous_sync_timestamp: timestamp of previous check
:param only_already_scanned: include already scanned components only
:return: generator of e:p:v
"""
# TODO: reduce cyclomatic complexity
to_scan = []
rdb = StoragePool.get_connected_storage('BayesianPostgres')
for ecosystem in ['nuget']:
ecosystem_solver = get_ecosystem_solver(self.storage.get_ecosystem(ecosystem),
with_parser=OSSIndexDependencyParser())
self.log.debug("Retrieving new %s vulnerabilities from OSS Index", ecosystem)
ossindex_updated_packages = CVEcheckerTask.\
query_ossindex_vulnerability_fromtill(ecosystem=ecosystem,
from_time=previous_sync_timestamp)
for ossindex_updated_package in ossindex_updated_packages:
if Ecosystem.by_name(rdb.session, ecosystem).is_backed_by(EcosystemBackend.maven):
package_name = "{g}:{n}".format(g=ossindex_updated_package['group'],
n=ossindex_updated_package['name'])
else:
package_name = ossindex_updated_package['name']
package_affected_versions = set()
for vulnerability in ossindex_updated_package.get('vulnerabilities', []):
for version_string in vulnerability.get('versions', []):
try:
resolved_versions = ecosystem_solver.\
solve(["{} {}".format(package_name, version_string)],
all_versions=True)
except Exception:
self.log.exception("Failed to resolve %r for %s:%s", version_string,
ecosystem, package_name)
continue
resolved_versions = resolved_versions.get(package_name, [])
if only_already_scanned:
already_scanned_versions =\
[ver for ver in resolved_versions if
self.storage.get_analysis_count(ecosystem, package_name, ver) > 0]
package_affected_versions.update(already_scanned_versions)
else:
package_affected_versions.update(resolved_versions)
for version in package_affected_versions:
to_scan.append({
'ecosystem': ecosystem,
'name': package_name,
'version': version
})
msg = "Components to be {prefix}scanned for vulnerabilities: {components}".\
format(prefix="re-" if only_already_scanned else "",
components=to_scan)
self.log.info(msg)
return to_scan
def execute(self, arguments):
"""Start the task.
:param arguments: optional argument 'only_already_scanned' to run only
on already analysed packages
:return: EPV dict describing which packages should be analysed
"""
only_already_scanned = arguments.pop('only_already_scanned', True) if arguments else True
ignore_modification_time = (arguments.pop('ignore_modification_time', False)
if arguments else False)
CVEcheckerTask.update_victims_cve_db_on_s3()
self.log.debug('Updating sync associated metadata')
s3 = StoragePool.get_connected_storage('S3VulnDB')
previous_sync_timestamp = s3.update_sync_date()
if ignore_modification_time:
previous_sync_timestamp = 0
# get components which might have new vulnerabilities since previous sync
to_scan = self.components_to_scan(previous_sync_timestamp, only_already_scanned)
return {'modified': to_scan}
| miteshvp/fabric8-analytics-worker | f8a_worker/workers/cvedbsync.py | Python | gpl-3.0 | 4,809 |
# -*- coding: utf-8 -*
import os, StringIO, sys, traceback, tempfile, random, shutil
from status import OutputStatus
from sagenb.misc.format import format_for_pexpect
from worksheet_process import WorksheetProcess
from sagenb.misc.misc import (walltime,
set_restrictive_permissions, set_permissive_permissions)
import pexpect
###################################################################
# Expect-based implementation
###################################################################
class WorksheetProcess_ExpectImplementation(WorksheetProcess):
"""
A controlled Python process that executes code using expect.
INPUT:
- ``process_limits`` -- None or a ProcessLimits objects as defined by
the ``sagenb.interfaces.ProcessLimits`` object.
"""
def __init__(self,
process_limits = None,
timeout = 0.05,
python = 'python'):
"""
Initialize this worksheet process.
"""
self._output_status = OutputStatus('', [], True)
self._expect = None
self._is_started = False
self._is_computing = False
self._timeout = timeout
self._prompt = "__SAGE__"
self._filename = ''
self._all_tempdirs = []
self._process_limits = process_limits
self._max_walltime = None
self._start_walltime = None
self._data_dir = None
self._python = python
if process_limits:
u = ''
if process_limits.max_vmem is not None:
u += ' -v %s'%(int(process_limits.max_vmem)*1000)
if process_limits.max_cputime is not None:
u += ' -t %s'%(int(process_limits.max_cputime))
if process_limits.max_processes is not None:
u += ' -u %s'%(int(process_limits.max_processes))
# prepend ulimit options
if u == '':
self._ulimit = u
else:
self._ulimit = 'ulimit %s'%u
else:
self._ulimit = ''
if process_limits and process_limits.max_walltime:
self._max_walltime = process_limits.max_walltime
def command(self):
return self._python
# TODO: The following simply doesn't work -- this is not a valid way to run
# ulimited. Also we should check if ulimit is available before even
# doing this.
return '&&'.join([x for x in [self._ulimit, self._python] if x])
def __del__(self):
try: self._cleanup_tempfiles()
except: pass
try: self._cleanup_data_dir()
except: pass
def _cleanup_data_dir(self):
if self._data_dir is not None:
set_restrictive_permissions(self._data_dir)
def _cleanup_tempfiles(self):
for X in self._all_tempdirs:
try: shutil.rmtree(X, ignore_errors=True)
except: pass
def __repr__(self):
"""
Return string representation of this worksheet process.
"""
return "Pexpect implementation of worksheet process"
###########################################################
# Control the state of the subprocess
###########################################################
def interrupt(self):
"""
Send an interrupt signal to the currently running computation
in the controlled process. This may or may not succeed. Call
``self.is_computing()`` to find out if it did.
"""
if self._expect is None: return
try:
self._expect.sendline(chr(3))
except: pass
def quit(self):
"""
Quit this worksheet process.
"""
if self._expect is None: return
try:
self._expect.sendline(chr(3)) # send ctrl-c
self._expect.sendline('quit_sage()')
except:
pass
try:
os.killpg(self._expect.pid, 9)
os.kill(self._expect.pid, 9)
except OSError:
pass
self._expect = None
self._is_started = False
self._is_computing = False
self._start_walltime = None
self._cleanup_tempfiles()
self._cleanup_data_dir()
def start(self):
"""
Start this worksheet process running.
"""
#print "Starting worksheet with command: '%s'"%self.command()
self._expect = pexpect.spawn(self.command())
self._is_started = True
self._is_computing = False
self._number = 0
self._read()
self._start_walltime = walltime()
def update(self):
"""
This should be called periodically by the server processes.
It does things like checking for timeouts, etc.
"""
self._check_for_walltimeout()
def _check_for_walltimeout(self):
"""
Check if the walltimeout has been reached, and if so, kill
this worksheet process.
"""
if (self._is_started and \
self._max_walltime and self._start_walltime and \
walltime() - self._start_walltime > self._max_walltime):
self.quit()
###########################################################
# Query the state of the subprocess
###########################################################
def is_computing(self):
"""
Return True if a computation is currently running in this worksheet subprocess.
OUTPUT:
- ``bool``
"""
return self._is_computing
def is_started(self):
"""
Return true if this worksheet subprocess has already been started.
OUTPUT:
- ``bool``
"""
return self._is_started
###########################################################
# Sending a string to be executed in the subprocess
###########################################################
def get_tmpdir(self):
"""
Return two strings (local, remote), where local is the name
of a pre-created temporary directory, and remote is the name
of the same directory but on the machine on which the actual
worksheet process is running.
OUTPUT:
- local directory
- remote directory
"""
# In this implementation the remote process is just running
# as the same user on the local machine.
s = tempfile.mkdtemp()
return (s, s)
def execute(self, string, data=None):
"""
Start executing the given string in this subprocess.
INPUT:
- ``string`` -- a string containing code to be executed.
- ``data`` -- a string or None; if given, must specify an
absolute path on the server host filesystem. This may
be ignored by some worksheet process implementations.
"""
if self._expect is None:
self.start()
if self._expect is None:
raise RuntimeError, "unable to start subprocess using command '%s'"%self.command()
self._number += 1
local, remote = self.get_tmpdir()
if data is not None:
# make a symbolic link from the data directory into local tmp directory
self._data = os.path.split(data)[1]
self._data_dir = data
set_permissive_permissions(data)
os.symlink(data, os.path.join(local, self._data))
else:
self._data = ''
self._tempdir = local
sage_input = '_sage_input_%s.py'%self._number
self._filename = os.path.join(self._tempdir, sage_input)
self._so_far = ''
self._is_computing = True
self._all_tempdirs.append(self._tempdir)
open(self._filename,'w').write(format_for_pexpect(string, self._prompt,
self._number))
try:
self._expect.sendline('\nimport os;os.chdir("%s");\nexecfile("%s")'%(
remote, sage_input))
except OSError as msg:
self._is_computing = False
self._so_far = str(msg)
def _read(self):
try:
self._expect.expect(pexpect.EOF, self._timeout)
# got EOF subprocess must have crashed; cleanup
print "got EOF subprocess must have crashed..."
print self._expect.before
self.quit()
except:
pass
###########################################################
# Getting the output so far from a subprocess
###########################################################
def output_status(self):
"""
Return OutputStatus object, which includes output from the
subprocess from the last executed command up until now,
information about files that were created, and whether
computing is now done.
OUTPUT:
- ``OutputStatus`` object.
"""
self._read()
if self._expect is None:
self._is_computing = False
else:
self._so_far = self._expect.before
import re
v = re.findall('START%s.*%s'%(self._number,self._prompt), self._so_far, re.DOTALL)
if len(v) > 0:
self._is_computing = False
s = v[0][len('START%s'%self._number):-len(self._prompt)]
else:
v = re.findall('START%s.*'%self._number, self._so_far, re.DOTALL)
if len(v) > 0:
s = v[0][len('START%s'%self._number):]
else:
s = ''
if s.endswith(self._prompt):
s = s[:-len(self._prompt)]
files = []
if os.path.exists(self._tempdir):
files = [os.path.join(self._tempdir, x) for x in os.listdir(self._tempdir) if x != self._data]
files = [x for x in files if x != self._filename]
return OutputStatus(s, files, not self._is_computing)
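# Illustrative polling sketch (not part of the original class); assumes a
# local ``python`` binary is available to the notebook server:
#
#   W = WorksheetProcess_ExpectImplementation()
#   W.execute("print(2 + 2)")
#   S = W.output_status()    # poll repeatedly until the status reports done
#   W.quit()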
class WorksheetProcess_RemoteExpectImplementation(WorksheetProcess_ExpectImplementation):
"""
This worksheet process class implements computation of worksheet
code as another user possibly on another machine, with the
following requirements:
1. ssh keys are setup for passwordless login from the server to the
remote user account, and
2. there is a shared filesystem that both users can write to,
which need not be mounted in the same location.
VULNERABILITIES: It is possible for a malicious user to see code
input by other notebook users whose processes are currently
running. However, the moment any calculation finishes, the file
    results are moved back to the notebook server in a protected
    place, and everything but the input file is deleted, so the
    damage that can be done is limited. In particular, users can't
    simply browse other users' files.
INPUT:
- ``user_at_host`` -- a string of the form 'username@host'
such that 'ssh user@host' does not require a password, e.g.,
setup by typing ``ssh-keygen`` as the notebook server and
worksheet users, then putting ~/.ssh/id_rsa.pub as the file
.ssh/authorized_keys. You must make the permissions of
files and directories right.
- ``local_directory`` -- (default: None) name of a directory on
the local computer that the notebook server can write to,
which the remote computer also has read/write access to. If
set to ``None``, then first try the environment variable
:envvar:`SAGENB_TMPDIR` if it exists, then :envvar:`TMPDIR`.
Otherwise, fall back to ``/tmp``.
- ``remote_directory`` -- (default: None) if the local_directory is
mounted on the remote machine as a different directory name,
this string is that directory name.
- ``process_limits`` -- None or a ProcessLimits objects as defined by
the ``sagenb.interfaces.ProcessLimits`` object.
"""
def __init__(self,
user_at_host,
remote_python,
local_directory = None,
remote_directory = None,
process_limits = None,
timeout = 0.05):
WorksheetProcess_ExpectImplementation.__init__(self, process_limits, timeout=timeout)
self._user_at_host = user_at_host
if local_directory is None:
local_directory = os.environ.get("SAGENB_TMPDIR")
if local_directory is None:
local_directory = os.environ.get("TMPDIR")
if local_directory is None:
local_directory = "/tmp"
self._local_directory = local_directory
if remote_directory is None:
remote_directory = local_directory
self._remote_directory = remote_directory
self._remote_python = remote_python
def command(self):
if self._ulimit == '':
c = self._remote_python
else:
c = '&&'.join([x for x in [self._ulimit, self._remote_python] if x])
return 'sage-native-execute ssh -t %s "%s"'%(self._user_at_host, c)
def get_tmpdir(self):
"""
Return two strings (local, remote), where local is the name
of a pre-created temporary directory, and remote is the name
of the same directory but on the machine on which the actual
worksheet process is running.
"""
# In this implementation the remote process is just running
# as the same user on the local machine.
local = tempfile.mkdtemp(dir=self._local_directory)
remote = os.path.join(self._remote_directory, local[len(self._local_directory):].lstrip(os.path.sep))
# Make it so local is world read/writable -- so that the remote worksheet
# process can write to it.
set_permissive_permissions(local)
return (local, remote)
| topisani/sagenb | sagenb/interfaces/expect.py | Python | gpl-3.0 | 14,073 |
from numpy import linspace, array, arange, tile, dot, zeros
from .gaussian import Gaussian
from ..utils import rk4
class BasisFunctions(object):
def __init__(self, n_basis, duration, dt, sigma):
self.n_basis = n_basis
means = linspace(0, duration, n_basis)
# FIXME:
variances = duration / (sigma * n_basis)**2
gaussians = [Gaussian(array([means[k]]), array([[variances]]))
for k in range(len(means))]
self.x = arange(0., duration, dt)
y = array([gaussians[k].normal(self.x.reshape(-1, 1)) for k in range(len(means))])
self.z = y / tile(sum(y, 0), (n_basis, 1))
def trajectory(self, weights):
return dot(weights, self.z)
class MovementPrimitive(object):
def __init__(self, duration, n_basis, dt, stiffness=0., damping=0.):
"""
:param float duration: duration of the movement in seconds
:param list dt: time step used for numerical integration
"""
self.dt = dt
self.duration = duration
self.stiffness = stiffness
self.damping = damping
self.basis = BasisFunctions(n_basis, self.duration, dt, 2.)
self.traj = zeros((self.duration/dt, 3))
self.acc = zeros(self.duration/dt) # +1 due to ..utils.rk4 implementation
def acceleration(self, t, state):
intrinsic_acc = - self.stiffness*state[0] - self.damping*state[1]
return array([state[1], self.acc[t / self.dt] + intrinsic_acc])
def trajectory(self, x0, command):
self.acc = self.basis.trajectory(command)
# self.acc[-1] = self.acc[-2] # still due to ..utils.rk4 implementation
t = 0.
self.traj[0, :] = [x0[0], x0[1], self.acc[0]]
i_t = 1
state = x0
while i_t < self.duration / self.dt:
# print i_t, t, self.duration - self.dt
t, state = rk4(t, self.dt, state, self.acceleration)
# print state
self.traj[i_t, :] = [state[0], state[1], self.acc[i_t]]
i_t += 1
return self.traj
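# Quick usage sketch (illustrative; the values are arbitrary and this assumes
# the Python 2 / older NumPy environment the module was written for):
#
#   mp = MovementPrimitive(duration=1., n_basis=5, dt=0.01)
#   weights = zeros(5) + 0.5                         # one weight per basis
#   traj = mp.trajectory(array([0., 0.]), weights)   # columns: pos, vel, acc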
| flowersteam/explauto | explauto/models/motor_primitive.py | Python | gpl-3.0 | 2,072 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2017 Florian Bruhin (The Compiler) <[email protected]>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.config.configexc."""
import textwrap
import pytest
from qutebrowser.config import configexc
from qutebrowser.utils import usertypes
def test_validation_error():
e = configexc.ValidationError('val', 'msg')
assert e.option is None
assert str(e) == "Invalid value 'val' - msg"
@pytest.mark.parametrize('deleted, renamed, expected', [
(False, None, "No option 'opt'"),
(True, None, "No option 'opt' (this option was removed from qutebrowser)"),
(False, 'new', "No option 'opt' (this option was renamed to 'new')"),
])
def test_no_option_error(deleted, renamed, expected):
e = configexc.NoOptionError('opt', deleted=deleted, renamed=renamed)
assert e.option == 'opt'
assert str(e) == expected
def test_no_option_error_clash():
with pytest.raises(AssertionError):
configexc.NoOptionError('opt', deleted=True, renamed='foo')
def test_backend_error():
e = configexc.BackendError(usertypes.Backend.QtWebKit)
assert str(e) == "This setting is not available with the QtWebKit backend!"
def test_desc_with_text():
"""Test ConfigErrorDesc.with_text."""
old = configexc.ConfigErrorDesc("Error text", Exception("Exception text"))
new = old.with_text("additional text")
assert str(new) == 'Error text (additional text): Exception text'
@pytest.fixture
def errors():
"""Get a ConfigFileErrors object."""
err1 = configexc.ConfigErrorDesc("Error text 1", Exception("Exception 1"))
err2 = configexc.ConfigErrorDesc("Error text 2", Exception("Exception 2"),
"Fake traceback")
return configexc.ConfigFileErrors("config.py", [err1, err2])
def test_config_file_errors_str(errors):
assert str(errors).splitlines() == [
'Errors occurred while reading config.py:',
' Error text 1: Exception 1',
' Error text 2: Exception 2',
]
def test_config_file_errors_html(errors):
html = errors.to_html()
assert textwrap.dedent(html) == textwrap.dedent("""
Errors occurred while reading config.py:
<ul>
<li>
<b>Error text 1</b>: Exception 1
</li>
<li>
<b>Error text 2</b>: Exception 2
<pre>
Fake traceback
</pre>
</li>
</ul>
""")
# Make sure the traceback is not indented
assert '<pre>\nFake traceback\n' in html
| NoctuaNivalis/qutebrowser | tests/unit/config/test_configexc.py | Python | gpl-3.0 | 3,233 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Visualize the system cells and MPI domains. Run ESPResSo in parallel
to color particles by node. With OpenMPI, this can be achieved using
``mpiexec -n 4 ./pypresso ../samples/visualization_cellsystem.py``.
Set property ``system.cell_system.node_grid = [i, j, k]`` (with ``i * j * k``
equal to the number of MPI ranks) to change the way the cellsystem is
partitioned. Only the domain of MPI rank 0 will be shown in wireframe.
"""
import espressomd
import espressomd.visualization_opengl
import numpy as np
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
box = [40, 30, 20]
system = espressomd.System(box_l=box)
visualizer = espressomd.visualization_opengl.openGLLive(
system,
window_size=[800, 800],
background_color=[0, 0, 0],
camera_position=[20, 15, 80],
particle_coloring='node',
draw_nodes=True,
draw_cells=True)
system.time_step = 0.0005
system.cell_system.set_regular_decomposition(use_verlet_lists=True)
system.cell_system.skin = 0.4
#system.cell_system.node_grid = [i, j, k]
for i in range(100):
system.part.add(pos=box * np.random.random(3))
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=100.0, sigma=1.0, cutoff=3.0, shift="auto")
energy = system.analysis.energy()
print(f"Before Minimization: E_total = {energy['total']:.2e}")
system.integrator.set_steepest_descent(f_max=50, gamma=30.0,
max_displacement=0.001)
system.integrator.run(10000)
system.integrator.set_vv()
energy = system.analysis.energy()
print(f"After Minimization: E_total = {energy['total']:.2e}")
print("Tune skin")
system.cell_system.tune_skin(0.1, 4.0, 1e-1, 1000)
print(system.cell_system.get_state())
system.thermostat.set_langevin(kT=1, gamma=1, seed=42)
visualizer.run(1)
| espressomd/espresso | samples/visualization_cellsystem.py | Python | gpl-3.0 | 2,509 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from views import *
from api import *
urlpatterns = patterns('',
# developer list view
url(r'^$', DeveloperListView.as_view()),
url(r'^add$', DeveloperAddView.as_view()),
url(r'^save/$', DeveloperPluginSaveView.as_view()),
url(r'^docs$', DeveloperDocsView.as_view()),
url(r'^(?P<plugin_hash>[^/]+)$', DeveloperDetailView.as_view(), name='developer-detail'),
url(r'^(?P<plugin_hash>[^/]+)/(?P<version>[0-9]+)$', DeveloperVersionView.as_view(), name='developer-version'),
url(r'^(?P<plugin_hash>[^/]+)/(?P<version>[0-9]+)/deps$', DeveloperDepsView.as_view(), name='developer-deps'),
url(r'^(?P<plugin_hash>[^/]+)/add$', DeveloperVersionView.as_view(), name='developer-version-add'),
#live preview
url(r'^live/(?P<version_id>[0-9]+)/$',
DeveloperLiveAdminView.as_view(), name='developer-live-admin'),
url(r'^live/(?P<plugin_hash>[^/]+)/(?P<version>[0-9]+)$',
DeveloperLiveView.as_view(), name='developer-live'),
# API urls
url(r'^checkname/$', CheckNameView.as_view()),
url(r'^deletedep/$', DeleteDepView.as_view()),
# Globalproxy
url(r'^api/databaseSchemas/$', DatabaseSchemasView.as_view()),
url(r'^api/getProfileInformation/$', getProfileInformationView.as_view()),
url(r'^api/getFingerprints/$', getFingerprintsView.as_view()),
url(r'^api/getFingerprints/(?P<quest_slug>[^/]+)$', getFingerprintsView.as_view()),
# FingerprintProxy
url(r'^api/getFingerprintUID/(?P<fingerprint>[^/]+)$', getFingerprintUIDView.as_view()),
url(r'^api/getAnswers/(?P<fingerprint>[^/]+)$', getAnswersView.as_view()),
# datastore
url(r'^api/store/getExtra/(?P<fingerprint>[^/]+)$', getExtraView.as_view()),
url(r'^api/store/getDocuments/(?P<fingerprint>[^/]+)$', getDocumentsView.as_view()),
url(r'^api/store/putDocuments/(?P<fingerprint>[^/]+)$', putDocumentsView.as_view()),
url(r'^api/store/getPublications/(?P<fingerprint>[^/]+)$', getPublicationsView.as_view()),
url(r'^api/store/getComments/(?P<fingerprint>[^/]+)$', getCommentsView.as_view()),
url(r'^api/store/putComment/(?P<fingerprint>[^/]+)$', putCommentView.as_view()),
# fast links to dependency latest revision
url(r'^file/(?P<plugin_hash>[^/]+)/(?P<version>[0-9]+)/(?P<filename>[^/]+)$',
DeveloperFileView.as_view(), name='developer-file'),
)
| bioinformatics-ua/catalogue | emif/developer/urls.py | Python | gpl-3.0 | 3,165 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import budgetdatapackage
import datapackage
import datetime
from nose.tools import raises
from datapackage import compat
class TestBudgetResource(object):
def setup(self):
self.values = {
'currency': 'ISK',
'dateLastUpdated': '2014-04-22',
'datePublished': '1982-04-22',
'fiscalYear': '2014',
'granularity': 'transactional',
'status': 'approved',
'type': 'expenditure',
'location': 'IS',
'url': 'http://iceland.is/budgets.csv'}
def test_create_resource(self):
resource = budgetdatapackage.BudgetResource(**self.values)
assert resource.currency == self.values['currency']
last_updated = datetime.datetime.strptime(
self.values['dateLastUpdated'], '%Y-%m-%d').date()
assert resource.dateLastUpdated == last_updated
published = datetime.datetime.strptime(
self.values['datePublished'], '%Y-%m-%d').date()
assert resource.datePublished == published
assert resource.fiscalYear == self.values['fiscalYear']
assert resource.granularity == self.values['granularity']
assert resource.status == self.values['status']
assert resource.type == self.values['type']
assert resource.location == self.values['location']
assert resource.url == self.values['url']
assert resource.standard == '1.0.0-alpha'
def test_resource_can_be_used_with_datapackage(self):
"""Checks if it's possible to create a datapackage with a
budget resource"""
moneys = budgetdatapackage.BudgetResource(**self.values)
finances = datapackage.DataPackage(
name="finances", license="PDDL", resources=[moneys])
assert finances.name == "finances"
assert len(finances.resources) == 1
assert finances.resources[0].granularity == self.values['granularity']
@raises(ValueError)
def test_create_resource_missing_required_field(self):
del self.values['fiscalYear']
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_currency(self):
self.values['currency'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_dateLastPublished(self):
self.values['dateLastUpdated'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_datePublished(self):
self.values['datePublished'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_fiscalYear(self):
self.values['fiscalYear'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_granularity(self):
self.values['granularity'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_status(self):
self.values['status'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_type(self):
self.values['type'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_location(self):
self.values['location'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
| trickvi/budgetdatapackage | tests/test_resource.py | Python | gpl-3.0 | 3,567 |
#!/usr/bin/python3
# https://bugzilla.altlinux.org/show_bug.cgi?id=33532
#!/usr/bin/env -S python3 -u
# -*- coding: utf-8 -*-
#
# A simple message-sending script
# TODO: When error: No handlers could be found for logger "pyxmpp.Client"
import os, sys
# python-module-pyxmpp
from pyxmpp2.jid import JID
from pyxmpp2.jabber.simple import send_message
# set in korinf config file
jid = os.environ['KORINFERJID']
password = os.environ['KORINFERJIDPASSWD']
if len(sys.argv)!=4:
print("Usage:")
print("\t%s recipient_jid subject body" % (sys.argv[0],))
print("example:")
print("\t%s test1@localhost Test 'this is test'" % (sys.argv[0],))
sys.exit(1)
recpt,subject,body=sys.argv[1:]
jid = JID(jid)
if not jid.resource:
jid = JID(jid.node,jid.domain,"korinf")
recpt = JID(recpt)
send_message(jid,password,recpt,body,subject)
| vitlav/korinf | share/eterbuild/korinf/helpers/send_via_pyxmpp.py | Python | agpl-3.0 | 849 |
#!/usr/bin/env python
# coding: utf8
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
"""metalex is general tool for lexicographic and metalexicographic activities
Copyright (C) 2017 by Elvis MBONING
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact: [email protected]
---------------------------------------------------------------------------
makeBalise transforms extracted articles into a well-formed xml file.
It can also generate an HTML file for article editing.
Packages:
>>> sudo apt-get install python-html5lib
>>> sudo apt-get install python-lxml
>>> sudo apt-get install python-bs4
Usage:
>>> from metalex.dicXmilised import *
>>> dicoHtml(save=True)
"""
# ----Internal Modules------------------------------------------------------
import metalex
from .composeArticle import *
from .dicXmlTool import *
# ----External Modules------------------------------------------------------
import re
import sys
import codecs
import os
from bs4 import BeautifulSoup
from random import sample
from shutil import copyfile
from lxml import etree
from termcolor import colored
# -----Exported Functions-----------------------------------------------------
__all__ = ['BaliseXML', 'dico_html']
# -----Global Variables-----------------------------------------------------
components = {
'xml' : {
'metalexMetadata' : ['metalexMetadata', 'projectName', 'author',
'dateCreation', 'comment', 'contributors', 'candidate'],
'metalexContent' : ['article', 'definition', 'example', 'figured', 'contrary',
'entry', 'flexion', 'category', 'gender', 'rection', 'phonetic',
'identificationComponent', 'treatmentComponent', 'cte_cat',
'processingUnit', 'cgr_pt', 'cgr_vrg', 'cgr_fpar', 'cgr_opar',
'cgr_ocrch', 'cgr_fcrch', 'metalexContent', 'cte_gender',
'metalexResultDictionary']
},
'tei' : {
'teiHeader' : ['teiHeader', 'text', 'TEI', 'fileDesc', 'titleStmt',
'title', 'publicationStmt', 'p', 'sourceDesc', 'author'],
'text' : ['body', 'head', 'entry', 'form', 'orth', 'gramGrp',
'sense', 'def', 'cite', 'quote', 'span', 'usg', 'bibl',
'pos', 'genre', 'number', 'pron', 'etym']
},
'lmf' : {
'GlobalInformation' : ['LexicalResource', 'feat', 'p', 'GlobalInformation'],
'Lexicon' : ['Lexicon', 'feat', 'LexicalEntry', 'WordForm',
'Definition', 'Sense', 'Lexicon']
},
'dtd' : ['ELEMENT', 'ATTRIBUTE', 'PCDATA', 'CDATA', 'REQUIRED', 'IMPLIED'],
'xsd' : []
}
codifArticles = []
# ----------------------------------------------------------
def dico_html(save=False):
"""Build HTML editor file of the all articles
:return file: metalexViewerEditor.html
"""
print('\n --- %s %s \n\n' %(colored('Part 4: Generate Output formats', attrs=['bold']), '--'*25))
metalex.plugins
instanceHtml = BaliseHTML()
filepath = metalex.html_template
metalex.utils.create_temp()
if metalex.utils.in_dir('CopymetalexTemplate.html'):
copyfile(filepath, 'CopymetalexTemplate.html')
souphtl = instanceHtml.html_inject('CopymetalexTemplate.html')
if save:
metalex.utils.go_to_dicresult()
name = metalex.currentOcr+'_metalexViewerEditor.html'
with codecs.open(name, 'w') as htmlresult:
htmlresult.write(souphtl)
metalex.utils.create_temp()
os.remove('CopymetalexTemplate.html')
message = "*"+name+"* has correctly been generated > Saved in dicTemp folder"
metalex.logs.manageLog.write_log(message)
else:
souphtl = instanceHtml.html_inject('CopymetalexTemplate.html')
if save:
metalex.utils.go_to_dicresult()
with codecs.open(name, 'w') as htmlresult:
htmlresult.write(souphtl)
metalex.utils.create_temp()
os.remove('CopymetalexTemplate.html')
message = "*"+name+"* has correctly been generated > Saved in dicTemp folder"
metalex.logs.manageLog.write_log(message)
print('\n\n --- %s --------------- \n\n' %colored('MetaLex Processes was ended: consult results data in "dicTemp" folder',
'green', attrs=['bold']))
class BaliseHTML():
def __init__(self):
self.resultHtml = ''
def html_inject(self, template):
"""Create prettify HTML file all previous data generated
:return str: html (prettify by BeautifulSoup)
"""
instanceXml = BaliseXML()
contentxml = instanceXml.put_xml(typ='xml', save=True)
metalex.utils.create_temp()
soupXml = BeautifulSoup(contentxml, "html.parser")
projectconf = metalex.utils.read_conf()
Hauthor, Hname = projectconf['Author'], projectconf['Projectname'],
Hdate,Hcomment = projectconf['Creationdate'], projectconf['Comment']
Hcontrib = projectconf['Contributors']
filetemplate = codecs.open(template, 'r', 'utf-8')
souphtml = BeautifulSoup(filetemplate, "html5lib")
content = souphtml.find('div', attrs={'id': 'all-articles'})
author = content.find('h3', attrs={'id': 'author'})
author.string = 'main: '+Hauthor
date = content.find('h5', attrs={'id': 'date'})
date.string = Hdate
descipt = content.find('p', attrs={'id': 'description'})
descipt.string = Hcomment
contrib = content.find('h4', attrs={'id': 'contributors'})
contrib.string = 'contributors: '+Hcontrib
project = content.find('h4', attrs={'id': 'projetname'})
project.string = Hname
articlesxml = soupXml.findAll('article')
articleshtml = souphtml.find('div', attrs={'id': 'mtl:articles'})
for x in articlesxml:
elementart = BeautifulSoup('<article id=""></article>', 'html5lib')
idart = x.get('id')
artlem = x.get_text()
elementart.article.append(artlem)
elementart.article['id'] = idart
articleshtml.append(elementart.find('article'))
listlemme = souphtml.find('ul', attrs={'id': 'list-articles'})
for x in articlesxml:
art = x.get_text()
idart = x.get('id')
lem = x.find('entry').get_text()
lemme = BeautifulSoup('<li class="w3-hover-light-grey"><span class="lemme" onclick="changeImage('+
"'"+idart+"'"+')">'+lem+'</span><span class="fa fa-plus w3-closebtn" onclick="add('+
"'"+idart+"'"+')"/></li>', 'html5lib')
listlemme.append(lemme.find('li'))
filetemplate.close()
self.resultHtml = souphtml.prettify('utf-8')
return self.resultHtml
class BaliseXML ():
"""Build XML file type (xml|tei|lmf) with global metadata of the project
:param typ: str
:return obj: instance of BaliseXML
"""
def __init__(self, typ="xml"):
self.typ = typ
def build_structure(self, data, Sfile=None, typ='dtd'):
return False
def message(self, name):
return "*"+name+"* dictionary articles formated in xml is"+\
" created > Saved in dicTemp folder"
def put_xml(self, typ='xml', save=False):
"""Create well formed (xml|tei|lmf) file with metadata and content xml
:return metalexXml
"""
metadata = self.xml_metadata(typ)
content = self.xml_content(typ)
metalex.utils.go_to_dicresult()
if typ == 'xml':
if save:
name = 'metalex-'+metalex.projectName+'_'+metalex.currentOcr+'.xml'
metalexXml = self.balise(metadata+content, 'metalexResultDictionary',
attr={'xmlns':'https://www.w3schools.com',
'xmlns:xsi':'http://www.w3.org/2001/XMLSchema-in',
'xsi:schemaLocation':'metalexSchemaXML.xsd'})
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
if metalex.utils.in_dir(name):
with codecs.open(name, 'w', 'utf-8') as fle:
fle.write(metalexXmlTree.prettify(formatter=None))
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
else:
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
return metalexXml
else:
metalexXml = self.balise(metadata+content, 'metalexResultDictionary', attr={})
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
print(metalexXmlTree.prettify(formatter=None))
if typ == 'tei':
if save:
name = 'metalex-'+metalex.projectName+'_'+metalex.currentOcr+'-TEI.xml'
metalexXml = self.balise(metadata+content, 'TEI', typ= 'tei')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
if metalex.utils.in_dir(name):
with codecs.open(name, 'w', 'utf-8') as fle:
fle.write(metalexXmlTree.prettify(formatter=None))
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
else:
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
return metalexXml
else:
metalexXml = self.balise(metadata+content, 'TEI', typ= 'tei')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
print(metalexXmlTree.prettify(formatter=None))
if typ == 'lmf':
os.listdir('.')
if save:
name = 'metalex-'+metalex.projectName+'_'+metalex.currentOcr+'-LMF.xml'
metalexXml = self.balise(metadata+content, 'LexicalResource', attr={'dtdVersion':'15'}, typ= 'lmf')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
if metalex.utils.in_dir(name):
with codecs.open(name, 'w', 'utf-8') as fle:
fle.write(metalexXmlTree.prettify(formatter=None))
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
else:
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
return metalexXml
else:
metalexXml = self.balise(metadata+content, 'LexicalResource', attr={'dtdVersion':'15'}, typ= 'lmf')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
print(metalexXmlTree.prettify(formatter=None))
def xml_metadata(self, typ='xml'):
"""Create xml metadata file with configuration of the project
:return str: metadata
"""
metalex.utils.create_temp()
projectconf = metalex.utils.read_conf()
contribtab = projectconf['Contributors'].split(',') \
if projectconf['Contributors'].find(',') else projectconf['Contributors']
contrib = ''
if typ == 'xml':
author = self.balise(projectconf['Author'], 'author', typ)
name = self.balise(projectconf['Projectname'].strip(), 'projectName', typ)
date = self.balise(projectconf['Creationdate'].strip(), 'dateCreation', typ)
comment = self.balise(projectconf['Comment'], 'comment', typ)
if len(contribtab) > 1:
for data in contribtab: contrib += self.balise(data.strip(), 'candidate', typ)
else: contrib = self.balise(''.join(contribtab), 'candidate', typ)
contrib = self.balise(contrib, 'contributors', typ)
cont = name+author+date+comment+contrib
metadataxml = self.balise(cont, 'metalexMetadata', typ)
return metadataxml
if typ == 'tei':
if len(contribtab) > 1:
for data in contribtab:
if len(data) > 2: contrib += self.balise(data.strip(), 'span',
attr={'content':'contributor'}, typ='tei')
else: contrib = self.balise(''.join(contribtab), 'span', typ='tei')
author = self.balise(projectconf['Author'], 'author', typ='tei')
title = self.balise(projectconf['Projectname'], 'title', typ='tei')
RtitleStmt = self.balise(title, 'titleStmt', typ='tei')
pdate = self.balise(projectconf['Creationdate'], 'p', typ='tei')
pcomment = self.balise(projectconf['Comment'], 'p', typ='tei')
pcontrib = self.balise(contrib, 'p', attr={'content':'contributors'}, typ='tei')
Rpubli = self.balise(author+pdate+pcomment+pcontrib, 'publicationStmt', typ='tei')
sourc = self.balise('TEI metadata for metalex project output', 'p', typ='tei')
Rsourc = self.balise(sourc, 'sourceDesc', typ='tei')
RfilD = self.balise(RtitleStmt+Rpubli+Rsourc, 'fileDesc', typ='tei')
metadatatei = self.balise(RfilD, 'teiHeader', typ='tei')
return metadatatei
if typ == 'lmf':
if len(contribtab) > 1:
for data in contribtab:
if len(data) > 2: contrib += data.strip()+', '
else: contrib = ', '.join(contribtab)
enc = self.balise('', 'feat', attr={'att':'languageCoding', 'val':'utf-8'},
typ='lmf', sclose=True)
pauthor = self.balise('', 'feat', attr={'att':'author', 'val':projectconf['Author'].strip()},
typ='lmf', sclose=True)
pdate = self.balise('', 'feat', attr={'att':'dateCreation', 'val':projectconf['Creationdate'].strip()},
typ='lmf', sclose=True)
pname = self.balise('', 'feat', attr={'att':'projectName', 'val':projectconf['Projectname'].strip()},
typ='lmf', sclose=True)
pcomment = self.balise('', 'feat', attr={'att':'comment', 'val':projectconf['Comment'].strip()},
typ='lmf', sclose=True)
pcontrib = self.balise('', 'feat', attr={'att':'contributors', 'val':contrib.strip(', ')},
typ='lmf', sclose=True)
meta = self.balise('', 'p', attr={'att':'meta', 'val':'TEI metadata for metalex project output'},
typ='lmf', sclose=True)
metadatalmf = self.balise(enc+pauthor+pname+meta+pdate+pcomment+pcontrib, 'GlobalInformation', typ='lmf')
return metadatalmf
def balise_content_article (self):
data = get_data_articles('text')
cod = StructuredWithCodif(data, 'xml')
resultArticles = []
for art in cod.format_articles():
article_type_form(art)
if article_type_form(art) == '1':
partArt = re.search(r'(([a-zéèàûô]+)\s(<cte_cat>.+</cte_cat>)\s(.+)<cgr_pt>\.</cgr_pt>)', art, re.I)
if partArt != None:
ident, entry, cat, treat = partArt.group(1), partArt.group(2), partArt.group(3), partArt.group(4)
id = generate_id()
entry = self.balise(entry, 'entry')
ident = self.balise(entry+cat, 'identificationComponent')
treat = self.balise(self.balise(treat, 'definition'), 'processingUnit')
article = self.balise(ident+self.balise(treat, 'treatmentComponent'), 'article', attr={'id':id})
resultArticles.append(article)
if article_type_form(art) == '2':
research = r'(([a-zéèàûô]+)\s(<cte_cat>.+</cte_cat>\s<cte_gender>..</cte_gender>)\s(.+)<cgr_pt>\.</cgr_pt>)'
partArt = re.search(research, art, re.I)
if partArt != None:
ident, entry, cat, treat = partArt.group(1), partArt.group(2), partArt.group(3), partArt.group(4)
id = generate_id()
entry = self.balise(entry, 'entry')
ident = self.balise(entry+cat, 'identificationComponent')
if not re.search(r'(<cgr_pt>\.</cgr_pt>|<cte_cat>.+</cte_cat>|<cgr_vrg>,</cgr_vrg>)', partArt.group(4), re.I):
treat = self.balise(self.balise(treat+'.', 'definition'), 'processingUnit')
article = self.balise(ident+self.balise(treat, 'treatmentComponent'), 'article', attr={'id':id})
resultArticles.append(article)
elif partArt.group(4).find(' et ') != -1:
suite = 'hahaha'
return resultArticles
def xml_content(self, typ='xml', forme='text'):
"""Create xml content file (representing articles) with data articles extracting
:return str: contentXml
"""
content = ''
contentXml = ''
data = self.balise_content_article()
if typ == 'xml':
if forme == 'pickle':
data = get_data_articles('pickle')
for dicart in data:
for art in dicart.keys():
art = self.balise(dicart[art], 'article', art=True)
content += art
contentXml = self.balise(content, 'metalexContent')
return contentXml
else:
for art in data: content += art
contentXml = self.balise(content, 'metalexContent', attr={'totalArticle': str(len(data))})
return contentXml
if typ == 'tei':
for art in data:
soupart = BeautifulSoup(art, 'html.parser')
orth = soupart.find('entry').getText()
atOrth = soupart.find('article').get('id')
orth = self.balise(orth, 'orth', {'id': atOrth}, typ='tei')
formB = self.balise(orth, 'form', attr={'xml:lang':'fr', 'type':'lemma'}, typ='tei')
pos = soupart.find('cte_cat').getText()
posB = self.balise(pos, 'pos', typ='tei')
genB = ''
if soupart.find('cte_gender'): genB = soupart.find('cte_gender').getText().strip()
if genB == 'f.' or genB == 'm.': genB = self.balise(genB, 'genre', typ='tei')
gramgrp = self.balise(posB+genB, 'gramGrp', typ='tei')
sens = soupart.find('processingunit').getText().replace(' .', '.')
defi = self.balise(sens, 'def', typ='tei')
if sens != None: sens = self.balise(defi, 'sense', typ='tei')
entry = self.balise(formB+gramgrp+sens, 'entry', typ='tei')
content += entry
body = self.balise(content, 'body', typ='tei')
contentXml = self.balise(body, 'text', attr={'totalArticle': str(len(data))}, typ='tei')
return contentXml
if typ == 'lmf':
for art in data:
soupart = BeautifulSoup(art, 'html.parser')
orth = soupart.find('entry').getText()
atOrth = soupart.find('article').get('id')
orth = self.balise('', 'feat', attr={'att':'writtenForm','val':orth},
typ='lmf', sclose=True)
wordF = self.balise(orth, 'WordForm', attr={'id': atOrth}, typ='lmf')
pos = soupart.find('cte_cat').getText()
posB = self.balise('', 'feat', attr={'att':'partOfSpeech','val':pos},
typ='lmf', sclose=True)
genB = ''
if soupart.find('cte_gender'): genB = soupart.find('cte_gender').getText().strip()
if genB == 'f.' or genB == 'm.':
genB = self.balise('', 'feat', attr={'att':'grammaticalNumber','val': genB},
typ='lmf', sclose=True)
sens = soupart.find('processingunit').getText().replace(' .', '.')
sensnb = self.balise('', 'feat', attr={'att':'sensNumber','val':'1'},
typ='lmf', sclose=True)
definb = self.balise('', 'feat', attr={'att':'text','val':sens.strip()},
typ='lmf', sclose=True)
defi = self.balise(definb, 'Definition', typ='lmf')
if sens != None: sens = self.balise(sensnb+defi, 'Sense', typ='lmf')
entry = self.balise(wordF+posB+genB+sens, 'LexicalEntry', typ='lmf')
content += entry
body = self.balise('', 'feat', attr={'att':'language','val':'fra'},
typ='lmf', sclose=True)+content
contentXml = self.balise(body, 'Lexicon', attr={'totalArticle': str(len(data))}, typ='lmf')
return contentXml
def balise(self, element, markup, sclose=False, attr=None, typ='xml', art=False):
"""Markup data with a specific format type (xml|tei|lmf)
:return str: balised element
"""
if typ == 'xml':
if markup in components['xml']['metalexContent'] or markup \
in components['xml']['metalexMetadata']:
if art:
element = self.chevron(markup, attr, art=True)+element+self.chevron(markup, attr, False)
return element
else:
element = self.chevron(markup, attr)+element+self.chevron(markup, attr, False)
return element
if typ == 'tei':
if markup in components['tei']['text'] or markup in components['tei']['teiHeader']:
if art:
element = self.chevron(markup, attr, art=True)+element+self.chevron(markup, attr, False)
return element
else:
element = self.chevron(markup, attr)+element+self.chevron(markup, attr, False)
return element
if typ == 'lmf':
if markup in components['lmf']['GlobalInformation'] \
                or markup in components['lmf']['Lexicon']:
if sclose:
element = self.chevron(markup, attr, True, sclose=True)
return element
else:
element = self.chevron(markup, attr)+element+self.chevron(markup, attr, False)
return element
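    # chevron() assembles the raw '<tag ...>', '</tag>' and '<tag .../>' strings used
    # by balise(); when art=True it also stamps a freshly generated id on the opening tag.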
def chevron(self, el, attr, openchev=True, art=False, sclose=False):
"""Put tag around the data of element
:return str: tagging element
"""
idart = generate_id()
if art and attr == None:
if openchev : return "<"+el+" id='"+idart+"' class='data-entry'"+">"
if not openchev: return "</"+el+">"
if sclose : return "<"+el+" id='"+idart+"'/>"
if art and attr != None:
allattrib = ''
for at in attr.keys():
allattrib += ' '+at+'="'+attr[at]+'"'
if openchev and not sclose : return "<"+el+" id='"+idart+"' class='data-entry'"+' '+allattrib+">"
if openchev and sclose: return "<"+el+" id='"+idart+"' class='data-entry'"+' '+allattrib+"/>"
if not openchev: return "</"+el+">"
elif art == False and attr != None:
#print openchev
allattrib = ''
for at in attr.keys(): allattrib += ' '+at+'="'+attr[at]+'"'
if openchev and not sclose: return "<"+el+' '+allattrib+">"
if openchev and sclose: return "<"+el+' '+allattrib+"/>"
if not openchev: return "</"+el+">"
elif art == False and attr == None:
if openchev : return "<"+el+">"
if sclose : return "<"+el+"/>"
if not openchev: return "</"+el+">"
| Levis0045/MetaLex | metalex/xmlised/makeBalise.py | Python | agpl-3.0 | 26,115 |
# -*- coding: utf-8 -*-
import cgitb
import fnmatch
import io
import logging
import click
import pyjsdoc
import pyjsparser
import sys
from .parser.parser import ModuleMatcher
from .parser.visitor import Visitor, SKIP
from . import jsdoc
class Printer(Visitor):
def __init__(self, level=0):
super(Printer, self).__init__()
self.level = level
def _print(self, text):
print ' ' * self.level, text
def enter_generic(self, node):
self._print(node['type'])
self.level += 1
def exit_generic(self, node):
self.level -= 1
def enter_Identifier(self, node):
self._print(node['name'])
return SKIP
def enter_Literal(self, node):
self._print(node['value'])
return SKIP
def enter_BinaryExpression(self, node):
self._print(node['operator'])
self.level += 1
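# Parse each file with pyjsparser and run the given visitor class over its AST;
# parse/visit errors are logged (full traceback only at DEBUG level) instead of
# aborting the whole run.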
def visit_files(files, visitor, ctx):
for name in files:
with io.open(name) as f:
ctx.logger.info("%s", name)
try:
yield visitor().visit(pyjsparser.parse(f.read()))
except Exception as e:
if ctx.logger.isEnabledFor(logging.DEBUG):
ctx.logger.exception("while visiting %s", name)
else:
ctx.logger.error("%s while visiting %s", e, name)
# bunch of modules various bits depend on which are not statically defined
# (or are outside the scope of the system)
ABSTRACT_MODULES = [
jsdoc.ModuleDoc({
'module': 'web.web_client',
'dependency': {'web.AbstractWebClient'},
'exports': jsdoc.NSDoc({
'name': 'web_client',
'doc': 'instance of AbstractWebClient',
}),
}),
jsdoc.ModuleDoc({
'module': 'web.Tour',
'dependency': {'web_tour.TourManager'},
'exports': jsdoc.NSDoc({
'name': 'Tour',
'doc': 'maybe tourmanager instance?',
}),
}),
# OH FOR FUCK'S SAKE
jsdoc.ModuleDoc({
'module': 'summernote/summernote',
'exports': jsdoc.NSDoc({'doc': "totally real summernote"}),
})
]
@click.group(context_settings={'help_option_names': ['-h', '--help']})
@click.option('-v', '--verbose', count=True)
@click.option('-q', '--quiet', count=True)
@click.pass_context
def autojsdoc(ctx, verbose, quiet):
logging.basicConfig(
level=logging.INFO + (quiet - verbose) * 10,
format="[%(levelname)s %(created)f] %(message)s",
)
ctx.logger = logging.getLogger('autojsdoc')
ctx.visitor = None
ctx.files = []
ctx.kw = {}
@autojsdoc.command()
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
def ast(ctx, files):
""" Prints a structure tree of the provided files
"""
if not files:
print(ctx.get_help())
visit_files(files, lambda: Printer(level=1), ctx.parent)
@autojsdoc.command()
@click.option('-m', '--module', multiple=True, help="Only shows dependencies matching any of the patterns")
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
def dependencies(ctx, module, files):
""" Prints a dot file of all modules to stdout
"""
if not files:
print(ctx.get_help())
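    # Seed the name -> dependencies map with the hand-written ABSTRACT_MODULES, then
    # extend it from the parsed files; the graph is then walked from the selected
    # roots (or from every known module when no -m filter is given).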
byname = {
mod.name: mod.dependencies
for mod in ABSTRACT_MODULES
}
for modules in visit_files(files, ModuleMatcher, ctx.parent):
for mod in modules:
byname[mod.name] = mod.dependencies
print('digraph dependencies {')
todo = set()
# if module filters, roots are only matching modules
if module:
for f in module:
todo.update(fnmatch.filter(byname.keys(), f))
for m in todo:
# set a different box for selected roots
print(' "%s" [color=orangered]' % m)
else:
# otherwise check all modules
todo.update(byname)
done = set()
while todo:
node = todo.pop()
if node in done:
continue
done.add(node)
deps = byname[node]
todo.update(deps - done)
for dep in deps:
print(' "%s" -> "%s";' % (node, dep))
print('}')
try:
autojsdoc.main(prog_name='autojsdoc')
except Exception:
print(cgitb.text(sys.exc_info()))
| Aravinthu/odoo | doc/_extensions/autojsdoc/__main__.py | Python | agpl-3.0 | 4,294 |
import os
import sys
from src import impl as rlcs
import utils as ut
import analysis as anls
import matplotlib.pyplot as plt
import logging
import pickle as pkl
import time
config = ut.loadConfig('config')
sylbSimFolder=config['sylbSimFolder']
transFolder=config['transFolder']
lblDir=config['lblDir']
onsDir=config['onsDir']
resultDir=config['resultDir']
queryList = [['DHE','RE','DHE','RE','KI','TA','TA','KI','NA','TA','TA','KI','TA','TA','KI','NA'],['TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA'], ['TA','KI','TA','TA','KI','TA','TA','KI'], ['TA','TA','KI','TA','TA','KI'], ['TA', 'TA','KI', 'TA'],['KI', 'TA', 'TA', 'KI'], ['TA','TA','KI','NA'], ['DHA','GE','TA','TA']]
queryLenCheck = [4,6,8,16]
for query in queryList:
if len(query) not in queryLenCheck:
print 'The query is not of correct length!!'
sys.exit()
masterData = ut.getAllSylbData(tPath = transFolder, lblDir = lblDir, onsDir = onsDir)
res = anls.getPatternsInTransInGTPos(masterData, queryList)
| swapnilgt/percPatternDiscovery | rlcs/preAnalysisRun.py | Python | agpl-3.0 | 1,024 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shoop.api.factories import viewset_factory
from shoop.core.api.orders import OrderViewSet
from shoop.core.api.products import ProductViewSet, ShopProductViewSet
from shoop.core.models import Contact, Shop
from shoop.core.models.categories import Category
def populate_core_api(router):
"""
:param router: Router
:type router: rest_framework.routers.DefaultRouter
"""
router.register("shoop/category", viewset_factory(Category))
router.register("shoop/contact", viewset_factory(Contact))
router.register("shoop/order", OrderViewSet)
router.register("shoop/product", ProductViewSet)
router.register("shoop/shop", viewset_factory(Shop))
router.register("shoop/shop_product", ShopProductViewSet)
| janusnic/shoop | shoop/core/api/__init__.py | Python | agpl-3.0 | 983 |
# coding: utf-8
from django.contrib import admin
from hub.models import ExtraUserDetail
from .models import AuthorizedApplication
# Register your models here.
admin.site.register(AuthorizedApplication)
admin.site.register(ExtraUserDetail)
| onaio/kpi | kpi/admin.py | Python | agpl-3.0 | 241 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libidl(AutotoolsPackage):
"""libraries for Interface Definition Language files"""
homepage = "https://developer.gnome.org/"
url = "https://ftp.gnome.org/pub/gnome/sources/libIDL/0.8/libIDL-0.8.14.tar.bz2"
version('0.8.14', sha256='c5d24d8c096546353fbc7cedf208392d5a02afe9d56ebcc1cccb258d7c4d2220')
depends_on('pkgconfig', type='build')
depends_on('glib')
| iulian787/spack | var/spack/repos/builtin/packages/libidl/package.py | Python | lgpl-2.1 | 614 |
#!/usr/bin/python3
import os, sys, random
pandoraPath = os.getenv('PANDORAPATH', '/usr/local/pandora')
sys.path.append(pandoraPath+'/bin')
sys.path.append(pandoraPath+'/lib')
from pyPandora import Config, World, Agent, SizeInt
class MyAgent(Agent):
gatheredResources = 0
def __init__(self, id):
Agent.__init__( self, id)
print('constructing agent: ',self.id)
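    # Each step the agent tries a random one-cell move; if the new position is valid
    # it moves there, harvests the 'resources' value stored at that cell and zeroes it.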
def updateState(self):
print('updating state of: ',self.id)
newPosition = self.position
newPosition._x = newPosition._x + random.randint(-1,1)
newPosition._y = newPosition._y + random.randint(-1,1)
if self.getWorld().checkPosition(newPosition):
self.position = newPosition
self.gatheredResources = self.gatheredResources + self.getWorld().getValue('resources', self.position)
self.getWorld().setValue('resources', self.position, 0)
def registerAttributes(self):
self.registerIntAttribute('resources')
def serialize(self):
print('serializing MyAgent: ',self.id)
self.serializeIntAttribute('resources', self.gatheredResources)
class MyWorld(World):
def __init__(self, config):
World.__init__( self, config)
print('constructing MyWorld')
def createRasters(self):
print('creating rasters')
self.registerDynamicRaster("resources", 1)
self.getDynamicRaster("resources").setInitValues(0, 10, 0)
return
def createAgents(self):
print('creating agents')
for i in range (0, 10):
newAgent = MyAgent('MyAgent_'+str(i))
self.addAgent(newAgent)
newAgent.setRandomPosition()
def main():
print('getting started with pyPandora')
numTimeSteps = 10
worldSize = SizeInt(64,64)
myConfig = Config(worldSize, numTimeSteps)
myWorld = MyWorld(myConfig)
myWorld.initialize()
myWorld.run()
print('simulation finished')
if __name__ == "__main__":
main()
| montanier/pandora | docs/tutorials/01_src/tutorial_pyPandora.py | Python | lgpl-3.0 | 2,012 |
import unittest
import json
from datetime import datetime
from pymongo import MongoClient
from apps.basic_resource import server
from apps.basic_resource.documents import Article, Comment, Vote
class ResourcePostListFieldItemListField(unittest.TestCase):
"""
    Test that an HTTP POST which adds entries to a listfield in an item of a
    listfield on a resource gives the right response and adds the data
    to the database.
"""
@classmethod
def setUpClass(cls):
cls.app = server.app.test_client()
cls.mongo_client = MongoClient()
comment_id = "528a5250aa2649ffd8ce8a90"
cls.initial_data = {
'title': "Test title",
'text': "Test text",
'publish': True,
'publish_date': datetime(2013, 10, 9, 8, 7, 8),
'comments': [
Comment(
id=comment_id,
text="Test comment old",
email="[email protected]",
upvotes=[
Vote(
ip_address="1.4.1.2",
date=datetime(2012, 5, 2, 9, 1, 3),
name="Jzorz"
),
Vote(
ip_address="2.4.5.2",
date=datetime(2012, 8, 2, 8, 2, 1),
name="Nahnahnah"
)
]
),
Comment(
text="Test comment 2 old",
email="[email protected]",
upvotes=[
Vote(
ip_address="1.4.1.4",
date=datetime(2013, 5, 2, 9, 1, 3),
name="Zwefhalala"
),
Vote(
ip_address="2.4.9.2",
date=datetime(2013, 8, 2, 8, 2, 1),
name="Jhardikranall"
)
]
),
],
'top_comment': Comment(
text="Top comment",
email="[email protected]",
upvotes=[
Vote(
ip_address="5.4.1.2",
date=datetime(2012, 5, 2, 9, 2, 3),
name="Majananejjeew"
),
Vote(
ip_address="2.4.1.2",
date=datetime(2012, 3, 2, 8, 2, 1),
name="Hoeieieie"
)
]
),
'tags': ["tag1", "tag2", "tag3"]
}
article = Article(**cls.initial_data).save()
cls.add_data = {
'ip_address': "5.5.5.5",
'name': "Wejejejeje"
}
cls.response = cls.app.post(
'/articles/{}/comments/{}/upvotes/'.format(
unicode(article['id']),
comment_id
),
headers={'content-type': 'application/json'},
data=json.dumps(cls.add_data)
)
@classmethod
def tearDownClass(cls):
cls.mongo_client.unittest_monkful.article.remove()
def test_status_code(self):
"""
Test if the response status code is 201.
"""
self.assertEqual(self.response.status_code, 201)
def test_content_type(self):
"""
Test if the content-type header is 'application/json'.
"""
self.assertEqual(
self.response.headers['content-type'],
'application/json'
)
def test_json(self):
"""
Test if the response data is valid JSON.
"""
try:
json.loads(self.response.data)
except:
self.fail("Response is not valid JSON.")
def test_content(self):
"""
        Test that the deserialized response data matches the data we
        posted to the resource in `setUpClass`.
"""
response_data = json.loads(self.response.data)
# Remove the date field because it's auto generated and we
# didn't include it in the original posted data.
del response_data['date']
self.assertEqual(response_data, self.add_data)
def test_documents(self):
"""
Test if the POST-ed data really ended up in the document
"""
upvotes = Article.objects[0].comments[0].upvotes
self.assertEqual(len(upvotes), 3)
self.assertEqual(upvotes[2].ip_address, self.add_data['ip_address'])
| gitaarik/monkful | tests/tests/basic_resource/post_listfield_item_listfield.py | Python | lgpl-3.0 | 4,664 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('narglatch_sick')
mobileTemplate.setLevel(21)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(60)
mobileTemplate.setHideType("Bristley Hide")
mobileTemplate.setHideAmount(45)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(40)
mobileTemplate.setSocialGroup("narglatch")
mobileTemplate.setAssistRange(2)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
mobileTemplate.setStalker(False)
templates = Vector()
templates.add('object/mobile/shared_narglatch_hue.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_claw_2')
attacks.add('bm_slash_2')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('narglatch_sick', mobileTemplate)
	return
| ProjectSWGCore/NGECore2 | scripts/mobiles/naboo/narglatch_sick.py | Python | lgpl-3.0 | 1632 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('water_thief')
mobileTemplate.setLevel(5)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("thug")
mobileTemplate.setAssistRange(4)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_dressed_tatooine_moisture_thief.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/sword/shared_sword_01.iff', WeaponType.ONEHANDEDMELEE, 1.0, 5, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('saberhit')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('water_thief', mobileTemplate)
	return
| agry/NGECore2 | scripts/mobiles/tatooine/water_thief.py | Python | lgpl-3.0 | 1333 |
from charm.toolbox.pairinggroup import PairingGroup,GT,extract_key
from charm.toolbox.symcrypto import AuthenticatedCryptoAbstraction
from charm.toolbox.ABEnc import ABEnc
from charm.schemes.abenc.abenc_lsw08 import KPabe
debug = False
class HybridABEnc(ABEnc):
"""
>>> from charm.schemes.abenc.abenc_lsw08 import KPabe
>>> group = PairingGroup('SS512')
>>> kpabe = KPabe(group)
>>> hyb_abe = HybridABEnc(kpabe, group)
>>> access_policy = ['ONE', 'TWO', 'THREE']
>>> access_key = '((FOUR or THREE) and (TWO or ONE))'
>>> msg = b"hello world this is an important message."
>>> (master_public_key, master_key) = hyb_abe.setup()
>>> secret_key = hyb_abe.keygen(master_public_key, master_key, access_key)
>>> cipher_text = hyb_abe.encrypt(master_public_key, msg, access_policy)
>>> hyb_abe.decrypt(cipher_text, secret_key)
b'hello world this is an important message.'
"""
def __init__(self, scheme, groupObj):
ABEnc.__init__(self)
global abenc
# check properties (TODO)
abenc = scheme
self.group = groupObj
def setup(self):
return abenc.setup()
def keygen(self, pk, mk, object):
return abenc.keygen(pk, mk, object)
def encrypt(self, pk, M, object):
key = self.group.random(GT)
c1 = abenc.encrypt(pk, key, object)
# instantiate a symmetric enc scheme from this key
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
c2 = cipher.encrypt(M)
return { 'c1':c1, 'c2':c2 }
def decrypt(self, ct, sk):
c1, c2 = ct['c1'], ct['c2']
key = abenc.decrypt(c1, sk)
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
return cipher.decrypt(c2)
def main():
groupObj = PairingGroup('SS512')
kpabe = KPabe(groupObj)
hyb_abe = HybridABEnc(kpabe, groupObj)
access_key = '((ONE or TWO) and THREE)'
access_policy = ['ONE', 'TWO', 'THREE']
message = b"hello world this is an important message."
(pk, mk) = hyb_abe.setup()
if debug: print("pk => ", pk)
if debug: print("mk => ", mk)
sk = hyb_abe.keygen(pk, mk, access_key)
if debug: print("sk => ", sk)
ct = hyb_abe.encrypt(pk, message, access_policy)
mdec = hyb_abe.decrypt(ct, sk)
assert mdec == message, "Failed Decryption!!!"
if debug: print("Successful Decryption!!!")
if __name__ == "__main__":
debug = True
main()
| JHUISI/charm | charm/adapters/kpabenc_adapt_hybrid.py | Python | lgpl-3.0 | 2,476 |
from colour import *
from cartesian import *
from timeit import *
def test_colour():
b = colour_create(0, 0, 0, 0)
for i in range(1, 100000):
c = colour_create(.5, .5, .5, 0)
b = colour_add(b, c)
def test_cartesian():
b = cartesian_create(0, 0, 0)
for i in range(1, 50000):
c = cartesian_create(.5, .5, .5)
b = cartesian_normalise(cartesian_add(b, c))
d = cartesian_dot(c, b)
e = cartesian_cross(c, b)
# if __name__ == '__main__':
# print(repeat("test_colour()",
# setup="from __main__ import test_colour", number=50))
| stinchjack/Python-Raytracer | raytracer/misc/speed.py | Python | lgpl-3.0 | 608 |
from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
from ..utils import (
compat_str,
compat_urllib_parse,
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TEST = {
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
'ext': 'mp4',
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow or :colbert
# urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
|https?://(:www\.)?
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
(?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
(?:[?#].*|$)'''
_TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
'ext': 'mp4',
'upload_date': '20121213',
'description': 'Kristen Stewart learns to let loose in "On the Road."',
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
_video_extensions = {
'3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': (1280, 720),
'2200': (960, 540),
'1700': (768, 432),
'1200': (640, 360),
'750': (512, 288),
'400': (384, 216),
}
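    # The two maps above translate the feed's bitrate labels into a container
    # extension and a (width, height) pair when the format list is assembled in
    # _real_extract() below.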
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = 'http://thedailyshow.cc.com/full-episodes/'
else:
url = 'http://thecolbertreport.cc.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
if mobj.group('clip'):
if mobj.group('videotitle'):
epTitle = mobj.group('videotitle')
elif mobj.group('showname') == 'thedailyshow':
epTitle = mobj.group('tdstitle')
else:
epTitle = mobj.group('cntitle')
dlNewest = False
elif mobj.group('interview'):
epTitle = mobj.group('interview_title')
dlNewest = False
else:
dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
show_name = mobj.group('showname')
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a data-mgid attribute
            # without a URL prefix; so extract the alternate reference
            # and then add the URL prefix manually.
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri)
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
title = idoc.find('./channel/title').text
description = idoc.find('./channel/description').text
entries = []
item_els = idoc.findall('.//item')
for part_num, itemEl in enumerate(item_els):
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
duration = float_or_none(content.attrib.get('duration'))
mediagen_url = content.attrib['url']
guid = itemEl.find('./guid').text.rpartition(':')[-1]
cdoc = self._download_xml(
mediagen_url, epTitle,
'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
formats = []
for format, rtmp_video_url in turls:
w, h = self._video_dimensions.get(format, (None, None))
formats.append({
'format_id': 'vhttp-%s' % format,
'url': self._transform_rtmp_url(rtmp_video_url),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
formats.append({
'format_id': 'rtmp-%s' % format,
'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
self._sort_formats(formats)
virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
entries.append({
'id': guid,
'title': virtual_id,
'formats': formats,
'uploader': show_name,
'upload_date': upload_date,
'duration': duration,
'thumbnail': thumbnail,
'description': description,
})
return {
'_type': 'playlist',
'entries': entries,
'title': show_name + ' ' + title,
'description': description,
}
| svagionitis/youtube-dl | youtube_dl/extractor/comedycentral.py | Python | unlicense | 9,941 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from op_test import OpTest
paddle.enable_static()
np.random.seed(1)
def maxout_forward_naive(x, groups, channel_axis):
s0, s1, s2, s3 = x.shape
if channel_axis == 1:
return np.ndarray([s0, s1 // groups, groups, s2, s3], \
buffer = x, dtype=x.dtype).max(axis=2)
return np.ndarray([s0, s1, s2, s3 // groups, groups], \
buffer = x, dtype=x.dtype).max(axis=4)
class TestMaxOutOp(OpTest):
def setUp(self):
self.op_type = "maxout"
self.dtype = 'float64'
self.shape = [3, 6, 2, 4]
self.groups = 2
self.axis = 1
self.set_attrs()
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
out = maxout_forward_naive(x, self.groups, self.axis)
self.inputs = {'X': x}
self.attrs = {'groups': self.groups, 'axis': self.axis}
self.outputs = {'Out': out}
def set_attrs(self):
pass
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestMaxOutOpAxis0(TestMaxOutOp):
def set_attrs(self):
self.axis = -1
class TestMaxOutOpAxis1(TestMaxOutOp):
def set_attrs(self):
self.axis = 3
class TestMaxOutOpFP32(TestMaxOutOp):
def set_attrs(self):
self.dtype = 'float32'
class TestMaxOutOpGroups(TestMaxOutOp):
def set_attrs(self):
self.groups = 3
class TestMaxoutAPI(unittest.TestCase):
# test paddle.nn.Maxout, paddle.nn.functional.maxout
def setUp(self):
self.x_np = np.random.uniform(-1, 1, [2, 6, 5, 4]).astype(np.float64)
self.groups = 2
self.axis = 1
self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.maxout(x, self.groups, self.axis)
m = paddle.nn.Maxout(self.groups, self.axis)
out2 = m(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis)
for r in res:
self.assertTrue(np.allclose(out_ref, r))
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.maxout(x, self.groups, self.axis)
m = paddle.nn.Maxout(self.groups, self.axis)
out2 = m(x)
out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis)
for r in [out1, out2]:
self.assertTrue(np.allclose(out_ref, r.numpy()))
out3 = F.maxout(x, self.groups, -1)
out3_ref = maxout_forward_naive(self.x_np, self.groups, -1)
self.assertTrue(np.allclose(out3_ref, out3.numpy()))
paddle.enable_static()
def test_fluid_api(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.maxout(x, groups=self.groups, axis=self.axis)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis)
self.assertTrue(np.allclose(out_ref, res[0]))
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.fluid.layers.maxout(x, groups=self.groups, axis=self.axis)
self.assertTrue(np.allclose(out_ref, out.numpy()))
paddle.enable_static()
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.maxout, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.fluid.data(
name='x_int32', shape=[2, 4, 6, 8], dtype='int32')
self.assertRaises(TypeError, F.maxout, x_int32)
x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
if __name__ == '__main__':
unittest.main()
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_maxout_op.py | Python | apache-2.0 | 5,104 |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.cqlengine.columns import Column, Set, List, Text
from cassandra.cqlengine.operators import *
from cassandra.cqlengine.statements import (UpdateStatement, WhereClause,
AssignmentClause, SetUpdateClause,
ListUpdateClause)
import six
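# These tests exercise both the rendered CQL text and the bound context values
# produced by UpdateStatement as clauses are added or the context id is shifted.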
class UpdateStatementTests(unittest.TestCase):
def test_table_rendering(self):
""" tests that fields are properly added to the select statement """
us = UpdateStatement('table')
self.assertTrue(six.text_type(us).startswith('UPDATE table SET'), six.text_type(us))
self.assertTrue(str(us).startswith('UPDATE table SET'), str(us))
def test_rendering(self):
us = UpdateStatement('table')
us.add_assignment(Column(db_field='a'), 'b')
us.add_assignment(Column(db_field='c'), 'd')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', six.text_type(us))
us.add_where(Column(db_field='a'), NotEqualsOperator(), 'y')
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s AND "a" != %(3)s', six.text_type(us))
def test_context(self):
us = UpdateStatement('table')
us.add_assignment(Column(db_field='a'), 'b')
us.add_assignment(Column(db_field='c'), 'd')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'})
def test_context_update(self):
us = UpdateStatement('table')
us.add_assignment(Column(db_field='a'), 'b')
us.add_assignment(Column(db_field='c'), 'd')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
us.update_context_id(3)
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s')
self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'})
def test_additional_rendering(self):
us = UpdateStatement('table', ttl=60)
us.add_assignment(Column(db_field='a'), 'b')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
self.assertIn('USING TTL 60', six.text_type(us))
def test_update_set_add(self):
us = UpdateStatement('table')
us.add_update(Set(Text, db_field='a'), set((1,)), 'add')
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s')
def test_update_empty_set_add_does_not_assign(self):
us = UpdateStatement('table')
us.add_update(Set(Text, db_field='a'), set(), 'add')
self.assertFalse(us.assignments)
def test_update_empty_set_removal_does_not_assign(self):
us = UpdateStatement('table')
us.add_update(Set(Text, db_field='a'), set(), 'remove')
self.assertFalse(us.assignments)
def test_update_list_prepend_with_empty_list(self):
us = UpdateStatement('table')
us.add_update(List(Text, db_field='a'), [], 'prepend')
self.assertFalse(us.assignments)
def test_update_list_append_with_empty_list(self):
us = UpdateStatement('table')
us.add_update(List(Text, db_field='a'), [], 'append')
self.assertFalse(us.assignments)
| mambocab/python-driver | tests/integration/cqlengine/statements/test_update_statement.py | Python | apache-2.0 | 3,975 |
# Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api import openstack
from nova.api.openstack import compute
from nova.api.openstack import wsgi
from nova.tests.functional.api import client
from nova.tests.functional import api_paste_fixture
from nova.tests.functional import test_servers
from nova.tests.unit import fake_network
class LegacyV2CompatibleTestBase(test_servers.ServersTestBase):
_api_version = 'v2'
def setUp(self):
self.useFixture(api_paste_fixture.ApiPasteV2CompatibleFixture())
super(LegacyV2CompatibleTestBase, self).setUp()
self._check_api_endpoint('/v2', [compute.APIRouterV21,
openstack.LegacyV2CompatibleWrapper])
def test_request_with_microversion_headers(self):
response = self.api.api_post('os-keypairs',
{"keypair": {"name": "test"}},
headers={wsgi.API_VERSION_REQUEST_HEADER: '2.100'})
self.assertNotIn(wsgi.API_VERSION_REQUEST_HEADER, response.headers)
self.assertNotIn('Vary', response.headers)
self.assertNotIn('type', response.body["keypair"])
def test_request_without_addtional_properties_check(self):
response = self.api.api_post('os-keypairs',
{"keypair": {"name": "test", "foooooo": "barrrrrr"}},
headers={wsgi.API_VERSION_REQUEST_HEADER: '2.100'})
self.assertNotIn(wsgi.API_VERSION_REQUEST_HEADER, response.headers)
self.assertNotIn('Vary', response.headers)
self.assertNotIn('type', response.body["keypair"])
def test_request_with_pattern_properties_check(self):
fake_network.set_stub_network_methods(self.stubs)
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
self._wait_for_state_change(created_server, 'BUILD')
response = self.api.post_server_metadata(created_server['id'],
{'a': 'b'})
self.assertEqual(response, {'a': 'b'})
def test_request_with_pattern_properties_with_avoid_metadata(self):
fake_network.set_stub_network_methods(self.stubs)
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
exc = self.assertRaises(client.OpenStackApiException,
self.api.post_server_metadata,
created_server['id'],
{'a': 'b',
'x' * 300: 'y',
'h' * 300: 'i'})
self.assertEqual(exc.response.status_code, 400)
| nikesh-mahalka/nova | nova/tests/functional/test_legacy_v2_compatible_wrapper.py | Python | apache-2.0 | 3,299 |
__author__ = 'lorenzo'
#
# http://stackoverflow.com/a/29681061/2536357
#
from google.appengine.ext import vendor
# Add any libraries installed in the "lib" folder.
vendor.add('lib')
# run from the project root:
# pip install -t lib -r requirements.txt
# Uncomment if appstat is on
#def webapp_add_wsgi_middleware(app):
# from google.appengine.ext.appstats import recording
# app = recording.appstats_wsgi_middleware(app)
# return app
| Mec-iS/semantic-data-chronos | appengine_config.py | Python | apache-2.0 | 441 |
#----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
##
# Implements SFA GID. GIDs are based on certificates, and the GID class is a
# descendant of the certificate class.
##
import xmlrpclib
import uuid
from handler.geni.v3.extensions.sfa.trust.certificate import Certificate
from handler.geni.v3.extensions.sfa.util.faults import GidInvalidParentHrn, GidParentHrn
from handler.geni.v3.extensions.sfa.util.sfalogging import logger
from handler.geni.v3.extensions.sfa.util.xrn import hrn_to_urn, urn_to_hrn, hrn_authfor_hrn
##
# Create a new uuid. Returns the UUID as a string.
def create_uuid():
return str(uuid.uuid4().int)
##
# GID is a tuple:
# (uuid, urn, public_key)
#
# UUID is a unique identifier and is created by the python uuid module
# (or the utility function create_uuid() in gid.py).
#
# HRN is a human readable name. It is a dotted form similar to a backward domain
# name. For example, planetlab.us.arizona.bakers.
#
# URN is a human readable identifier of form:
# "urn:publicid:IDN+toplevelauthority[:sub-auth.]*[\res. type]\ +object name"
# For example, urn:publicid:IDN+planetlab:us:arizona+user+bakers
#
# PUBLIC_KEY is the public key of the principal identified by the UUID/HRN.
# It is a Keypair object as defined in the cert.py module.
#
# It is expected that there is a one-to-one pairing between UUIDs and HRN,
# but it is uncertain how this would be enforced or if it needs to be enforced.
#
# These fields are encoded using xmlrpc into the subjectAltName field of the
# x509 certificate. Note: Call encode() once the fields have been filled in
# to perform this encoding.
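#
# As an illustrative sketch (not part of the original module; the subject,
# URN and email below are made-up placeholders), a GID is typically built
# and encoded along these lines:
#
#   gid = GID(create=True,
#             subject="planetlab.us.arizona.bakers",
#             urn="urn:publicid:IDN+planetlab:us:arizona+user+bakers",
#             uuid=create_uuid())
#   gid.set_email("[email protected]")
#   gid.encode()                    # pack urn/uuid/email into subjectAltName
#   gid.decode()                    # parse them back out of the certificate
#   print gid.dump_string(indent=2)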
class GID(Certificate):
uuid = None
hrn = None
urn = None
email = None # for adding to the SubjectAltName
##
# Create a new GID object
#
# @param create If true, create the X509 certificate
# @param subject If subject!=None, create the X509 cert and set the subject name
# @param string If string!=None, load the GID from a string
# @param filename If filename!=None, load the GID from a file
# @param lifeDays life of GID in days - default is 1825==5 years
def __init__(self, create=False, subject=None, string=None, filename=None, uuid=None, hrn=None, urn=None, lifeDays=1825):
Certificate.__init__(self, lifeDays, create, subject, string, filename)
if subject:
logger.debug("Creating GID for subject: %s" % subject)
if uuid:
self.uuid = int(uuid)
if hrn:
self.hrn = hrn
self.urn = hrn_to_urn(hrn, 'unknown')
if urn:
self.urn = urn
self.hrn, type = urn_to_hrn(urn)
def set_uuid(self, uuid):
if isinstance(uuid, str):
self.uuid = int(uuid)
else:
self.uuid = uuid
def get_uuid(self):
if not self.uuid:
self.decode()
return self.uuid
def set_hrn(self, hrn):
self.hrn = hrn
def get_hrn(self):
if not self.hrn:
self.decode()
return self.hrn
def set_urn(self, urn):
self.urn = urn
self.hrn, type = urn_to_hrn(urn)
def get_urn(self):
if not self.urn:
self.decode()
return self.urn
# Will be stuffed into subjectAltName
def set_email(self, email):
self.email = email
def get_email(self):
if not self.email:
self.decode()
return self.email
def get_type(self):
if not self.urn:
self.decode()
_, t = urn_to_hrn(self.urn)
return t
##
# Encode the GID fields and package them into the subject-alt-name field
# of the X509 certificate. This must be called prior to signing the
# certificate. It may only be called once per certificate.
def encode(self):
if self.urn:
urn = self.urn
else:
urn = hrn_to_urn(self.hrn, None)
str = "URI:" + urn
if self.uuid:
str += ", " + "URI:" + uuid.UUID(int=self.uuid).urn
if self.email:
str += ", " + "email:" + self.email
self.set_data(str, 'subjectAltName')
##
# Decode the subject-alt-name field of the X509 certificate into the
# fields of the GID. This is automatically called by the various get_*()
# functions in this class.
def decode(self):
data = self.get_data('subjectAltName')
dict = {}
if data:
if data.lower().startswith('uri:http://<params>'):
dict = xmlrpclib.loads(data[11:])[0][0]
else:
spl = data.split(', ')
for val in spl:
if val.lower().startswith('uri:urn:uuid:'):
dict['uuid'] = uuid.UUID(val[4:]).int
elif val.lower().startswith('uri:urn:publicid:idn+'):
dict['urn'] = val[4:]
elif val.lower().startswith('email:'):
# FIXME: Ensure there isn't cruft in that address...
# EG look for email:copy,....
dict['email'] = val[6:]
self.uuid = dict.get("uuid", None)
self.urn = dict.get("urn", None)
self.hrn = dict.get("hrn", None)
self.email = dict.get("email", None)
if self.urn:
self.hrn = urn_to_hrn(self.urn)[0]
##
# Dump the credential to stdout.
#
# @param indent specifies a number of spaces to indent the output
# @param dump_parents If true, also dump the parents of the GID
def dump(self, *args, **kwargs):
print self.dump_string(*args,**kwargs)
def dump_string(self, indent=0, dump_parents=False):
result=" "*(indent-2) + "GID\n"
result += " "*indent + "hrn:" + str(self.get_hrn()) +"\n"
result += " "*indent + "urn:" + str(self.get_urn()) +"\n"
result += " "*indent + "uuid:" + str(self.get_uuid()) + "\n"
if self.get_email() is not None:
result += " "*indent + "email:" + str(self.get_email()) + "\n"
filename=self.get_filename()
if filename: result += "Filename %s\n"%filename
if self.parent and dump_parents:
result += " "*indent + "parent:\n"
result += self.parent.dump_string(indent+4, dump_parents)
return result
##
# Verify the chain of authenticity of the GID. First perform the checks
# of the certificate class (verifying that each parent signs the child,
# etc). In addition, GIDs also confirm that the parent's HRN is a prefix
# of the child's HRN, and the parent is of type 'authority'.
#
# Verifying these prefixes prevents a rogue authority from signing a GID
# for a principal that is not a member of that authority. For example,
# planetlab.us.arizona cannot sign a GID for planetlab.us.princeton.foo.
def verify_chain(self, trusted_certs = None):
# do the normal certificate verification stuff
trusted_root = Certificate.verify_chain(self, trusted_certs)
if self.parent:
# make sure the parent's hrn is a prefix of the child's hrn
if not hrn_authfor_hrn(self.parent.get_hrn(), self.get_hrn()):
raise GidParentHrn("This cert HRN %s isn't in the namespace for parent HRN %s" % (self.get_hrn(), self.parent.get_hrn()))
# Parent must also be an authority (of some type) to sign a GID
# There are multiple types of authority - accept them all here
if not self.parent.get_type().find('authority') == 0:
raise GidInvalidParentHrn("This cert %s's parent %s is not an authority (is a %s)" % (self.get_hrn(), self.parent.get_hrn(), self.parent.get_type()))
# Then recurse up the chain - ensure the parent is a trusted
# root or is in the namespace of a trusted root
self.parent.verify_chain(trusted_certs)
else:
# make sure that the trusted root's hrn is a prefix of the child's
trusted_gid = GID(string=trusted_root.save_to_string())
trusted_type = trusted_gid.get_type()
trusted_hrn = trusted_gid.get_hrn()
#if trusted_type == 'authority':
# trusted_hrn = trusted_hrn[:trusted_hrn.rindex('.')]
cur_hrn = self.get_hrn()
if not hrn_authfor_hrn(trusted_hrn, cur_hrn):
raise GidParentHrn("Trusted root with HRN %s isn't a namespace authority for this cert: %s" % (trusted_hrn, cur_hrn))
# There are multiple types of authority - accept them all here
if not trusted_type.find('authority') == 0:
raise GidInvalidParentHrn("This cert %s's trusted root signer %s is not an authority (is a %s)" % (self.get_hrn(), trusted_hrn, trusted_type))
return
| dana-i2cat/felix | modules/resource/manager/stitching-entity/src/handler/geni/v3/extensions/sfa/trust/gid.py | Python | apache-2.0 | 10,122 |
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from twitter.common.quantity import Amount, Time
from twitter.pants.targets.python_target import PythonTarget
class PythonTests(PythonTarget):
def __init__(self, name, sources, resources=None, dependencies=None,
timeout=Amount(2, Time.MINUTES),
soft_dependencies=False):
"""
name / sources / resources / dependencies: See PythonLibrary target
timeout: Amount of time before this test should be considered timed-out
[Default: 2 minutes]
soft_dependencies: Whether or not we should ignore dependency resolution
errors for this test. [Default: False]
"""
self._timeout = timeout
self._soft_dependencies = bool(soft_dependencies)
PythonTarget.__init__(self, name, sources, resources, dependencies)
self.add_label('python')
self.add_label('tests')
@property
def timeout(self):
return self._timeout
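# Illustrative sketch (not part of the original file): in a BUILD file a
# python_tests target is typically declared roughly as follows; the target
# name, source file and dependency path are hypothetical.
#
#   python_tests(
#     name = 'my-tests',
#     sources = ['test_my_module.py'],
#     dependencies = [pants('src/python/my_module')],
#   )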
class PythonTestSuite(PythonTarget):
def __init__(self, name, dependencies=None):
PythonTarget.__init__(self, name, (), (), dependencies)
| foursquare/commons-old | src/python/twitter/pants/targets/python_tests.py | Python | apache-2.0 | 1,972 |
# coding: utf-8
'''
Name : ThammeGowda Narayanaswamy
USCID: 2074669439
'''
import math
from scipy.stats import multivariate_normal
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import scipy as sp
from scipy import spatial
from scipy import stats
from pprint import pprint
blob_file = "hw5_blob.csv"
circle_file = "hw5_circle.csv"
def load_points(f_name):
with open(f_name) as f:
res = []
for l in f:
x,y = l.split(",")
res.append([float(x), float(y)])
return np.array(res)
blobs = load_points(blob_file)
circles = load_points(circle_file)
'''
# In[4]:
plt.plot(*zip(*circles), marker='o', color='r', ls='')
plt.show()
plt.plot(*zip(*blobs), marker='o', color='b', ls='')
plt.show()
'''
# In[5]:
def k_means(k, pts, get_indices=False, silent=True, tol=1e-5):
    '''Lloyd's k-means clustering.
    :params:
        k - number of clusters
        pts - array of points to cluster
        get_indices - if True, map each centroid to the indices of its points
                      instead of the points themselves
        silent - if False, print the total centroid movement every iteration
        tol - stop once the total centroid movement falls below this threshold
    :returns: dict mapping each centroid (a tuple) to its assigned points
              (or to their indices when get_indices is True)
    '''
N = len(pts)
assert k <= N
print("K=%d, N=%d" % (k, N))
# pick random k points
pos = set()
while len(pos) < k:
r = np.random.randint(N)
pos.add(r)
centroids = []
for p in pos:
centroids.append(tuple(pts[p]))
change = float('inf')
itr, max_iters = 0, 100
while change > tol and itr < max_iters:
itr += 1
# assign cluster to each point
asgn = {}
indices = {}
for ct in centroids:
asgn[ct] = []
indices[ct] = []
for idx, pt in enumerate(pts):
mindist = float('inf')
a = None
for ct in centroids:
dist = spatial.distance.cdist([ct], [pt])
if dist < mindist:
mindist = dist
a = ct
asgn[a].append(pt)
indices[a].append(idx)
# compute means of each cluster
oldcentr = centroids
centroids = []
for ct, cluster in asgn.items():
centroids.append(tuple(np.array(cluster).mean(axis=0)))
dist_matrix = spatial.distance.cdist(oldcentr, centroids)
# has distance between each pair of {new, old} centroids
# need the diagonal values
change = dist_matrix.trace()
if not silent:
print("Movement in centroids", change)
return indices if get_indices else asgn
# In[6]:
print("# K Means")
colors = ['r', 'g', 'b', 'y', 'c', 'k']
plt.figure(1, figsize=(15, 10))
plt.title("K Means")
ks = {2,3,5}
dss = {'Blobs': blobs, 'Circles': circles}
j = 1
for title, ds in dss.items():
for k in ks:
clstrs = k_means(k, ds)
plt.subplot(2, 3, j)
i = 0
for cnt, cpts in clstrs.items():
plt.plot(*zip(*cpts), marker='o', color=colors[i], ls='')
i += 1
plt.title("%s , K=%d" % (title, k))
j += 1
plt.show()
# # Kernel
'''
# ## Feature Mapping
# In[7]:
center = [0.0, 0.0]
newdim = sp.spatial.distance.cdist([center], circles).transpose()
clusters = k_means(2, newdim, get_indices=True)
i = 0
for cnt, cpts in clusters.items():
cpts = map(lambda x: circles[x], cpts)
plt.plot(*zip(*cpts), marker='o', color=colors[i], ls='')
i += 1
plt.show()
'''
# ## Kernel K Means
#
# Kernel used :
#     k(x1, x2) = 1 - min(d1, d2) / max(d1, d2),  where d_i is the squared
#     distance of x_i from the rings' common center (the origin here).
#
# Putting the smaller distance in the numerator and the larger one in the
# denominator keeps the kernel symmetric and bounded in [0, 1): it is close
# to 0 for points on the same ring and grows as the two radii differ.
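#
# Quick arithmetic check of that kernel (an illustrative aside, not part of
# the original assignment), using circular_kernel() defined further below
# with the center left at the origin:
#   circular_kernel([1, 0], [0, 2])  ->  1 - 1/4 = 0.75   (different rings)
#   circular_kernel([1, 0], [0, 1])  ->  1 - 1/1 = 0.0    (same ring)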
print("Kernel K means")
class KernelKMeans(object):
def kernel_matrix(self, data, kernel_func):
''' Computes kernel matrix
: params:
data - data points
kernel_func - kernel function
:returns: nxn matrix
'''
n = data.shape[0]
K = np.zeros((n,n), dtype=float)
for i in range(n):
for j in range(n):
K[i,j] = kernel_func(data[i], data[j])
return K
def cluster(self, X, k, kernel_func, max_itr=100, tol=1e-3):
'''
Clusters the points
:params:
X - data points
k - number of clusters
kernel_func - kernel function that outputs smaller values for points in same cluster
:returns: Nx1 vector of assignments
'''
# N
N = X.shape[0]
# NxN matrix from kernel funnction element wise
K = self.kernel_matrix(X, kernel_func)
# equal weightage to all
cluster_weights = np.ones(N)
# Assignments : random assignments to begin with
A = np.random.randint(k, size=N)
        for it in xrange(max_itr):  # may bounce between two local minima; abort after max_itr
# N x k matrix that stores distance between every point and cluster center
dist = self.compute_dist(K, k, A, sw=cluster_weights)
oldA, A = A, dist.argmin(axis=1)
# Check if it is conveged
n_same = np.sum(np.abs(A - oldA) == 0)
if 1 - float(n_same) / N < tol:
print "Converged at iteration:", it + 1
break
return A
def compute_dist(self, K, k, A, sw):
"""
Computes Nxk distance matrix using kernel matrix
: params:
K - NxN kernel Matrix
k - number of clusters
A - Nx1 Assignments
sw - sample weights
: returns : Nxk distance matrix
"""
dist = np.zeros((K.shape[0], k))
for cl in xrange(k):
mask = A == cl
if np.sum(mask) == 0:
raise Error("ERROR:cluster '%d' is empty. Looks like we cant make %d clusters" % (cl, k))
N_ = sw[mask].sum()
KK = K[mask][:, mask]
dist[:, cl] += np.sum(np.outer(sw[mask], sw[mask]) * KK / (N_*N_))
dist[:, cl] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / N_
return dist
def distance(x1, x2):
    '''Squared Euclidean distance between 2 points
:params:
x1 - point1
x2 - point2
'''
return np.sum((x1 - x2) ** 2)
def circular_kernel(x1, x2, center=None):
'''This kernel outputs lesser distance for the points that are from circumference
:params:
x1 - first point
x2 - second point
center - center of circle(default = origin (0,0,...))
'''
if center is None:
center = np.zeros(len(x1))
dist1 = distance(x1, center)
dist2 = distance(x2, center)
return 1.0 - min(dist1, dist2) / max(dist1, dist2)
k = 2  # the circles data set contains two concentric rings
clusters = KernelKMeans().cluster(circles, k, circular_kernel)
for i in range(k):
    cpts = circles[clusters == i]
    plt.plot(*zip(*cpts), marker='o', color=colors[i], ls='')
plt.show()
# # EM Algorithm with GMM
print("EM Algorithm")
# In[62]:
def multivar_gaussian_pdf(x, mu, covar):
return multivariate_normal.pdf(x, mean=mu, cov=covar)
class EM_GMM(object):
def __init__(self, data, k):
self.data = data
self.k = k
self.N = data.shape[0]
# theta param
self.mean, self.cov, self.weight = [], [], []
# random initialization
A = np.random.randint(k, size=data.shape[0])
for c in range(k):
cpts = data[A == c]
self.mean.append(np.mean(cpts, axis=0))
self.cov.append(np.cov(np.array(cpts).transpose()))
self.weight.append(1.0 * cpts.shape[0] / data.shape[0])
    def compute_gamma(self):
        # E-step: gamma[i, c] = w_c * N(x_i; mu_c, cov_c) / sum_j w_j * N(x_i; mu_j, cov_j)
        gamma = np.zeros((self.N, self.k), dtype=float)
        for idx, pt in enumerate(self.data):
            pdf = []
            for ct in range(self.k):
                temp = multivar_gaussian_pdf(pt, self.mean[ct], self.cov[ct])
                pdf.append(temp * self.weight[ct])
            gamma[idx] = np.array(pdf) / sum(pdf)
        return gamma
    def update_theta(self, P):
        # M-step: re-estimate the mixture weights, means and covariances from
        # the N x k responsibility matrix P
        weights = P.sum(axis=0) / P.sum()
means = []
covs = []
for i in range(self.k):
nr_mu = (P[:, i:i+1] * self.data).sum(axis=0)
dr_mu = P[:, i].sum(axis=0)
pt_mu = nr_mu / dr_mu
means.append(pt_mu)
for i in range(self.k):
nr_cov = (P[:, i:i+1] * (self.data - means[i])).transpose().dot(self.data - means[i])
dr_cov = P[:, i].sum(axis=0)
covs.append(nr_cov / dr_cov)
self.mean= means
self.cov = covs
self.weight = weights
    def log_likelihood(self):
        # log p(X | theta) = sum_i log( sum_c w_c * N(x_i; mu_c, cov_c) )
        log_sum = 0.
for _, pt in enumerate(self.data):
row_sum = []
for ct in range(self.k):
p_X_given_N = multivar_gaussian_pdf(pt, self.mean[ct], self.cov[ct])
p_N = self.weight[ct]
joint = p_N * p_X_given_N
row_sum.append(joint)
res = sum(row_sum)
log_sum += math.log(res)
return log_sum
def gmm(self, max_itr = 50):
ll = []
for itr in range(max_itr):
old_means = self.mean # used for convergance test
gamma = self.compute_gamma()
self.update_theta(gamma)
ll.append(self.log_likelihood())
if np.sum(np.abs(np.array(self.mean) - np.array(old_means))) < 1e-3:
break
return gamma, ll
data = blobs
max_ll = float('-inf')
plt.figure(1, figsize=(8, 6))
legends = []
k = 3
for i in range(1, 6):
    em = EM_GMM(data, k)
    gamma, ll = em.gmm()
    # keep the random restart that reaches the best final log-likelihood
    if ll[-1] >= max_ll:
        best_gamma = gamma
        best = em
        max_ll = ll[-1]
print "Converged: ", len(ll)
plt.plot(range(len(ll)), ll , '-', color=colors[i])
legends.append(mpatches.Patch(color=colors[i], label='Iteration: %d' % i))
plt.legend(handles=legends)
plt.show()
idx = best_gamma.argmax(axis=1)
print "Best parameters: "
print "Mean:", best.mean
print "Covar:", best.cov
plt.scatter(data[:,0], data[:,1], color=[colors[i] for i in idx] )
plt.show()
| thammegowda/algos | usc-csci-ml/hw5/src/CSCI567_hw5_fall16.py | Python | apache-2.0 | 9,745 |
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xst = '''<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="topLevel">
<xs:complexType>
<xs:sequence>
<xs:element name="item" type="xs:int" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
'''
code = pyxb.binding.generate.GeneratePython(schema_text=xst)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0218 (unittest.TestCase):
def testBasic (self):
instance = topLevel()
self.assertTrue(instance.item is not None)
self.assertFalse(instance.item is None)
self.assertTrue(instance.item != None)
self.assertTrue(None != instance.item)
self.assertFalse(instance.item)
instance.item.extend([1,2,3,4])
self.assertTrue(instance.item is not None)
self.assertFalse(instance.item is None)
self.assertTrue(instance.item != None)
self.assertTrue(None != instance.item)
self.assertTrue(instance.item)
if __name__ == '__main__':
unittest.main()
| CantemoInternal/pyxb | tests/trac/test-trac-0218.py | Python | apache-2.0 | 1,347 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from kafkatest.services.kafka.directory import kafka_dir
import os
import subprocess
"""
0.8.2.1 MirrorMaker options
Option Description
------ -----------
--abort.on.send.failure <Stop the Configure the mirror maker to exit on
entire mirror maker when a send a failed send. (default: true)
failure occurs>
--blacklist <Java regex (String)> Blacklist of topics to mirror.
--consumer.config <config file> Embedded consumer config for consuming
from the source cluster.
--consumer.rebalance.listener <A The consumer rebalance listener to use
custom rebalance listener of type for mirror maker consumer.
ConsumerRebalanceListener>
--help Print this message.
--message.handler <A custom message Message handler which will process
handler of type every record in-between consumer and
MirrorMakerMessageHandler> producer.
--message.handler.args <Arguments Arguments used by custom rebalance
passed to message handler listener for mirror maker consumer
constructor.>
--num.streams <Integer: Number of Number of consumption streams.
threads> (default: 1)
--offset.commit.interval.ms <Integer: Offset commit interval in ms (default:
offset commit interval in 60000)
millisecond>
--producer.config <config file> Embedded producer config.
--rebalance.listener.args <Arguments Arguments used by custom rebalance
passed to custom rebalance listener listener for mirror maker consumer
constructor as a string.>
--whitelist <Java regex (String)> Whitelist of topics to mirror.
"""
class MirrorMaker(Service):
# Root directory for persistent output
PERSISTENT_ROOT = "/mnt/mirror_maker"
LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
LOG_FILE = os.path.join(LOG_DIR, "mirror_maker.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
PRODUCER_CONFIG = os.path.join(PERSISTENT_ROOT, "producer.properties")
CONSUMER_CONFIG = os.path.join(PERSISTENT_ROOT, "consumer.properties")
logs = {
"mirror_maker_log": {
"path": LOG_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, source, target, whitelist=None, blacklist=None, num_streams=1, consumer_timeout_ms=None):
"""
MirrorMaker mirrors messages from one or more source clusters to a single destination cluster.
Args:
context: standard context
source: source Kafka cluster
target: target Kafka cluster to which data will be mirrored
whitelist: whitelist regex for topics to mirror
blacklist: blacklist regex for topics not to mirror
num_streams: number of consumer threads to create; can be a single int, or a list with
one value per node, allowing num_streams to be the same for each node,
or configured independently per-node
consumer_timeout_ms: consumer stops if t > consumer_timeout_ms elapses between consecutive messages
"""
super(MirrorMaker, self).__init__(context, num_nodes=num_nodes)
self.consumer_timeout_ms = consumer_timeout_ms
self.num_streams = num_streams
if not isinstance(num_streams, int):
# if not an integer, num_streams should be configured per-node
assert len(num_streams) == num_nodes
self.whitelist = whitelist
self.blacklist = blacklist
self.source = source
self.target = target
def start_cmd(self, node):
cmd = "export LOG_DIR=%s;" % MirrorMaker.LOG_DIR
cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\";" % MirrorMaker.LOG4J_CONFIG
cmd += " /opt/%s/bin/kafka-run-class.sh kafka.tools.MirrorMaker" % kafka_dir(node)
cmd += " --consumer.config %s" % MirrorMaker.CONSUMER_CONFIG
cmd += " --producer.config %s" % MirrorMaker.PRODUCER_CONFIG
if isinstance(self.num_streams, int):
cmd += " --num.streams %d" % self.num_streams
else:
# config num_streams separately on each node
cmd += " --num.streams %d" % self.num_streams[self.idx(node) - 1]
if self.whitelist is not None:
cmd += " --whitelist=\"%s\"" % self.whitelist
if self.blacklist is not None:
cmd += " --blacklist=\"%s\"" % self.blacklist
cmd += " 1>> %s 2>> %s &" % (MirrorMaker.LOG_FILE, MirrorMaker.LOG_FILE)
return cmd
def pids(self, node):
try:
cmd = "ps ax | grep -i MirrorMaker | grep java | grep -v grep | awk '{print $1}'"
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (subprocess.CalledProcessError, ValueError) as e:
return []
def alive(self, node):
return len(self.pids(node)) > 0
def start_node(self, node):
node.account.ssh("mkdir -p %s" % MirrorMaker.PERSISTENT_ROOT, allow_fail=False)
node.account.ssh("mkdir -p %s" % MirrorMaker.LOG_DIR, allow_fail=False)
# Create, upload one consumer config file for source cluster
consumer_props = self.render('consumer.properties', zookeeper_connect=self.source.zk.connect_setting())
node.account.create_file(MirrorMaker.CONSUMER_CONFIG, consumer_props)
# Create, upload producer properties file for target cluster
producer_props = self.render('producer.properties', broker_list=self.target.bootstrap_servers(),
producer_type="async")
node.account.create_file(MirrorMaker.PRODUCER_CONFIG, producer_props)
# Create and upload log properties
log_config = self.render('tools_log4j.properties', log_file=MirrorMaker.LOG_FILE)
node.account.create_file(MirrorMaker.LOG4J_CONFIG, log_config)
# Run mirror maker
cmd = self.start_cmd(node)
self.logger.debug("Mirror maker command: %s", cmd)
node.account.ssh(cmd, allow_fail=False)
wait_until(lambda: self.alive(node), timeout_sec=10, backoff_sec=.5,
err_msg="Mirror maker took to long to start.")
self.logger.debug("Mirror maker is alive")
def stop_node(self, node):
node.account.kill_process("java", allow_fail=True)
wait_until(lambda: not self.alive(node), timeout_sec=10, backoff_sec=.5,
err_msg="Mirror maker took to long to stop.")
def clean_node(self, node):
if self.alive(node):
self.logger.warn("%s %s was still alive at cleanup time. Killing forcefully..." %
(self.__class__.__name__, node.account))
node.account.kill_process("java", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf %s" % MirrorMaker.PERSISTENT_ROOT, allow_fail=False)
| bluebreezecf/kafka | tests/kafkatest/services/mirror_maker.py | Python | apache-2.0 | 8,150 |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import textwrap
import mock
import pep8
from nova.hacking import checks
from nova import test
class HackingTestCase(test.NoDBTestCase):
"""This class tests the hacking checks in nova.hacking.checks by passing
strings to the check methods like the pep8/flake8 parser would. The parser
loops over each line in the file and then passes the parameters to the
check method. The parameter names in the check method dictate what type of
object is passed to the check method. The parameter types are::
logical_line: A processed line with the following modifications:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with "xxx" of same length.
- Comments removed.
physical_line: Raw line of text from the input file.
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
total_lines: number of lines in the input file
blank_lines: blank lines before this one
indent_char: indentation character in this file (" " or "\t")
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
filename: Path of the file being run through pep8
When running a test on a check method the return will be False/None if
there is no violation in the sample input. If there is an error a tuple is
returned with a position in the line, and a message. So to check the result
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
def test_virt_driver_imports(self):
expect = (0, "N311: importing code from other virt drivers forbidden")
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"import nova.virt.libvirt.utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/libvirt/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"import nova.virt.firewall",
"./nova/virt/libvirt/firewall.py"))
def test_virt_driver_config_vars(self):
self.assertIsInstance(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/xenapi/driver.py"), tuple)
self.assertIsNone(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/libvirt/volume.py"))
def test_no_vi_headers(self):
lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n',
'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n',
'Line 11\n', 'Line 12\n', 'Line 13\n', 'Line14\n', 'Line15\n']
self.assertIsNone(checks.no_vi_headers(
"Test string foo", 1, lines))
self.assertEqual(len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
2, lines))), 2)
self.assertIsNone(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
6, lines))
self.assertIsNone(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
9, lines))
self.assertEqual(len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
14, lines))), 2)
self.assertIsNone(checks.no_vi_headers(
"Test end string for vi",
15, lines))
def test_assert_true_instance(self):
self.assertEqual(len(list(checks.assert_true_instance(
"self.assertTrue(isinstance(e, "
"exception.BuildAbortException))"))), 1)
self.assertEqual(
len(list(checks.assert_true_instance("self.assertTrue()"))), 0)
def test_assert_equal_type(self):
self.assertEqual(len(list(checks.assert_equal_type(
"self.assertEqual(type(als['QuicAssist']), list)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_type("self.assertTrue()"))), 0)
def test_assert_equal_in(self):
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(a in b, True)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual('str' in 'string', True)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(any(a==1 for a in b), True)"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, a in b)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, 'str' in 'string')"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, any(a==1 for a in b))"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(a in b, False)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual('str' in 'string', False)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(any(a==1 for a in b), False)"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, a in b)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, 'str' in 'string')"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, any(a==1 for a in b))"))), 0)
def test_assert_equal_none(self):
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))), 1)
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(None, A)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_none("self.assertIsNone()"))), 0)
def test_assert_true_or_false_with_in_or_not_in(self):
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A not in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A not in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A not in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A not in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in 'some string with spaces')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in 'some string with spaces')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in ['1', '2', '3'])"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in [1, 2, 3])"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(any(A > 5 for A in B))"))), 0)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(any(A > 5 for A in B), 'some message')"))), 0)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(some in list1 and some2 in list2)"))), 0)
def test_no_translate_debug_logs(self):
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.debug(_('foo'))", "nova/scheduler/foo.py"))), 1)
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.debug('foo')", "nova/scheduler/foo.py"))), 0)
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.info(_('foo'))", "nova/scheduler/foo.py"))), 0)
def test_no_setting_conf_directly_in_tests(self):
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = 1", "nova/tests/test_foo.py"))), 1)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.group.option = 1", "nova/tests/test_foo.py"))), 1)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = foo = 1", "nova/tests/test_foo.py"))), 1)
# Shouldn't fail with comparisons
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option == 'foo'", "nova/tests/test_foo.py"))), 0)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option != 1", "nova/tests/test_foo.py"))), 0)
# Shouldn't fail since not in nova/tests/
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = 1", "nova/compute/foo.py"))), 0)
def test_log_translations(self):
logs = ['audit', 'error', 'info', 'warning', 'critical', 'warn',
'exception']
levels = ['_LI', '_LW', '_LE', '_LC']
debug = "LOG.debug('OK')"
audit = "LOG.audit(_('OK'))"
self.assertEqual(
0, len(list(checks.validate_log_translations(debug, debug, 'f'))))
self.assertEqual(
0, len(list(checks.validate_log_translations(audit, audit, 'f'))))
for log in logs:
bad = 'LOG.%s("Bad")' % log
self.assertEqual(1,
len(list(
checks.validate_log_translations(bad, bad, 'f'))))
ok = "LOG.%s('OK') # noqa" % log
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
ok = "LOG.%s(variable)" % log
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
for level in levels:
ok = "LOG.%s(%s('OK'))" % (log, level)
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
def test_no_mutable_default_args(self):
self.assertEqual(1, len(list(checks.no_mutable_default_args(
" def fake_suds_context(calls={}):"))))
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def get_info_from_bdm(virt_type, bdm, mapping=[])"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined = []"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined, undefined = [], {}"))))
def test_check_explicit_underscore_import(self):
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder/tests/other_files.py"))), 1)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files.py"))), 1)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _, _LW",
"cinder/tests/other_files2.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files2.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"_ = translations.ugettext",
"cinder/tests/other_files3.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files3.py"))), 0)
def test_use_jsonutils(self):
def __get_msg(fun):
msg = ("N324: jsonutils.%(fun)s must be used instead of "
"json.%(fun)s" % {'fun': fun})
return [(0, msg)]
for method in ('dump', 'dumps', 'load', 'loads'):
self.assertEqual(
__get_msg(method),
list(checks.use_jsonutils("json.%s(" % method,
"./nova/virt/xenapi/driver.py")))
self.assertEqual(0,
len(list(checks.use_jsonutils("json.%s(" % method,
"./plugins/xenserver/script.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils("jsonx.%s(" % method,
"./nova/virt/xenapi/driver.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils("json.dumb",
"./nova/virt/xenapi/driver.py"))))
# We are patching pep8 so that only the check under test is actually
# installed.
@mock.patch('pep8._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def _run_check(self, code, checker, filename=None):
pep8.register_check(checker)
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(filename=filename, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print
def _assert_has_errors(self, code, checker, expected_errors=None,
filename=None):
actual_errors = [e[:3] for e in
self._run_check(code, checker, filename)]
self.assertEqual(expected_errors or [], actual_errors)
def _assert_has_no_errors(self, code, checker, filename=None):
self._assert_has_errors(code, checker, filename=filename)
def test_str_unicode_exception(self):
checker = checks.CheckForStrUnicodeExc
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = str(e)
return p
"""
errors = [(5, 16, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = unicode(a) + str(b)
except ValueError as e:
p = e
return p
"""
self._assert_has_no_errors(code, checker)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = unicode(e)
return p
"""
errors = [(5, 20, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
try:
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + str(ve)
p = e
return p
"""
errors = [(8, 20, 'N325'), (8, 29, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
try:
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + unicode(ve)
p = str(e)
return p
"""
errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
def test_api_version_decorator_check(self):
code = """
@some_other_decorator
@wsgi.api_version("2.5")
def my_method():
pass
"""
self._assert_has_errors(code, checks.check_api_version_decorator,
expected_errors=[(2, 0, "N332")])
def test_oslo_namespace_imports_check(self):
code = """
from oslo.concurrency import processutils
"""
self._assert_has_errors(code, checks.check_oslo_namespace_imports,
expected_errors=[(1, 0, "N333")])
def test_oslo_namespace_imports_check_2(self):
code = """
from oslo import i18n
"""
self._assert_has_errors(code, checks.check_oslo_namespace_imports,
expected_errors=[(1, 0, "N333")])
def test_oslo_namespace_imports_check_3(self):
code = """
import oslo.messaging
"""
self._assert_has_errors(code, checks.check_oslo_namespace_imports,
expected_errors=[(1, 0, "N333")])
def test_oslo_assert_raises_regexp(self):
code = """
self.assertRaisesRegexp(ValueError,
"invalid literal for.*XYZ'$",
int,
'XYZ')
"""
self._assert_has_errors(code, checks.assert_raises_regexp,
expected_errors=[(1, 0, "N335")])
def test_api_version_decorator_check_no_errors(self):
code = """
class ControllerClass():
@wsgi.api_version("2.5")
def my_method():
pass
"""
self._assert_has_no_errors(code, checks.check_api_version_decorator)
def test_trans_add(self):
checker = checks.CheckForTransAdd
code = """
def fake_tran(msg):
return msg
_ = fake_tran
_LI = _
_LW = _
_LE = _
_LC = _
def f(a, b):
msg = _('test') + 'add me'
msg = _LI('test') + 'add me'
msg = _LW('test') + 'add me'
msg = _LE('test') + 'add me'
msg = _LC('test') + 'add me'
msg = 'add to me' + _('test')
return msg
"""
errors = [(13, 10, 'N326'), (14, 10, 'N326'), (15, 10, 'N326'),
(16, 10, 'N326'), (17, 10, 'N326'), (18, 24, 'N326')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
msg = 'test' + 'add me'
return msg
"""
self._assert_has_no_errors(code, checker)
def test_dict_constructor_with_list_copy(self):
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict([(i, connect_info[i])"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" attrs = dict([(k, _from_json(v))"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" type_names = dict((value, key) for key, value in"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict((value, key) for key, value in"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
"foo(param=dict((k, v) for k, v in bar.items()))"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict([[i,i] for i in range(3)])"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dd = dict([i,i] for i in range(3))"))))
self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
" create_kwargs = dict(snapshot=snapshot,"))))
self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
" self._render_dict(xml, data_el, data.__dict__)"))))
| orbitfp7/nova | nova/tests/unit/test_hacking.py | Python | apache-2.0 | 22,417 |
"""
Test how many times newly loaded binaries are notified;
they should be delivered in batches instead of one-by-one.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ModuleLoadedNotifysTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
# DynamicLoaderDarwin should batch up notifications about
# newly added/removed libraries. Other DynamicLoaders may
# not be written this way.
@skipUnlessDarwin
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// breakpoint')
def test_launch_notifications(self):
"""Test that lldb broadcasts newly loaded libraries in batches."""
self.build()
exe = self.getBuildArtifact("a.out")
self.dbg.SetAsync(False)
listener = self.dbg.GetListener()
listener.StartListeningForEventClass(
self.dbg,
lldb.SBTarget.GetBroadcasterClassName(),
lldb.SBTarget.eBroadcastBitModulesLoaded | lldb.SBTarget.eBroadcastBitModulesUnloaded)
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# break on main
breakpoint = target.BreakpointCreateByName('main', 'a.out')
event = lldb.SBEvent()
# CreateTarget() generated modules-loaded events; consume them & toss
while listener.GetNextEvent(event):
            pass
error = lldb.SBError()
flags = target.GetLaunchInfo().GetLaunchFlags()
process = target.Launch(listener,
None, # argv
None, # envp
None, # stdin_path
None, # stdout_path
None, # stderr_path
None, # working directory
flags, # launch flags
False, # Stop at entry
error) # error
self.assertTrue(
process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
total_solibs_added = 0
total_solibs_removed = 0
total_modules_added_events = 0
total_modules_removed_events = 0
while listener.GetNextEvent(event):
if lldb.SBTarget.EventIsTargetEvent(event):
if event.GetType() == lldb.SBTarget.eBroadcastBitModulesLoaded:
solib_count = lldb.SBTarget.GetNumModulesFromEvent(event)
total_modules_added_events += 1
total_solibs_added += solib_count
if self.TraceOn():
# print all of the binaries that have been added
added_files = []
i = 0
while i < solib_count:
module = lldb.SBTarget.GetModuleAtIndexFromEvent(i, event)
added_files.append(module.GetFileSpec().GetFilename())
i = i + 1
print("Loaded files: %s" % (', '.join(added_files)))
if event.GetType() == lldb.SBTarget.eBroadcastBitModulesUnloaded:
solib_count = lldb.SBTarget.GetNumModulesFromEvent(event)
total_modules_removed_events += 1
total_solibs_removed += solib_count
if self.TraceOn():
# print all of the binaries that have been removed
removed_files = []
i = 0
while i < solib_count:
module = lldb.SBTarget.GetModuleAtIndexFromEvent(i, event)
removed_files.append(module.GetFileSpec().GetFilename())
i = i + 1
print("Unloaded files: %s" % (', '.join(removed_files)))
# This is testing that we get back a small number of events with the loaded
# binaries in batches. Check that we got back more than 1 solib per event.
# In practice on Darwin today, we get back two events for a do-nothing c
# program: a.out and dyld, and then all the rest of the system libraries.
avg_solibs_added_per_event = int(float(total_solibs_added) / float(total_modules_added_events))
self.assertGreater(avg_solibs_added_per_event, 1)
| google/llvm-propeller | lldb/test/API/functionalities/target-new-solib-notifications/TestModuleLoadedNotifys.py | Python | apache-2.0 | 4,739 |
import types
import unittest
from collections import namedtuple
import os
import sys
import tempfile
from zipfile import ZipFile, ZipInfo
from utils import jar_utils
sys.path.append('tests/unit/')
import mock
from plugins.systems.config_container_crawler import ConfigContainerCrawler
from plugins.systems.config_host_crawler import ConfigHostCrawler
from plugins.systems.connection_container_crawler import ConnectionContainerCrawler
from plugins.systems.connection_host_crawler import ConnectionHostCrawler
from plugins.systems.connection_vm_crawler import ConnectionVmCrawler
from plugins.systems.cpu_container_crawler import CpuContainerCrawler
from plugins.systems.cpu_host_crawler import CpuHostCrawler
from plugins.systems.disk_container_crawler import DiskContainerCrawler
from plugins.systems.disk_host_crawler import DiskHostCrawler
from plugins.systems.dockerhistory_container_crawler import DockerhistoryContainerCrawler
from plugins.systems.dockerinspect_container_crawler import DockerinspectContainerCrawler
from plugins.systems.dockerps_host_crawler import DockerpsHostCrawler
from plugins.systems.file_container_crawler import FileContainerCrawler
from plugins.systems.file_host_crawler import FileHostCrawler
from plugins.systems.interface_container_crawler import InterfaceContainerCrawler
from plugins.systems.interface_host_crawler import InterfaceHostCrawler
from plugins.systems.interface_vm_crawler import InterfaceVmCrawler
from plugins.systems.jar_container_crawler import JarContainerCrawler
from plugins.systems.jar_host_crawler import JarHostCrawler
from plugins.systems.load_container_crawler import LoadContainerCrawler
from plugins.systems.load_host_crawler import LoadHostCrawler
from plugins.systems.memory_container_crawler import MemoryContainerCrawler
from plugins.systems.memory_host_crawler import MemoryHostCrawler
from plugins.systems.memory_vm_crawler import MemoryVmCrawler
from plugins.systems.metric_container_crawler import MetricContainerCrawler
from plugins.systems.metric_host_crawler import MetricHostCrawler
from plugins.systems.metric_vm_crawler import MetricVmCrawler
from plugins.systems.os_container_crawler import OSContainerCrawler
from plugins.systems.os_host_crawler import OSHostCrawler
from plugins.systems.os_vm_crawler import os_vm_crawler
from plugins.systems.package_container_crawler import PackageContainerCrawler
from plugins.systems.package_host_crawler import PackageHostCrawler
from plugins.systems.process_container_crawler import ProcessContainerCrawler
from plugins.systems.process_host_crawler import ProcessHostCrawler
from plugins.systems.process_vm_crawler import process_vm_crawler
from container import Container
from utils.crawler_exceptions import CrawlError
from utils.features import (
OSFeature,
ConfigFeature,
DiskFeature,
PackageFeature,
MemoryFeature,
CpuFeature,
InterfaceFeature,
LoadFeature,
DockerPSFeature,
JarFeature)
# for OUTVM psvmi
class DummyContainer(Container):
def __init__(self, long_id):
self.pid = '1234'
self.long_id = long_id
def get_memory_cgroup_path(self, node):
return '/cgroup/%s' % node
def get_cpu_cgroup_path(self, node):
return '/cgroup/%s' % node
# for OUTVM psvmi
psvmi_sysinfo = namedtuple('psvmi_sysinfo',
'''boottime ipaddr osdistro osname osplatform osrelease
ostype osversion memory_used memory_buffered
memory_cached memory_free''')
psvmi_memory = namedtuple(
'psvmi_memory',
'memory_used memory_buffered memory_cached memory_free')
psvmi_interface = namedtuple(
'psvmi_interface',
'ifname bytes_sent bytes_recv packets_sent packets_recv errout errin')
os_stat = namedtuple(
'os_stat',
'''st_mode st_gid st_uid st_atime st_ctime st_mtime st_size''')
def mocked_os_walk(root_dir):
files = ['file1', 'file2', 'file3']
dirs = ['dir']
yield ('/', dirs, files)
# simulate the os_walk behavior (if a dir is deleted, we don't walk it)
if '/dir' in dirs:
files = ['file4']
dirs = []
yield ('/dir', dirs, files)
def mocked_os_walk_for_avoidsetns(root_dir):
files = ['file1', 'file2', 'file3']
dirs = ['dir']
yield ('/1/2/3', dirs, files)
# simulate the os_walk behavior (if a dir is deleted, we don't walk it)
if '/1/2/3/dir' in dirs:
files = ['file4']
dirs = []
yield ('/dir', dirs, files)
# XXX can't do self.count = for some reason
mcount = 0
class MockedMemCgroupFile(mock.Mock):
def __init__(self):
pass
def readline(self):
return '2'
def __iter__(self):
return self
def next(self):
global mcount
mcount += 1
if mcount == 1:
return 'total_cache 100'
if mcount == 2:
return 'total_active_file 200'
else:
raise StopIteration()
# XXX can't do self.count = for some reason
ccount = 0
ccount2 = 0
class MockedCpuCgroupFile(mock.Mock):
def __init__(self):
pass
def readline(self):
global ccount2
ccount2 += 1
if ccount2 == 1:
return '1e7'
else:
return '2e7'
def __iter__(self):
return self
def next(self):
global ccount
ccount += 1
if ccount == 1:
return 'system 20'
if ccount == 2:
return 'user 20'
else:
raise StopIteration()
class MockedFile(mock.Mock):
def __init__(self):
pass
def read(self):
return 'content'
def mocked_codecs_open(filename, mode, encoding, errors):
m = mock.Mock()
m.__enter__ = mock.Mock(return_value=MockedFile())
m.__exit__ = mock.Mock(return_value=False)
return m
def mocked_cpu_cgroup_open(filename, mode):
m = mock.Mock()
m.__enter__ = mock.Mock(return_value=MockedCpuCgroupFile())
m.__exit__ = mock.Mock(return_value=False)
print filename
return m
def mocked_memory_cgroup_open(filename, mode):
m = mock.Mock()
m.__enter__ = mock.Mock(return_value=MockedMemCgroupFile())
m.__exit__ = mock.Mock(return_value=False)
print filename
return m
partition = namedtuple('partition', 'device fstype mountpoint opts')
pdiskusage = namedtuple('pdiskusage', 'percent total')
meminfo = namedtuple('meminfo', 'rss vms')
ioinfo = namedtuple('ioinfo', 'read_bytes write_bytes')
psutils_memory = namedtuple('psutils_memory', 'used free buffers cached')
psutils_cpu = namedtuple(
'psutils_cpu',
'idle nice user iowait system irq steal')
psutils_net = namedtuple(
'psutils_net',
'bytes_sent bytes_recv packets_sent packets_recv errout errin')
def mocked_disk_partitions(all):
return [partition('/dev/a', 'type', '/a', 'opts'),
partition('/dev/b', 'type', '/b', 'opts')]
class Connection():
def __init__(self):
self.laddr = ['1.1.1.1', '22']
self.raddr = ['2.2.2.2', '22']
self.status = 'Established'
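# Minimal psutil.Process look-alike exposing only the fields and methods the
# crawler plugins read (connections, memory, io and cpu percentages).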
class Process():
def __init__(self, name):
self.name = name
self.cmdline = ['cmd']
self.pid = 123
self.status = 'Running'
self.cwd = '/bin'
self.ppid = 1
self.create_time = 1000
def num_threads(self):
return 1
def username(self):
return 'don quijote'
def get_open_files(self):
return []
def get_connections(self):
return [Connection()]
def get_memory_info(self):
return meminfo(10, 20)
def get_io_counters(self):
return ioinfo(10, 20)
def get_cpu_percent(self, interval):
return 30
def get_memory_percent(self):
return 30
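# 16749 == 0o40555, i.e. S_IFDIR | 0555: mocked_os_lstat uses it so '/' and
# '/dir' are reported as directories while plain files get mode 1.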
STAT_DIR_MODE = 16749
def mocked_os_lstat(path):
print path
if path == '/':
return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7)
elif path == '/file1':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/file2':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/file3':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/dir':
return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7)
else:
return os_stat(1, 2, 3, 4, 5, 6, 7)
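# Replacement for run_as_another_namespace(): skips the setns dance and just
# runs the function in the test process, materializing generator results.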
def mocked_run_as_another_namespace(pid, ns, function, *args, **kwargs):
result = function(*args)
# if res is a generator (i.e. function uses yield)
if isinstance(result, types.GeneratorType):
result = list(result)
return result
def throw_os_error(*args, **kvargs):
raise OSError()
class PluginTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self, *args):
pass
@mock.patch('utils.os_utils.time.time',
side_effect=lambda: 1001)
@mock.patch('utils.os_utils.platform.platform',
side_effect=lambda: 'platform')
@mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses',
side_effect=lambda: ['1.1.1.1'])
@mock.patch('utils.os_utils.psutil.boot_time',
side_effect=lambda: 1000)
@mock.patch('utils.os_utils.platform.system',
side_effect=lambda: 'linux')
@mock.patch('utils.os_utils.platform.machine',
side_effect=lambda: 'machine')
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
    def test_os_host_crawler_plugin(self, *args):
fc = OSHostCrawler()
for os in fc.crawl():
print os
assert os == (
'linux',
OSFeature(
boottime=1000,
uptime=1,
ipaddr=['1.1.1.1'],
os='os',
os_version='os_version',
os_kernel='platform',
architecture='machine'),
'os')
for i, arg in enumerate(args):
if i > 0: # time.time is called more than once
continue
assert arg.call_count == 1
@mock.patch('utils.os_utils.platform.system',
side_effect=throw_os_error)
def test_os_host_crawler_plugin_failure(self, *args):
fc = OSHostCrawler()
with self.assertRaises(OSError):
for os in fc.crawl():
pass
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_host_crawler_plugin_mountpoint_mode(self, *args):
fc = OSHostCrawler()
for os in fc.crawl(root_dir='/a'):
print os
assert os == (
'linux',
OSFeature(
boottime='unsupported',
uptime='unsupported',
ipaddr='0.0.0.0',
os='os',
os_version='os_version',
os_kernel='unknown',
architecture='unknown'),
'os')
for i, arg in enumerate(args):
assert arg.call_count == 1
@mock.patch('utils.os_utils.osinfo.get_osinfo',
side_effect=throw_os_error)
def test_os_host_crawler_plugin_mountpoint_mode_failure(self, *args):
fc = OSHostCrawler()
with self.assertRaises(OSError):
for os in fc.crawl(root_dir='/a'):
pass
@mock.patch('utils.os_utils.time.time',
side_effect=lambda: 1001)
@mock.patch('utils.os_utils.platform.platform',
side_effect=lambda: 'platform')
@mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses',
side_effect=lambda: ['1.1.1.1'])
@mock.patch('utils.os_utils.psutil.boot_time',
side_effect=lambda: 1000)
@mock.patch('utils.os_utils.platform.system',
side_effect=lambda: 'linux')
@mock.patch('utils.os_utils.platform.machine',
side_effect=lambda: 'machine')
@mock.patch(
("plugins.systems.os_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.os_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_container_crawler_plugin(self, *args):
fc = OSContainerCrawler()
for os in fc.crawl(container_id=123):
print os
assert os == (
'linux',
OSFeature(
boottime=1000,
uptime=1,
ipaddr=['1.1.1.1'],
os='os',
os_version='os_version',
os_kernel='platform',
architecture='machine'),
'os')
for i, arg in enumerate(args):
if i > 0: # time.time is called more than once
continue
assert arg.call_count == 1
@mock.patch(
("plugins.systems.os_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.os_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_container_crawler_plugin_avoidsetns(self, *args):
fc = OSContainerCrawler()
for os in fc.crawl(container_id=123, avoid_setns=True):
print os
assert os == (
'linux',
OSFeature(
boottime='unsupported',
uptime='unsupported',
ipaddr='0.0.0.0',
os='os',
os_version='os_version',
os_kernel='unknown',
architecture='unknown'),
'os')
for i, arg in enumerate(args):
print i, arg
if i == 0:
# get_osinfo()
assert arg.call_count == 1
arg.assert_called_with(mount_point='/a/b/c')
elif i == 1:
# get_docker_container_rootfs_path
assert arg.call_count == 1
arg.assert_called_with(123)
else:
# exec_dockerinspect
assert arg.call_count == 1
arg.assert_called_with(123)
@mock.patch(
("plugins.systems.os_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.os_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=throw_os_error)
def test_os_container_crawler_plugin_avoidsetns_failure(self, *args):
fc = OSContainerCrawler()
with self.assertRaises(OSError):
for os in fc.crawl(container_id=123, avoid_setns=True):
pass
@mock.patch('plugins.systems.os_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.os_vm_crawler.psvmi.system_info',
side_effect=lambda vmc: psvmi_sysinfo(1000,
'1.1.1.1',
'osdistro',
'osname',
'osplatform',
'osrelease',
'ostype',
'osversion',
1000000,
100000,
100000,
100000))
@mock.patch('plugins.systems.os_vm_crawler.psvmi')
def test_os_vm_crawler_plugin_without_vm(self, *args):
fc = os_vm_crawler()
for os in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert os == (
'ostype',
OSFeature(
boottime=1000,
uptime='unknown',
ipaddr='1.1.1.1',
os='ostype',
os_version='osversion',
os_kernel='osrelease',
architecture='osplatform'),
'os')
pass
assert args[1].call_count == 1
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_host_crawler(self, *args):
fc = FileHostCrawler()
for (k, f, fname) in fc.crawl():
print f
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3',
'/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 6
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_host_crawler_with_exclude_dirs(self, *args):
fc = FileHostCrawler()
for (k, f, fname) in fc.crawl(exclude_dirs=['dir']):
print f
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3']
assert f.path not in ['/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 4
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=throw_os_error)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_host_crawler_failure(self, *args):
fc = FileHostCrawler()
with self.assertRaises(OSError):
for (k, f, fname) in fc.crawl(root_dir='/a/b/c'):
pass
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/'):
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3',
'/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 6
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch(
("plugins.systems.jar_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.jar_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
def test_jar_container_crawler_plugin(self, *args):
tmpdir = tempfile.mkdtemp()
jar_file_name = 'myfile.jar'
# Ensure the file is read/write by the creator only
saved_umask = os.umask(0077)
path = os.path.join(tmpdir, jar_file_name)
try:
with ZipFile(path, "w") as myjar:
myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!")
myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!")
myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!")
fc = JarContainerCrawler()
jars = list(fc.crawl(root_dir=tmpdir))
#jars = list(jar_utils.crawl_jar_files(root_dir=tmpdir))
print jars
jar_feature = jars[0][1]
assert 'myfile.jar' == jar_feature.name
assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash
assert ['ddc6eff37020aa858e26b1ba8a49ee0e',
'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes
assert 'jar' == jars[0][2]
except IOError as e:
print 'IOError'
finally:
os.remove(path)
@mock.patch(
("plugins.systems.jar_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.jar_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/tmp')
def test_jar_container_crawler_avoidsetns(self, *args):
tmpdir = tempfile.mkdtemp()
jar_file_name = 'myfile.jar'
# Ensure the file is read/write by the creator only
saved_umask = os.umask(0077)
path = os.path.join(tmpdir, jar_file_name)
try:
with ZipFile(path, "w") as myjar:
myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!")
myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!")
myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!")
fc = JarContainerCrawler()
jars = list(fc.crawl(root_dir=os.path.basename(tmpdir), avoid_setns=True))
print jars
jar_feature = jars[0][1]
assert 'myfile.jar' == jar_feature.name
assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash
assert ['ddc6eff37020aa858e26b1ba8a49ee0e',
'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes
assert 'jar' == jars[0][2]
except IOError as e:
print 'IOError'
finally:
os.remove(path)
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch('utils.file_utils.os.walk',
side_effect=throw_os_error)
@mock.patch(
("plugins.systems.file_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_failure(self, *args):
fc = FileContainerCrawler()
with self.assertRaises(OSError):
for (k, f, fname) in fc.crawl(root_dir='/a/b/c'):
pass
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk_for_avoidsetns)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_avoidsetns(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/', avoid_setns=True):
print f
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3',
'/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 6
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/1/2/3')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/1/2/3')
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_with_exclude_dirs(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/',
exclude_dirs=['dir']):
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3']
assert f.path not in ['/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 4
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk_for_avoidsetns)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
    def test_file_container_crawler_avoidsetns_with_exclude_dirs(
            self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/',
avoid_setns=True,
exclude_dirs=['/dir']):
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3']
assert f.path not in ['/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 4
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/1/2/3')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/1/2/3')
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_host_crawler(self, *args):
fc = ConfigHostCrawler()
for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=False):
assert fname == "config"
assert f == ConfigFeature(name='file1', content='content',
path='/etc/file1')
assert args[0].call_count == 1 # lstat
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.walk',
side_effect=lambda p: [
('/', [], ['file1', 'file2', 'file3.conf'])])
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.isfile',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.getsize',
side_effect=lambda p: 1000)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_host_crawler_with_discover(self, *args):
fc = ConfigHostCrawler()
configs = fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=True)
print configs
assert set(configs) == set([('/file3.conf',
ConfigFeature(name='file3.conf',
content='content',
path='/file3.conf'),
'config'),
('/etc/file1',
ConfigFeature(name='file1',
content='content',
path='/etc/file1'),
'config')])
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.config_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_container_crawler(self, *args):
fc = ConfigContainerCrawler()
for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=False):
assert fname == "config"
assert f == ConfigFeature(name='file1', content='content',
path='/etc/file1')
assert args[0].call_count == 1 # codecs open
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.config_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.walk',
side_effect=lambda p: [
('/', [], ['file1', 'file2', 'file3.conf'])])
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.isfile',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.getsize',
side_effect=lambda p: 1000)
def test_config_container_crawler_discover(self, *args):
fc = ConfigContainerCrawler()
configs = fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=True)
assert set(configs) == set([('/file3.conf',
ConfigFeature(name='file3.conf',
content='content',
path='/file3.conf'),
'config'),
('/etc/file1',
ConfigFeature(name='file1',
content='content',
path='/etc/file1'),
'config')])
@mock.patch(
("plugins.systems.config_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.config_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_container_crawler_avoidsetns(self, *args):
fc = ConfigContainerCrawler()
for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=False,
avoid_setns=True):
assert fname == "config"
assert f == ConfigFeature(name='file1', content='content',
path='/etc/file1')
assert args[0].call_count == 1 # lstat
@mock.patch(
("plugins.systems.config_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.config_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.walk',
side_effect=lambda p: [
('/', [], ['file1', 'file2', 'file3.conf'])])
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.isfile',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.getsize',
side_effect=lambda p: 1000)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_container_crawler_avoidsetns_discover(self, *args):
fc = ConfigContainerCrawler()
configs = fc.crawl(known_config_files=['/etc/file1'],
avoid_setns=True,
discover_config_files=True)
assert set(configs) == set([('/file3.conf',
ConfigFeature(name='file3.conf',
content='content',
path='/file3.conf'),
'config'),
('/etc/file1',
ConfigFeature(name='file1',
content='content',
path='/etc/file1'),
'config')])
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=lambda a, b, c: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_host_crawler_dpkg(self, *args):
fc = PackageHostCrawler()
for (k, f, fname) in fc.crawl():
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/dpkg', 0)
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=throw_os_error)
def test_package_host_crawler_dpkg_failure(self, *args):
fc = PackageHostCrawler()
with self.assertRaises(CrawlError):
for (k, f, fname) in fc.crawl():
pass
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/dpkg', 0)
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'redhat',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_rpm_packages',
side_effect=lambda a, b, c, d: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_host_crawler_rpm(self, *args):
fc = PackageHostCrawler()
for (k, f, fname) in fc.crawl():
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/rpm', 0, False)
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch(
'plugins.systems.package_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=lambda a, b, c: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_container_crawler_dpkg(self, *args):
fc = PackageContainerCrawler()
for (k, f, fname) in fc.crawl():
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/dpkg', 0)
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.package_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.package_container_crawler."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True if 'dpkg' in p else False)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=throw_os_error)
def test_package_container_crawler_dpkg_failure(self, *args):
fc = PackageContainerCrawler()
with self.assertRaises(CrawlError):
for (k, f, fname) in fc.crawl():
pass
# get_dpkg_packages is called a second time after the first failure.
# first time is OUTCONTAINER mode with setns
# second time is OUTCONTAINER mode with avoid_setns
assert args[0].call_count == 2
args[0].assert_called_with('/a/b/c', 'var/lib/dpkg', 0)
args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo()
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.package_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.package_container_crawler."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'redhat',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True if 'rpm' in p else False)
@mock.patch('utils.package_utils.get_rpm_packages',
side_effect=throw_os_error)
def test_package_container_crawler_rpm_failure(self, *args):
fc = PackageContainerCrawler()
with self.assertRaises(CrawlError):
for (k, f, fname) in fc.crawl():
pass
        # get_rpm_packages is called a second time after the first failure.
# first time is OUTCONTAINER mode with setns
# second time is OUTCONTAINER mode with avoid_setns
assert args[0].call_count == 2
args[0].assert_called_with('/a/b/c', 'var/lib/rpm', 0, True)
args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo()
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.package_container_crawler."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=lambda a, b, c: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_container_crawler_avoidsetns(self, *args):
fc = PackageContainerCrawler()
for (k, f, fname) in fc.crawl(avoid_setns=True):
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
@mock.patch('plugins.systems.process_host_crawler.psutil.process_iter',
side_effect=lambda: [Process('init')])
def test_process_host_crawler(self, *args):
fc = ProcessHostCrawler()
for (k, f, fname) in fc.crawl():
print f
assert fname == "process"
assert f.pname == 'init'
assert f.cmd == 'cmd'
assert f.pid == 123
assert args[0].call_count == 1
@mock.patch(
("plugins.systems.process_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.process_container_crawler.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch(
'plugins.systems.process_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
def test_process_container_crawler(self, *args):
fc = ProcessContainerCrawler()
for (k, f, fname) in fc.crawl('123'):
print f
assert fname == "process"
assert f.pname == 'init'
assert f.cmd == 'cmd'
assert f.pid == 123
assert args[0].call_count == 1
@mock.patch('plugins.systems.process_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.process_vm_crawler.psvmi.process_iter',
side_effect=lambda vmc: [Process('init')])
@mock.patch('plugins.systems.process_vm_crawler.psvmi')
def test_process_vm_crawler(self, *args):
fc = process_vm_crawler()
for (k, f, fname) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
print f
assert fname == "process"
assert f.pname == 'init'
assert f.cmd == 'cmd'
assert f.pid == 123
assert args[1].call_count == 1 # process_iter
@mock.patch('utils.disk_utils.psutil.disk_partitions',
side_effect=mocked_disk_partitions)
@mock.patch('utils.disk_utils.psutil.disk_usage',
side_effect=lambda x: pdiskusage(10, 100))
def test_crawl_disk_partitions_invm_mode(self, *args):
fc = DiskHostCrawler()
disks = fc.crawl()
assert set(disks) == set([('/a',
DiskFeature(partitionname='/dev/a',
freepct=90.0,
fstype='type',
mountpt='/a',
mountopts='opts',
partitionsize=100),
'disk'),
('/b',
DiskFeature(partitionname='/dev/b',
freepct=90.0,
fstype='type',
mountpt='/b',
mountopts='opts',
partitionsize=100),
'disk')])
@mock.patch(
'plugins.systems.disk_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.disk_utils.psutil.disk_partitions',
side_effect=mocked_disk_partitions)
@mock.patch('utils.disk_utils.psutil.disk_usage',
side_effect=lambda x: pdiskusage(10, 100))
@mock.patch(
("plugins.systems.disk_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
def test_crawl_disk_partitions_outcontainer_mode(self, *args):
fc = DiskContainerCrawler()
disks = fc.crawl('123')
assert set(disks) == set([('/a',
DiskFeature(partitionname='/dev/a',
freepct=90.0,
fstype='type',
mountpt='/a',
mountopts='opts',
partitionsize=100),
'disk'),
('/b',
DiskFeature(partitionname='/dev/b',
freepct=90.0,
fstype='type',
mountpt='/b',
mountopts='opts',
partitionsize=100),
'disk')])
@mock.patch('utils.metric_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
def test_crawl_metrics_invm_mode(self, *args):
fc = MetricHostCrawler()
for (k, f, t) in fc.crawl():
assert f.cpupct == 30.0
assert f.mempct == 30.0
assert f.pname == 'init'
assert f.pid == 123
assert f.rss == 10
assert f.status == 'Running'
assert f.vms == 20
assert f.read == 10
assert f.write == 20
assert args[0].call_count == 1
@mock.patch('utils.metric_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch('utils.metric_utils.round',
side_effect=throw_os_error)
def test_crawl_metrics_invm_mode_failure(self, *args):
with self.assertRaises(OSError):
fc = MetricHostCrawler()
for ff in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch('utils.metric_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch(
'plugins.systems.metric_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.disk_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
def test_crawl_metrics_outcontainer_mode(self, *args):
fc = MetricContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f.cpupct == 30.0
assert f.mempct == 30.0
assert f.pname == 'init'
assert f.pid == 123
assert f.rss == 10
assert f.status == 'Running'
assert f.vms == 20
assert f.read == 10
assert f.write == 20
assert args[0].call_count == 1
@mock.patch('plugins.systems.metric_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.metric_vm_crawler.psvmi.process_iter',
side_effect=lambda vmc: [Process('init')])
@mock.patch(
("plugins.systems.metric_vm_crawler."
"MetricVmCrawler._crawl_metrics_cpu_percent"),
side_effect=lambda proc: 30.0)
@mock.patch('plugins.systems.metric_vm_crawler.psvmi')
def test_crawl_metrics_vm_mode(self, *args):
fc = MetricVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f.cpupct == 30.0
assert f.mempct == 30.0
assert f.pname == 'init'
assert f.pid == 123
assert f.rss == 10
assert f.status == 'Running'
assert f.vms == 20
assert f.read == 10
assert f.write == 20
assert args[1].call_count == 1 # process_iter
@mock.patch('utils.connection_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
def test_crawl_connections_invm_mode(self, *args):
fc = ConnectionHostCrawler()
for (k, f, t) in fc.crawl():
assert f.localipaddr == '1.1.1.1'
assert f.remoteipaddr == '2.2.2.2'
assert f.localport == '22'
assert f.remoteport == '22'
assert args[0].call_count == 1
@mock.patch('utils.connection_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch(
'plugins.systems.connection_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.connection_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
def test_crawl_connections_outcontainer_mode(self, *args):
fc = ConnectionContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f.localipaddr == '1.1.1.1'
assert f.remoteipaddr == '2.2.2.2'
assert f.localport == '22'
assert f.remoteport == '22'
assert args[0].call_count == 1
@mock.patch('plugins.systems.connection_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.connection_vm_crawler.psvmi.process_iter',
side_effect=lambda vmc: [Process('init')])
@mock.patch('plugins.systems.connection_vm_crawler.psvmi')
def test_crawl_connections_outvm_mode(self, *args):
fc = ConnectionVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f.localipaddr == '1.1.1.1'
assert f.remoteipaddr == '2.2.2.2'
assert f.localport == '22'
assert f.remoteport == '22'
assert args[1].call_count == 1
@mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory',
side_effect=lambda: psutils_memory(2, 2, 3, 4))
def test_crawl_memory_invm_mode(self, *args):
fc = MemoryHostCrawler()
for (k, f, t) in fc.crawl():
assert f == MemoryFeature(
memory_used=2,
memory_buffered=3,
memory_cached=4,
memory_free=2,
memory_util_percentage=50)
assert args[0].call_count == 1
@mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory',
side_effect=throw_os_error)
def test_crawl_memory_invm_mode_failure(self, *args):
fc = MemoryHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch('plugins.systems.memory_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.memory_vm_crawler.psvmi.system_memory_info',
side_effect=lambda vmc: psvmi_memory(10, 20, 30, 40))
@mock.patch('plugins.systems.memory_vm_crawler.psvmi')
def test_crawl_memory_outvm_mode(self, *args):
fc = MemoryVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f == MemoryFeature(
memory_used=10,
memory_buffered=20,
memory_cached=30,
memory_free=40,
memory_util_percentage=20)
assert args[1].call_count == 1
@mock.patch(
'plugins.systems.memory_container_crawler.psutil.virtual_memory',
side_effect=lambda: psutils_memory(
10,
10,
3,
10))
@mock.patch('plugins.systems.memory_container_crawler.open',
side_effect=mocked_memory_cgroup_open)
@mock.patch('plugins.systems.memory_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_memory_outcontainer_mode(self, *args):
fc = MemoryContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == MemoryFeature(
memory_used=2,
memory_buffered=200,
memory_cached=100,
memory_free=0,
memory_util_percentage=100)
assert args[1].call_count == 3 # 3 cgroup files
@mock.patch(
'plugins.systems.memory_container_crawler.psutil.virtual_memory',
side_effect=lambda: psutils_memory(
10,
10,
3,
10))
@mock.patch('plugins.systems.memory_container_crawler.open',
side_effect=throw_os_error)
@mock.patch('plugins.systems.memory_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_memory_outcontainer_mode_failure(self, *args):
fc = MemoryContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
        assert args[1].call_count == 1  # only 1 cgroup file opened before the failure
@mock.patch(
'plugins.systems.cpu_host_crawler.psutil.cpu_times_percent',
side_effect=lambda percpu: [
psutils_cpu(
10,
20,
30,
40,
50,
60,
70)])
def test_crawl_cpu_invm_mode(self, *args):
fc = CpuHostCrawler()
for (k, f, t) in fc.crawl():
assert f == CpuFeature(
cpu_idle=10,
cpu_nice=20,
cpu_user=30,
cpu_wait=40,
cpu_system=50,
cpu_interrupt=60,
cpu_steal=70,
cpu_util=90)
assert args[0].call_count == 1
@mock.patch('plugins.systems.cpu_host_crawler.psutil.cpu_times_percent',
side_effect=throw_os_error)
def test_crawl_cpu_invm_mode_failure(self, *args):
fc = CpuHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent',
side_effect=lambda percpu: [
psutils_cpu(
10,
20,
30,
40,
50,
60,
70)])
@mock.patch('plugins.systems.cpu_container_crawler.CpuContainerCrawler._get_scaling_factor',
side_effect=lambda a,b: 1.0)
@mock.patch('plugins.systems.cpu_container_crawler.time.sleep')
@mock.patch('plugins.systems.cpu_container_crawler.open',
side_effect=mocked_cpu_cgroup_open)
@mock.patch('plugins.systems.cpu_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_cpu_outcontainer_mode(self, *args):
fc = CpuContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == CpuFeature(
cpu_idle=90.0,
cpu_nice=20,
cpu_user=5.0,
cpu_wait=40,
cpu_system=5.0,
cpu_interrupt=60,
cpu_steal=70,
cpu_util=10.0)
assert args[1].call_count == 3 # open for 3 cgroup files
@mock.patch(
'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent',
side_effect=lambda percpu: [
psutils_cpu(
10,
20,
30,
40,
50,
60,
70)])
@mock.patch('plugins.systems.cpu_container_crawler.time.sleep')
@mock.patch('plugins.systems.cpu_container_crawler.open',
side_effect=throw_os_error)
@mock.patch('plugins.systems.cpu_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_cpu_outcontainer_mode_failure(self, *args):
fc = CpuContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.interface_host_crawler.psutil.net_io_counters',
side_effect=lambda pernic: {'interface1-unit-tests':
psutils_net(
10,
20,
30,
40,
50,
60)})
def test_crawl_interface_invm_mode(self, *args):
fc = InterfaceHostCrawler()
for (k, f, t) in fc.crawl():
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
for (k, f, t) in fc.crawl():
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
assert args[0].call_count == 2
@mock.patch(
'plugins.systems.interface_host_crawler.psutil.net_io_counters',
side_effect=throw_os_error)
def test_crawl_interface_invm_mode_failure(self, *args):
fc = InterfaceHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
# Each crawl in crawlutils.py instantiates a FeaturesCrawler object
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 2
@mock.patch('plugins.systems.interface_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
@mock.patch(
'plugins.systems.interface_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
'plugins.systems.interface_container_crawler.psutil.net_io_counters',
side_effect=lambda pernic: {'eth0':
psutils_net(
10,
20,
30,
40,
50,
60)})
def test_crawl_interface_outcontainer_mode(self, *args):
fc = InterfaceContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
for (k, f, t) in fc.crawl('123'):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
assert args[0].call_count == 2
assert args[1].call_count == 2
@mock.patch('plugins.systems.interface_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.interface_vm_crawler.psvmi.interface_iter',
side_effect=lambda vmc: [psvmi_interface(
'eth1', 10, 20, 30, 40, 50, 60)])
@mock.patch('plugins.systems.interface_vm_crawler.psvmi')
def test_crawl_interface_outvm_mode(self, *args):
fc = InterfaceVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
assert args[1].call_count == 2
assert args[2].call_count == 2
@mock.patch('plugins.systems.load_host_crawler.os.getloadavg',
side_effect=lambda: [1, 2, 3])
def test_crawl_load_invm_mode(self, *args):
fc = LoadHostCrawler()
for (k, f, t) in fc.crawl():
assert f == LoadFeature(shortterm=1, midterm=2, longterm=2)
assert args[0].call_count == 1
@mock.patch('plugins.systems.load_host_crawler.os.getloadavg',
side_effect=throw_os_error)
def test_crawl_load_invm_mode_failure(self, *args):
fc = LoadHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.load_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('plugins.systems.load_container_crawler.os.getloadavg',
side_effect=lambda: [1, 2, 3])
@mock.patch('plugins.systems.load_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_load_outcontainer_mode(self, *args):
fc = LoadContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == LoadFeature(shortterm=1, midterm=2, longterm=2)
assert args[1].call_count == 1
assert args[2].call_count == 1
@mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps',
side_effect=lambda: [{'State': {'Running': True},
'Image': 'reg/image:latest',
'Config': {'Cmd': 'command'},
'Name': 'name',
'Id': 'id'}])
def test_crawl_dockerps_invm_mode(self, *args):
fc = DockerpsHostCrawler()
for (k, f, t) in fc.crawl():
assert f == DockerPSFeature(
Status=True,
Created=0,
Image='reg/image:latest',
Ports=[],
Command='command',
Names='name',
Id='id')
assert args[0].call_count == 1
@mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps',
side_effect=throw_os_error)
def test_crawl_dockerps_invm_mode_failure(self, *args):
fc = DockerpsHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch('plugins.systems.dockerhistory_container_crawler.exec_docker_history',
side_effect=lambda long_id: [
{'Id': 'image1', 'random': 'abc'},
{'Id': 'image2', 'random': 'abc'}])
def test_crawl_dockerhistory_outcontainer_mode(self, *args):
fc = DockerhistoryContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == {'history': [{'Id': 'image1', 'random': 'abc'},
{'Id': 'image2', 'random': 'abc'}]}
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.dockerhistory_container_crawler.exec_docker_history',
side_effect=throw_os_error)
def test_crawl_dockerhistory_outcontainer_mode_failure(self, *args):
fc = DockerhistoryContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect',
side_effect=lambda long_id: {
'Id': 'image1',
'random': 'abc'})
def test_crawl_dockerinspect_outcontainer_mode(self, *args):
fc = DockerinspectContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == {'Id': 'image1', 'random': 'abc'}
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect',
side_effect=throw_os_error)
def test_crawl_dockerinspect_outcontainer_mode_failure(self, *args):
fc = DockerinspectContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
assert args[0].call_count == 1
| canturkisci/agentless-system-crawler | tests/unit/test_plugins.py | Python | apache-2.0 | 72,595 |
"""Worker implementation."""
from __future__ import absolute_import, unicode_literals
from .worker import WorkController
__all__ = ('WorkController',)
| kawamon/hue | desktop/core/ext-py/celery-4.2.1/celery/worker/__init__.py | Python | apache-2.0 | 152 |
import click
def incomplete(package):
click.echo('{} packages not yet implemented'.format(package))
@click.group()
def run():
'''Build packages inside Docker containers.'''
pass
@click.command()
@click.option('--image', '-i', help='image to build in', required=True)
def rpm(image):
package = click.style('RPM', fg='red', bold=True)
incomplete(package)
@click.command()
@click.option('--image', '-i', help='image to build in', required=True)
def deb(image):
package = click.style('Debian', fg='magenta', bold=True)
incomplete(package)
@click.command()
@click.option('--image', '-i', help='image to build in', required=True)
def arch(image):
package = click.style('Arch', fg='cyan', bold=True)
incomplete(package)
run.add_command(rpm)
run.add_command(deb)
run.add_command(arch)
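# Hypothetical usage, assuming the console entry point is installed as 'pnc':
#   $ pnc rpm --image fedora:latest
#   $ pnc deb --image debian:stable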
# vim: ft=python sw=4 ts=4 et
| carlwgeorge/pnc | pnc.cli/pnc/cli/__init__.py | Python | apache-2.0 | 850 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.utils.timezone
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
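    # Adds a created_at timestamp (defaulting to now) to the certification,
    # step, worker certification, workflow and workflow version models.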
dependencies = [
('orchestra', '0023_assignment_failed'),
]
operations = [
migrations.AddField(
model_name='certification',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='step',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='workercertification',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='workflow',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='workflowversion',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| b12io/orchestra | orchestra/migrations/0024_auto_20160325_1916.py | Python | apache-2.0 | 1,224 |
import threading
import time
import re
from openflow.optin_manager.sfa.openflow_utils.CreateOFSliver import CreateOFSliver
from openflow.optin_manager.sfa.openflow_utils.sliver_status import get_sliver_status
from openflow.optin_manager.sfa.openflow_utils.delete_slice import delete_slice
from openflow.optin_manager.sfa.openflow_utils.rspec3_to_expedient import get_fs_from_group
from openflow.optin_manager.sfa.util.xrn import Xrn
from openflow.optin_manager.opts.models import Experiment, ExperimentFLowSpace
from openflow.optin_manager.xmlrpc_server.models import CallBackServerProxy, FVServerProxy
#TODO: Uncomment when merge
#from expedient.common.utils.mail import send_mail
from django.conf import settings
from openflow.optin_manager.sfa.openflow_utils.ServiceThread import ServiceThread
from openflow.optin_manager.sfa.models import ExpiringComponents
from openflow.optin_manager.sfa.openflow_utils.federationlinkmanager import FederationLinkManager
#XXX TEST
from openflow.optin_manager.sfa.tests.data_example import test_switches, test_links
class OFShell:
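    """Helper used by the SFA OpenFlow driver to talk to FlowVisor and the
    Opt-In Manager (topology queries, sliver creation, status and deletion)."""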
def __init__(self):
pass
@staticmethod
def get_switches(used_switches=[]):
complete_list = []
switches = OFShell().get_raw_switches()
for switch in switches:
if len(used_switches)>0:
if not switch[0] in used_switches:
continue
if int(switch[1]['nPorts']) == 0:
#TODO: Uncomment when merge with ofelia.development
#send_mail('SFA OptinManager Error', 'There are some errors related with switches: GetSwitches() returned 0 ports.',settings.ROOT_EMAIL, [settings.ROOT_EMAIL])
raise Exception("The switch with dpid:%s has a connection problem and the OCF Island Manager has already been informed. Please try again later." % str(switch[0]))
#TODO: Send Mail to the Island Manager Here.
port_list = switch[1]['portNames'].split(',')
ports = list()
for port in port_list:
match = re.match(r'[\s]*(.*)\((.*)\)', port)
ports.append({'port_name':match.group(1), 'port_num':match.group(2)})
complete_list.append({'dpid':switch[0], 'ports':ports})
return complete_list
@staticmethod
def get_links():
links = OFShell().get_raw_links()
link_list = list()
for link in links:
link_list.append({'src':{ 'dpid':link[0],'port':link[1]}, 'dst':{'dpid':link[2], 'port':link[3]}})
#for link in FederationLinkManager.get_federated_links():
# link_list.append({'src':{'dpid':link['src_id'], 'port':link['src_port']}, 'dst':{'dpid':link['dst_id'],'port':link['dst_port']}})
return link_list
@staticmethod
def get_federation_links():
link_list = list()
for link in FederationLinkManager.get_federated_links():
link_list.append({'src':{'dpid':link['src_id'], 'port':link['src_port']}, 'dst':{'dpid':link['dst_id'],'port':link['dst_port']}})
return link_list
def GetNodes(self,slice_urn=None,authority=None):
if not slice_urn:
switch_list = self.get_switches()
link_list = self.get_links()
federated_links = self.get_federation_links()
return {'switches':switch_list, 'links':link_list, 'federation_links':federated_links}
else:
nodes = list()
experiments = Experiment.objects.filter(slice_id=slice_urn)
for experiment in experiments:
expfss = ExperimentFLowSpace.objects.filter(exp = experiment.id)
for expfs in expfss:
if not expfs.dpid in nodes:
nodes.append(expfs.dpid)
switches = self.get_switches(nodes)
return {'switches':switches, 'links':[]}
#def GetSlice(self,slicename,authority):
#
# name = slicename
# nodes = self.GetNodes()
# slices = dict()
# List = list()
# return slices
def StartSlice(self, slice_urn):
#Look if the slice exists and return True or RecordNotFound
experiments = Experiment.objects.filter(slice_id=str(slice_urn))
if len(experiments) > 0:
return True
else:
raise ""
def StopSlice(self, slice_urn):
#Look if the slice exists and return True or RecordNotFound
experiments = Experiment.objects.filter(slice_id=slice_urn)
if len(experiments) > 0:
return True
else:
raise ""
def RebootSlice(self, slice_urn):
return self.StartSlice(slice_urn)
def DeleteSlice(self, slice_urn):
try:
delete_slice(slice_urn)
return 1
except Exception as e:
print e
raise ""
def CreateSliver(self, requested_attributes, slice_urn, authority,expiration):
project_description = 'SFA Project from %s' %authority
slice_id = slice_urn
for rspec_attrs in requested_attributes:
switch_slivers = get_fs_from_group(rspec_attrs['match'], rspec_attrs['group'])
controller = rspec_attrs['controller'][0]['url']
email = rspec_attrs['email']
email_pass = ''
slice_description = rspec_attrs['description']
if not self.check_req_switches(switch_slivers):
raise Exception("The Requested OF Switches on the RSpec do not match with the available OF switches of this island. Please check the datapath IDs of your Request RSpec.")
CreateOFSliver(slice_id, authority, project_description, slice_urn, slice_description, controller, email, email_pass, switch_slivers)
if expiration:
#Since there is a synchronous connection, expiring_components table is easier to fill than VTAM
#ExpiringComponents.objects.create(slice=slice_urn, authority=authority, expires=expiration)
pass
return 1
def SliverStatus(self, slice_urn):
try:
print "-----------------------------------------------------------SliverStatus"
sliver_status = get_sliver_status(slice_urn)
print sliver_status
if len(sliver_status) == 0:
xrn = Xrn(slice_urn, 'slice')
slice_leaf = xrn.get_leaf()
sliver_status = ['The requested flowspace for slice %s is still pending for approval' %slice_leaf]
granted_fs = {'granted_flowspaces':get_sliver_status(slice_urn)}
return [granted_fs]
except Exception as e:
import traceback
print traceback.print_exc()
raise e
def check_req_switches(self, switch_slivers):
available_switches = self.get_raw_switches()
for sliver in switch_slivers:
found = False
for switch in available_switches:
if str(sliver['datapath_id']) == str(switch[0]): #Avoiding Unicodes
found = True
break
if found == False:
return False
return True
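    # The helpers below ask FlowVisor (via FVServerProxy) for the switch and
    # link lists; if the proxy is unreachable they fall back to the canned
    # test topology so the module still works offline.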
def get_raw_switches(self):
try:
#raise Exception("")
fv = FVServerProxy.objects.all()[0]
switches = fv.get_switches()
except Exception as e:
switches = test_switches
#raise e
return switches
def get_raw_links(self):
try:
#raise Exception("")
fv = FVServerProxy.objects.all()[0]
links = fv.get_links()
except Exception as e:
links = test_links
#raise e
return links
| dana-i2cat/felix | optin_manager/src/python/openflow/optin_manager/sfa/drivers/OFShell.py | Python | apache-2.0 | 8,320 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# tacker documentation build configuration file, created by
# sphinx-quickstart on Tue May 31 19:07:30 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'oslosphinx',
'reno.sphinxext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tacker Release Notes'
copyright = u'2016, Tacker Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import pbr.version
tacker_version = pbr.version.VersionInfo('tacker')
release = tacker_version.version_string_with_vcs()
version = tacker_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to
# use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tackerdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'TackerReleaseNotes.tex',
u'Tacker Release Notes Documentation',
u'Tacker Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tackerreleasenotes', u'Tacker Release Notes Documentation',
[u'Tacker Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TackerReleaseNotes', u'Tacker Release Notes Documentation',
u'Tacker Developers', 'TackerReleaseNotes',
'Tacker Project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| priya-pp/Tacker | releasenotes/source/conf.py | Python | apache-2.0 | 8,504 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup
DESC = """Installer for Apache Bloodhound
Adds the bloodhound_setup cli command.
"""
versions = [
(0, 8, 0),
(0, 9, 0),
]
latest = '.'.join(str(x) for x in versions[-1])
setup(
name="bloodhound_installer",
version=latest,
description=DESC.split('\n', 1)[0],
author="Apache Bloodhound",
license="Apache License v2",
url="https://bloodhound.apache.org/",
requires=['trac', 'BloodhoundMultiProduct'],
packages=['bhsetup'],
entry_points="""
[console_scripts]
bloodhound_setup = bhsetup.bloodhound_setup:run
""",
long_description=DESC,
)
| apache/bloodhound | installer/setup.py | Python | apache-2.0 | 1,462 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from ggrc.models.mixins import (
deferred, Noted, Described, Hyperlinked, WithContact, Titled, Slugged,
)
from ggrc.models.object_document import Documentable
from ggrc.models.object_person import Personable
from ggrc.models.relationship import Relatable
from ggrc.models.request import Request
class Response(Noted, Described, Hyperlinked, WithContact,
Titled, Slugged, db.Model):
__tablename__ = 'responses'
__mapper_args__ = {
'polymorphic_on': 'response_type',
}
_title_uniqueness = False
_slug_uniqueness = False
# Override `Titled.title` to provide default=""
title = deferred(
db.Column(db.String, nullable=False, default=""), 'Response')
VALID_STATES = (u'Assigned', u'Submitted', u'Accepted', u'Rejected')
VALID_TYPES = (u'documentation', u'interview', u'population sample')
request_id = deferred(
db.Column(db.Integer, db.ForeignKey('requests.id'), nullable=False),
'Response')
response_type = db.Column(db.Enum(*VALID_TYPES), nullable=False)
status = deferred(db.Column(db.String, nullable=False), 'Response')
population_worksheet_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=True),
'Response')
population_count = deferred(db.Column(db.Integer, nullable=True), 'Response')
sample_worksheet_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=True),
'Response')
sample_count = deferred(db.Column(db.Integer, nullable=True), 'Response')
sample_evidence_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=True),
'Response')
population_worksheet = db.relationship(
"Document",
foreign_keys="PopulationSampleResponse.population_worksheet_id"
)
sample_worksheet = db.relationship(
"Document",
foreign_keys="PopulationSampleResponse.sample_worksheet_id"
)
sample_evidence = db.relationship(
"Document",
foreign_keys="PopulationSampleResponse.sample_evidence_id"
)
@staticmethod
def _extra_table_args(cls):
return (
db.Index('population_worksheet_document', 'population_worksheet_id'),
db.Index('sample_evidence_document', 'sample_evidence_id'),
db.Index('sample_worksheet_document', 'sample_worksheet_id'),
)
_publish_attrs = [
'request',
'status',
'response_type',
]
_sanitize_html = [
'description',
]
_aliases = {
"description": "Response",
"request": {
"display_name": "Request",
"mandatory": True,
"filter_by": "_filter_by_request",
},
"response_type": {
"display_name": "Response Type",
"mandatory": True,
},
"status": "Status",
"title": None,
"secondary_contact": None,
"notes": None,
}
def _display_name(self):
return u'Response with id={0} for Audit "{1}"'.format(
self.id, self.request.audit.display_name)
@classmethod
def _filter_by_request(cls, predicate):
return Request.query.filter(
(Request.id == cls.request_id) &
predicate(Request.slug)
).exists()
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Response, cls).eager_query()
return query.options(
orm.joinedload('request'))
class DocumentationResponse(Relatable, Documentable, Personable, Response):
__mapper_args__ = {
'polymorphic_identity': 'documentation'
}
_table_plural = 'documentation_responses'
_publish_attrs = []
_sanitize_html = []
class InterviewResponse(Relatable, Documentable, Personable, Response):
__mapper_args__ = {
'polymorphic_identity': 'interview'
}
_table_plural = 'interview_responses'
meetings = db.relationship(
'Meeting',
backref='response',
cascade='all, delete-orphan'
)
_publish_attrs = [
'meetings',
]
_sanitize_html = []
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(InterviewResponse, cls).eager_query()
return query.options(
orm.subqueryload('meetings'))
class PopulationSampleResponse(Relatable, Documentable, Personable, Response):
__mapper_args__ = {
'polymorphic_identity': 'population sample'
}
_table_plural = 'population_sample_responses'
_publish_attrs = [
'population_worksheet',
'population_count',
'sample_worksheet',
'sample_count',
'sample_evidence',
]
_sanitize_html = [
'population_count',
'sample_count',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(PopulationSampleResponse, cls).eager_query()
return query.options(
orm.joinedload('population_worksheet'),
orm.joinedload('sample_worksheet'),
orm.joinedload('sample_evidence'))
| hyperNURb/ggrc-core | src/ggrc/models/response.py | Python | apache-2.0 | 5,096 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show rack --rack`."""
from aquilon.aqdb.model import Rack
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
class CommandShowRackRack(BrokerCommand):
required_parameters = ["rack"]
def render(self, session, rack, **arguments):
return Rack.get_unique(session, rack, compel=True)
| stdweird/aquilon | lib/python2.6/aquilon/worker/commands/show_rack_rack.py | Python | apache-2.0 | 1,055 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from parallel_executor_test_base import TestParallelExecutorBase, DeviceType
import seresnext_net
import paddle.fluid.core as core
class TestResnetWithReduceBase(TestParallelExecutorBase):
def _compare_reduce_and_allreduce(self, use_device, delta2=1e-5):
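        # Train the same SE-ResNeXt model twice -- once with all-reduce and once
        # with reduce gradient aggregation -- and check that the resulting
        # losses agree within the given tolerances.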
if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda():
return
all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device,
use_reduce=False,
optimizer=seresnext_net.optimizer)
reduce_first_loss, reduce_last_loss = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device,
use_reduce=True,
optimizer=seresnext_net.optimizer)
for loss in zip(all_reduce_first_loss, reduce_first_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
for loss in zip(all_reduce_last_loss, reduce_last_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
if not use_device:
return
all_reduce_first_loss_seq, all_reduce_last_loss_seq = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device,
use_reduce=False,
optimizer=seresnext_net.optimizer,
enable_sequential_execution=True)
reduce_first_loss_seq, reduce_last_loss_seq = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device,
use_reduce=True,
optimizer=seresnext_net.optimizer,
enable_sequential_execution=True)
for loss in zip(all_reduce_first_loss, all_reduce_first_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
for loss in zip(all_reduce_last_loss, all_reduce_last_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
for loss in zip(reduce_first_loss, reduce_first_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
for loss in zip(reduce_last_loss, reduce_last_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
for loss in zip(all_reduce_first_loss_seq, reduce_first_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
for loss in zip(all_reduce_last_loss_seq, reduce_last_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
class TestResnetWithReduceCPU(TestResnetWithReduceBase):
def test_seresnext_with_reduce(self):
self._compare_reduce_and_allreduce(
use_device=DeviceType.CPU, delta2=1e-3)
if __name__ == '__main__':
unittest.main()
| PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py | Python | apache-2.0 | 4,150 |
from a10sdk.common.A10BaseClass import A10BaseClass
class PortReservation(A10BaseClass):
"""Class Description::
DS-Lite Static Port Reservation.
Class port-reservation supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param nat_end_port: {"description": "NAT End Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param inside: {"optional": false, "type": "string", "description": "Inside User Address and Port Range (DS-Lite Inside User's Tunnel Source IPv6 Address)", "format": "ipv6-address"}
:param tunnel_dest_address: {"optional": false, "type": "string", "description": "DS-Lite Inside User's Tunnel Destination IPv6 Address", "format": "ipv6-address"}
:param inside_start_port: {"description": "Inside Start Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param nat: {"optional": false, "type": "string", "description": "NAT Port Range (NAT IP address)", "format": "ipv4-address"}
:param inside_end_port: {"description": "Inside End Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param nat_start_port: {"description": "NAT Start Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param inside_addr: {"optional": false, "type": "string", "description": "Inside User IP address", "format": "ipv4-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/ds-lite/port-reservation/{inside}+{tunnel_dest_address}+{inside_addr}+{inside_start_port}+{inside_end_port}+{nat}+{nat_start_port}+{nat_end_port}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "inside","tunnel_dest_address","inside_addr","inside_start_port","inside_end_port","nat","nat_start_port","nat_end_port"]
self.b_key = "port-reservation"
self.a10_url="/axapi/v3/cgnv6/ds-lite/port-reservation/{inside}+{tunnel_dest_address}+{inside_addr}+{inside_start_port}+{inside_end_port}+{nat}+{nat_start_port}+{nat_end_port}"
self.DeviceProxy = ""
self.nat_end_port = ""
self.uuid = ""
self.inside = ""
self.tunnel_dest_address = ""
self.inside_start_port = ""
self.nat = ""
self.inside_end_port = ""
self.nat_start_port = ""
self.inside_addr = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
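# Minimal usage sketch (not part of the generated SDK): the reservation object
# is just a container for the axapi fields documented above; a configured
# DeviceProxy (see common/device_proxy.py) would be attached before any REST
# call is made. All addresses and ports below are illustrative placeholders.
if __name__ == '__main__':
    reservation = PortReservation(
        inside='2001:db8::1', tunnel_dest_address='2001:db8::2',
        inside_addr='10.0.0.1', inside_start_port=1024, inside_end_port=2047,
        nat='198.51.100.1', nat_start_port=4096, nat_end_port=5119)
    # The URL template still contains its {field} placeholders at this point.
    print(reservation.a10_url)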
| amwelch/a10sdk-python | a10sdk/core/cgnv6/cgnv6_ds_lite_port_reservation.py | Python | apache-2.0 | 2,890 |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from pychron.core.ui import set_qt
from six.moves import range
from six.moves import zip
set_qt()
# ============= enthought library imports =======================
from traits.api import Any, Str
# ============= standard library imports ========================
import os
import struct
from numpy import array
# ============= local library imports ==========================
from pychron.core.helpers.filetools import pathtolist
from pychron.loggable import Loggable
from pychron.core.helpers.logger_setup import logging_setup
from pychron.mass_spec.database.massspec_database_adapter import MassSpecDatabaseAdapter
from pychron.database.isotope_database_manager import IsotopeDatabaseManager
from pychron.experiment.utilities.identifier import (
convert_identifier_to_int,
strip_runid,
)
logging_setup("ms_reverter")
class MassSpecReverter(Loggable):
"""
    Use to revert data from Pychron to MassSpec.
    Uses the MassSpecDatabaseAdapter to do the actual work.
    This class takes a list of run ids, extracts data from
    the pychron database, prepares the data,
    then writes it to the MassSpec database.
"""
source = Any
destination = Any
path = Str
def do_revert(self):
# if self._connect_to_source():
if self._connect_to_destination():
self._do_revert()
def do_reimport(self):
if self._connect_to_source():
if self._connect_to_destination():
self._do_reimport()
def setup_source(self):
src = IsotopeDatabaseManager(connect=False, bind=False)
db = src.db
db.trait_set(
name="pychrondata",
kind="mysql",
host=os.environ.get("HOST"),
username="root",
password=os.environ.get("DB_PWD"),
)
self.source = src
def setup_destination(self):
dest = MassSpecDatabaseAdapter()
dest.trait_set(
name="massspecdata_crow",
kind="mysql",
username="root",
password=os.environ.get("DB_PWD"),
)
self.destination = dest
def _connect_to_source(self):
return self.source.connect()
def _connect_to_destination(self):
return self.destination.connect()
def _load_runids(self):
runids = pathtolist(self.path)
return runids
def _do_reimport(self):
rids = self._load_runids()
for rid in rids:
self._reimport_rid(rid)
def _reimport_rid(self, rid):
self.debug("========= Reimport {} =========".format(rid))
dest = self.destination
src_an = self._get_analysis_from_source(rid)
if src_an is None:
self.warning("could not find {}".format(rid))
else:
dest_an = dest.get_analysis_rid(rid)
for iso in dest_an.isotopes:
pb, pbnc = self._generate_blobs(src_an, iso.Label)
pt = iso.peak_time_series[0]
pt.PeakTimeBlob = pb
pt.PeakNeverBslnCorBlob = pbnc
dest.commit()
def _generate_blobs(self, src, isok):
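        # Build the two MassSpec blobs for one isotope: the peak-time blob holds
        # baseline-corrected (intensity, time) float pairs, while the
        # never-baseline-corrected blob holds the raw intensities.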
dbiso = next(
(
i
for i in src.isotopes
if i.molecular_weight.name == isok and i.kind == "signal"
),
None,
)
dbiso_bs = next(
(
i
for i in src.isotopes
if i.molecular_weight.name == isok and i.kind == "baseline"
),
None,
)
xs, ys = self._unpack_data(dbiso.signal.data)
bsxs, bsys = self._unpack_data(dbiso_bs.signal.data)
bs = bsys.mean()
cys = ys - bs
ncblob = "".join([struct.pack(">f", v) for v in ys])
cblob = "".join([struct.pack(">ff", y, x) for y, x in zip(cys, xs)])
return cblob, ncblob
def _unpack_data(self, blob):
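        # A pychron signal blob is a run of big-endian float pairs, 8 bytes per
        # sample; split it back into x (time) and y (intensity) arrays.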
endianness = ">"
sx, sy = list(
zip(
*[
struct.unpack("{}ff".format(endianness), blob[i : i + 8])
for i in range(0, len(blob), 8)
]
)
)
return array(sx), array(sy)
def _get_analysis_from_source(self, rid):
if rid.count("-") > 1:
args = rid.split("-")
step = None
lan = "-".join(args[:-1])
aliquot = args[-1]
else:
lan, aliquot, step = strip_runid(rid)
lan = convert_identifier_to_int(lan)
db = self.source.db
dban = db.get_unique_analysis(lan, aliquot, step)
return dban
def _do_revert(self):
rids = self._load_runids()
for rid in rids:
self._revert_rid(rid)
def _revert_rid(self, rid):
"""
rid: str. typical runid e.g 12345, 12345-01, 12345-01A
if rid lacks an aliquot revert all aliquots and steps for
this rid
"""
self.debug("reverting {}".format(rid))
if "-" in rid:
# this is a specific analysis
self._revert_analysis(rid)
else:
self._revert_analyses(rid)
def _revert_analyses(self, rid):
"""
rid: str. e.g 12345
revert all analyses with this labnumber
"""
def _revert_analysis(self, rid):
"""
rid: str. e.g 12345-01 or 12345-01A
only revert this specific analysis
"""
# l,a,s = strip_runid(rid)
# db = self.source.db
dest = self.destination
# with db.session_ctx():
self.debug("========= Revert {} =========".format(rid))
dest_an = dest.get_analysis_rid(rid)
for iso in dest_an.isotopes:
isol = iso.Label
self.debug("{} reverting isotope id = {}".format(isol, iso.IsotopeID))
# fix IsotopeTable.NumCnts
n = len(iso.peak_time_series[0].PeakTimeBlob) / 8
self.debug(
"{} fixing NumCnts. current={} new={}".format(isol, iso.NumCnts, n)
)
iso.NumCnts = n
nf = len(iso.peak_time_series)
if nf > 1:
self.debug("{} deleting {} refits".format(isol, nf - 1))
# delete peak time blobs
for i, pt in enumerate(iso.peak_time_series[1:]):
self.debug(
"{} A {:02d} deleting pt series {}".format(
isol, i + 1, pt.Counter
)
)
dest.delete(pt)
# delete isotope results
for i, ir in enumerate(iso.results[1:]):
self.debug(
"{} B {:02d} deleting results {}".format(
isol, i + 1, ir.Counter
)
)
dest.delete(ir)
dest.commit()
if __name__ == "__main__":
m = MassSpecReverter(path="/Users/ross/Sandbox/crow_revert.txt")
m.setup_source()
m.setup_destination()
m.do_reimport()
# m.do_revert()
# ============= EOF =============================================
#
# def _get_analyses_from_source(self, labnumber):
# db = self.source.db
# with db.session_ctx():
# pass
| USGSDenverPychron/pychron | pychron/entry/mass_spec_reverter.py | Python | apache-2.0 | 8,096 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.core.targets.doc import Page
from pants.backend.core.tasks.filter import Filter
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.base.exceptions import TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseFilterTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'target': Dependencies,
'java_library': JavaLibrary,
'page': Page,
'python_library': PythonLibrary,
'python_requirement_library': PythonRequirementLibrary,
}
)
@classmethod
def task_type(cls):
return Filter
class FilterEmptyTargetsTest(BaseFilterTest):
def test_no_filters(self):
self.assert_console_output()
def test_type(self):
self.assert_console_output(options={'type': ['page']})
self.assert_console_output(options={'type': ['java_library']})
def test_regex(self):
self.assert_console_output(options={'regex': ['^common']})
self.assert_console_output(options={'regex': ['-^common']})
class FilterTest(BaseFilterTest):
def setUp(self):
super(FilterTest, self).setUp()
requirement_injected = set()
def add_to_build_file(path, name, *deps):
if path not in requirement_injected:
self.add_to_build_file(path, "python_requirement_library(name='foo')")
requirement_injected.add(path)
all_deps = ["'{0}'".format(dep) for dep in deps] + ["':foo'"]
self.add_to_build_file(path, dedent("""
python_library(name='{name}',
dependencies=[{all_deps}],
tags=['{tag}']
)
""".format(name=name, tag=name + "_tag", all_deps=','.join(all_deps))))
add_to_build_file('common/a', 'a')
add_to_build_file('common/b', 'b')
add_to_build_file('common/c', 'c')
add_to_build_file('overlaps', 'one', 'common/a', 'common/b')
add_to_build_file('overlaps', 'two', 'common/a', 'common/c')
add_to_build_file('overlaps', 'three', 'common/a', 'overlaps:one')
def test_roots(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'),
extra_targets=self.targets('overlaps/::')
)
def test_nodups(self):
targets = [self.target('common/b')] * 2
self.assertEqual(2, len(targets))
self.assert_console_output(
'common/b:b',
targets=targets
)
def test_no_filters(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::')
)
def test_filter_type(self):
self.assert_console_output(
'common/a:a',
'common/b:b',
'common/c:c',
'overlaps:one',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'type': ['python_library']}
)
self.assert_console_output(
'common/a:foo',
'common/b:foo',
'common/c:foo',
'overlaps:foo',
targets=self.targets('::'),
options={'type': ['-python_library']}
)
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
# Note that the comma is inside the string, so these are ORed.
options={'type': ['python_requirement_library,python_library']}
)
def test_filter_multiple_types(self):
# A target can only have one type, so the output should be empty.
self.assert_console_output(
targets=self.targets('::'),
options={'type': ['python_requirement_library', 'python_library']}
)
def test_filter_target(self):
self.assert_console_output(
'common/a:a',
'overlaps:foo',
targets=self.targets('::'),
options={'target': ['common/a,overlaps/:foo']}
)
self.assert_console_output(
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'target': ['-common/a:a,overlaps:one,overlaps:foo']}
)
def test_filter_ancestor(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'overlaps:one',
'overlaps:foo',
targets=self.targets('::'),
options={'ancestor': ['overlaps:one,overlaps:foo']}
)
self.assert_console_output(
'common/c:c',
'common/c:foo',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'ancestor': ['-overlaps:one,overlaps:foo']}
)
def test_filter_ancestor_out_of_context(self):
"""Tests that targets outside of the context used as filters are parsed before use."""
# Add an additional un-injected target, and then use it as a filter.
self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
self.assert_console_output(
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
options={'ancestor': ['-blacklist']}
)
def test_filter_ancestor_not_passed_targets(self):
"""Tests filtering targets based on an ancestor not in that list of targets."""
# Add an additional un-injected target, and then use it as a filter.
self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
self.assert_console_output(
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'), # blacklist is not in the list of targets
options={'ancestor': ['-blacklist']}
)
self.assert_console_output(
'common/a:a', # a: _should_ show up if we don't filter.
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'),
options={'ancestor': []}
)
def test_filter_regex(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('::'),
options={'regex': ['^common']}
)
self.assert_console_output(
'common/a:foo',
'common/b:foo',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
options={'regex': ['+foo,^overlaps']}
)
self.assert_console_output(
'overlaps:one',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'regex': ['-^common,foo$']}
)
# Invalid regex.
self.assert_console_raises(TaskError,
targets=self.targets('::'),
options={'regex': ['abc)']}
)
def test_filter_tag_regex(self):
# Filter two.
self.assert_console_output(
'overlaps:three',
targets=self.targets('::'),
options={'tag_regex': ['+e(?=e)']}
)
# Removals.
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:foo',
'overlaps:three',
targets=self.targets('::'),
options={'tag_regex': ['-one|two']}
)
# Invalid regex.
self.assert_console_raises(TaskError,
targets=self.targets('::'),
options={'tag_regex': ['abc)']}
)
def test_filter_tag(self):
# One match.
self.assert_console_output(
'common/a:a',
targets=self.targets('::'),
options={'tag': ['+a_tag']}
)
# Two matches.
self.assert_console_output(
'common/a:a',
'common/b:b',
targets=self.targets('::'),
options={'tag': ['+a_tag,b_tag']}
)
# One removal.
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:foo',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'tag': ['-one_tag']}
)
# Two removals.
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:foo',
'overlaps:three',
targets=self.targets('::'),
options={'tag': ['-one_tag,two_tag']}
)
# No match.
self.assert_console_output(
targets=self.targets('::'),
options={'tag': ['+abcdefg_tag']}
)
# No match due to AND of separate predicates.
self.assert_console_output(
targets=self.targets('::'),
options={'tag': ['a_tag', 'b_tag']}
)
| sameerparekh/pants | tests/python/pants_test/backend/core/tasks/test_filter.py | Python | apache-2.0 | 9,825 |
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import re
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
server_opts = [
cfg.BoolOpt('enable_instance_password',
default=True,
help='Enables returning of the instance password by the'
' relevant server API calls such as create, rebuild'
' or rescue, If the hypervisor does not support'
' password injection then the password returned will'
' not be correct'),
]
CONF = cfg.CONF
CONF.register_opts(server_opts)
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
XML_WARNING = False
def make_fault(elem):
fault = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
fault.set('code')
fault.set('created')
msg = xmlutil.SubTemplateElement(fault, 'message')
msg.text = 'message'
det = xmlutil.SubTemplateElement(fault, 'details')
det.text = 'details'
def make_server(elem, detailed=False):
elem.set('name')
elem.set('id')
global XML_WARNING
if not XML_WARNING:
LOG.warning(_('XML support has been deprecated and may be removed '
'as early as the Juno release.'))
XML_WARNING = True
if detailed:
elem.set('userId', 'user_id')
elem.set('tenantId', 'tenant_id')
elem.set('updated')
elem.set('created')
elem.set('hostId')
elem.set('accessIPv4')
elem.set('accessIPv6')
elem.set('status')
elem.set('progress')
elem.set('reservation_id')
# Attach image node
image = xmlutil.SubTemplateElement(elem, 'image', selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
# Attach flavor node
flavor = xmlutil.SubTemplateElement(elem, 'flavor', selector='flavor')
flavor.set('id')
xmlutil.make_links(flavor, 'links')
# Attach fault node
make_fault(elem)
# Attach metadata node
elem.append(common.MetadataTemplate())
# Attach addresses node
elem.append(ips.AddressesTemplate())
xmlutil.make_links(elem, 'links')
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
xmlutil.make_links(root, 'servers_links')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('adminPass')
return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap)
class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('reservation_id')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
def FullServerTemplate():
master = ServerTemplate()
master.attach(ServerAdminPassTemplate())
return master
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_personality(self, server_node):
"""Marshal the personality attribute of a parsed request."""
node = self.find_first_child_named(server_node, "personality")
if node is not None:
personality = []
for file_node in self.find_children_named(node, "file"):
item = {}
if file_node.hasAttribute("path"):
item["path"] = file_node.getAttribute("path")
item["contents"] = self.extract_text(file_node)
personality.append(item)
return personality
else:
return None
def _extract_server(self, node):
"""Marshal the server attribute of a parsed request."""
server = {}
server_node = self.find_first_child_named(node, 'server')
attributes = ["name", "imageRef", "flavorRef", "adminPass",
"accessIPv4", "accessIPv6", "key_name",
"availability_zone", "min_count", "max_count"]
for attr in attributes:
if server_node.getAttribute(attr):
server[attr] = server_node.getAttribute(attr)
res_id = server_node.getAttribute('return_reservation_id')
if res_id:
server['return_reservation_id'] = \
strutils.bool_from_string(res_id)
scheduler_hints = self._extract_scheduler_hints(server_node)
if scheduler_hints:
server['OS-SCH-HNT:scheduler_hints'] = scheduler_hints
metadata_node = self.find_first_child_named(server_node, "metadata")
if metadata_node is not None:
server["metadata"] = self.extract_metadata(metadata_node)
user_data_node = self.find_first_child_named(server_node, "user_data")
if user_data_node is not None:
server["user_data"] = self.extract_text(user_data_node)
personality = self._extract_personality(server_node)
if personality is not None:
server["personality"] = personality
networks = self._extract_networks(server_node)
if networks is not None:
server["networks"] = networks
security_groups = self._extract_security_groups(server_node)
if security_groups is not None:
server["security_groups"] = security_groups
# NOTE(vish): this is not namespaced in json, so leave it without a
# namespace for now
block_device_mapping = self._extract_block_device_mapping(server_node)
if block_device_mapping is not None:
server["block_device_mapping"] = block_device_mapping
block_device_mapping_v2 = self._extract_block_device_mapping_v2(
server_node)
if block_device_mapping_v2 is not None:
server["block_device_mapping_v2"] = block_device_mapping_v2
# NOTE(vish): Support this incorrect version because it was in the code
# base for a while and we don't want to accidentally break
# anyone that might be using it.
auto_disk_config = server_node.getAttribute('auto_disk_config')
if auto_disk_config:
server['OS-DCF:diskConfig'] = auto_disk_config
auto_disk_config = server_node.getAttribute('OS-DCF:diskConfig')
if auto_disk_config:
server['OS-DCF:diskConfig'] = auto_disk_config
config_drive = server_node.getAttribute('config_drive')
if config_drive:
server['config_drive'] = config_drive
return server
def _extract_block_device_mapping(self, server_node):
"""Marshal the block_device_mapping node of a parsed request."""
node = self.find_first_child_named(server_node, "block_device_mapping")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
mapping = {}
attributes = ["volume_id", "snapshot_id", "device_name",
"virtual_name", "volume_size"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = value
attributes = ["delete_on_termination", "no_device"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = strutils.bool_from_string(value)
block_device_mapping.append(mapping)
return block_device_mapping
else:
return None
def _extract_block_device_mapping_v2(self, server_node):
"""Marshal the new block_device_mappings."""
node = self.find_first_child_named(server_node,
"block_device_mapping_v2")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
block_device_mapping.append(
dict((attr, child.getAttribute(attr))
for attr in block_device.bdm_new_api_fields
if child.getAttribute(attr)))
return block_device_mapping
def _extract_scheduler_hints(self, server_node):
"""Marshal the scheduler hints attribute of a parsed request."""
node = self.find_first_child_named_in_namespace(server_node,
"http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
"scheduler_hints")
if node:
scheduler_hints = {}
for child in self.extract_elements(node):
scheduler_hints.setdefault(child.nodeName, [])
value = self.extract_text(child).strip()
scheduler_hints[child.nodeName].append(value)
return scheduler_hints
else:
return None
def _extract_networks(self, server_node):
"""Marshal the networks attribute of a parsed request."""
node = self.find_first_child_named(server_node, "networks")
if node is not None:
networks = []
for network_node in self.find_children_named(node,
"network"):
item = {}
if network_node.hasAttribute("uuid"):
item["uuid"] = network_node.getAttribute("uuid")
if network_node.hasAttribute("fixed_ip"):
item["fixed_ip"] = network_node.getAttribute("fixed_ip")
if network_node.hasAttribute("port"):
item["port"] = network_node.getAttribute("port")
networks.append(item)
return networks
else:
return None
def _extract_security_groups(self, server_node):
"""Marshal the security_groups attribute of a parsed request."""
node = self.find_first_child_named(server_node, "security_groups")
if node is not None:
security_groups = []
for sg_node in self.find_children_named(node, "security_group"):
item = {}
name = self.find_attribute_or_element(sg_node, 'name')
if name:
item["name"] = name
security_groups.append(item)
return security_groups
else:
return None
class ActionDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server action requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
dom = xmlutil.safe_minidom_parse_string(string)
action_node = dom.childNodes[0]
action_name = action_node.tagName
action_deserializer = {
'createImage': self._action_create_image,
'changePassword': self._action_change_password,
'reboot': self._action_reboot,
'rebuild': self._action_rebuild,
'resize': self._action_resize,
'confirmResize': self._action_confirm_resize,
'revertResize': self._action_revert_resize,
}.get(action_name, super(ActionDeserializer, self).default)
action_data = action_deserializer(action_node)
return {'body': {action_name: action_data}}
def _action_create_image(self, node):
return self._deserialize_image_action(node, ('name',))
def _action_change_password(self, node):
if not node.hasAttribute("adminPass"):
raise AttributeError("No adminPass was specified in request")
return {"adminPass": node.getAttribute("adminPass")}
def _action_reboot(self, node):
if not node.hasAttribute("type"):
raise AttributeError("No reboot type was specified in request")
return {"type": node.getAttribute("type")}
def _action_rebuild(self, node):
rebuild = {}
if node.hasAttribute("name"):
name = node.getAttribute("name")
if not name:
raise AttributeError("Name cannot be blank")
rebuild['name'] = name
if node.hasAttribute("auto_disk_config"):
rebuild['OS-DCF:diskConfig'] = node.getAttribute(
"auto_disk_config")
if node.hasAttribute("OS-DCF:diskConfig"):
rebuild['OS-DCF:diskConfig'] = node.getAttribute(
"OS-DCF:diskConfig")
metadata_node = self.find_first_child_named(node, "metadata")
if metadata_node is not None:
rebuild["metadata"] = self.extract_metadata(metadata_node)
personality = self._extract_personality(node)
if personality is not None:
rebuild["personality"] = personality
if not node.hasAttribute("imageRef"):
raise AttributeError("No imageRef was specified in request")
rebuild["imageRef"] = node.getAttribute("imageRef")
if node.hasAttribute("adminPass"):
rebuild["adminPass"] = node.getAttribute("adminPass")
if node.hasAttribute("accessIPv4"):
rebuild["accessIPv4"] = node.getAttribute("accessIPv4")
if node.hasAttribute("accessIPv6"):
rebuild["accessIPv6"] = node.getAttribute("accessIPv6")
if node.hasAttribute("preserve_ephemeral"):
rebuild["preserve_ephemeral"] = strutils.bool_from_string(
node.getAttribute("preserve_ephemeral"), strict=True)
return rebuild
def _action_resize(self, node):
resize = {}
if node.hasAttribute("flavorRef"):
resize["flavorRef"] = node.getAttribute("flavorRef")
else:
raise AttributeError("No flavorRef was specified in request")
if node.hasAttribute("auto_disk_config"):
resize['OS-DCF:diskConfig'] = node.getAttribute("auto_disk_config")
if node.hasAttribute("OS-DCF:diskConfig"):
resize['OS-DCF:diskConfig'] = node.getAttribute(
"OS-DCF:diskConfig")
return resize
def _action_confirm_resize(self, node):
return None
def _action_revert_resize(self, node):
return None
def _deserialize_image_action(self, node, allowed_attributes):
data = {}
for attribute in allowed_attributes:
value = node.getAttribute(attribute)
if value:
data[attribute] = value
metadata_node = self.find_first_child_named(node, 'metadata')
if metadata_node is not None:
metadata = self.metadata_deserializer.extract_metadata(
metadata_node)
data['metadata'] = metadata
return data
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
"""Deserialize an xml-formatted server create request."""
dom = xmlutil.safe_minidom_parse_string(string)
server = self._extract_server(dom)
return {'body': {'server': server}}
class Controller(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
_view_builder_class = views_servers.ViewBuilder
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, ext_mgr=None, **kwargs):
super(Controller, self).__init__(**kwargs)
self.compute_api = compute.API()
self.ext_mgr = ext_mgr
@wsgi.serializers(xml=MinimalServersTemplate)
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@wsgi.serializers(xml=ServersTemplate)
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
status = search_opts.pop('status', None)
if status is not None:
vm_state, task_state = common.task_and_vm_state_from_status(status)
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes-since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes-since' is specified, because 'changes-since'
# should return recently deleted images according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
# If all tenants is passed with 0 or false as the value
# then remove it from the search options. Nothing passed as
# the value for all_tenants is considered to enable the feature
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
if not strutils.bool_from_string(all_tenants, True):
del search_opts['all_tenants']
except ValueError as err:
raise exception.InvalidInput(str(err))
if 'all_tenants' in search_opts:
policy.enforce(context, 'compute:get_all_tenants',
{'project_id': context.project_id,
'user_id': context.user_id})
del search_opts['all_tenants']
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
try:
instance_list = self.compute_api.get_all(context,
search_opts=search_opts,
limit=limit,
marker=marker,
want_objects=True)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
log_msg = _("Flavor '%s' could not be found ")
LOG.debug(log_msg, search_opts['flavor'])
# TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
instance_list = instance_obj.InstanceList(objects=[])
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
try:
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
def _get_injected_files(self, personality):
"""Create a list of injected files from the personality attribute.
At this time, injected_files must be formatted as a list of
(file_path, file_content) pairs for compatibility with the
underlying compute service.
"""
injected_files = []
for item in personality:
try:
path = item['path']
contents = item['contents']
except KeyError as key:
expl = _('Bad personality format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
if self._decode_base64(contents) is None:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
return injected_files
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
for network in requested_networks:
try:
port_id = network.get('port', None)
if port_id:
network_uuid = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument : port")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(port_id):
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % port_id
raise exc.HTTPBadRequest(explanation=msg)
else:
network_uuid = network['uuid']
if not port_id and not uuidutils.is_uuid_like(network_uuid):
br_uuid = network_uuid.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % network_uuid
raise exc.HTTPBadRequest(explanation=msg)
                # The fixed IP address is optional; if it is not provided, one
                # of the available IP addresses from the network will be used.
address = network.get('fixed_ip', None)
if address is not None and not utils.is_valid_ip_address(
address):
msg = _("Invalid fixed IP address (%s)") % address
raise exc.HTTPBadRequest(explanation=msg)
# For neutronv2, requested_networks
# should be tuple of (network_uuid, fixed_ip, port_id)
if utils.is_neutron():
networks.append((network_uuid, address, port_id))
else:
# check if the network id is already present in the list,
# we don't want duplicate networks to be passed
# at the boot time
for id, ip in networks:
if id == network_uuid:
expl = (_("Duplicate networks"
" (%s) are not allowed") %
network_uuid)
raise exc.HTTPBadRequest(explanation=expl)
networks.append((network_uuid, address))
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return networks
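    # For illustration (hypothetical request data): a networks entry such as
    #     [{'uuid': '11111111-2222-3333-4444-555555555555', 'fixed_ip': '10.0.0.5'}]
    # becomes a (network_uuid, fixed_ip, port_id) triple with port_id=None when
    # neutron is in use, or a (network_uuid, fixed_ip) pair for nova-network,
    # matching the comments above.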
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
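    # For illustration (standard-library behaviour, values hypothetical): the
    # regex guard matters because b64decode silently discards characters that
    # are outside the base64 alphabet, e.g.
    #     base64.b64decode('SGVs!!bG8=')  # -> 'Hello', the '!!' is ignored
    # so corrupted input could still "decode"; _decode_base64() returns None
    # for such data instead.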
def _validate_user_data(self, user_data):
"""Check if the user_data is encoded properly."""
if not user_data:
return
if self._decode_base64(user_data) is None:
expl = _('Userdata content cannot be decoded')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv4(self, address):
if not utils.is_valid_ipv4(address):
expl = _('accessIPv4 is not proper IPv4 format')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv6(self, address):
if not utils.is_valid_ipv6(address):
expl = _('accessIPv6 is not proper IPv6 format')
raise exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ServerTemplate)
def show(self, req, id):
"""Returns server details by server id."""
try:
context = req.environ['nova.context']
instance = self.compute_api.get(context, id,
want_objects=True)
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new server for a given user."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
if 'name' not in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
image_uuid = self._image_from_req_data(body)
personality = server_dict.get('personality')
config_drive = None
if self.ext_mgr.is_loaded('os-config-drive'):
config_drive = server_dict.get('config_drive')
injected_files = []
if personality:
injected_files = self._get_injected_files(personality)
sg_names = []
if self.ext_mgr.is_loaded('os-security-groups'):
security_groups = server_dict.get('security_groups')
if security_groups is not None:
sg_names = [sg['name'] for sg in security_groups
if sg.get('name')]
if not sg_names:
sg_names.append('default')
sg_names = list(set(sg_names))
requested_networks = None
if (self.ext_mgr.is_loaded('os-networks')
or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if requested_networks is not None:
if not isinstance(requested_networks, list):
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
requested_networks = self._get_requested_networks(
requested_networks)
        access_ip_v4 = server_dict.get('accessIPv4')
if access_ip_v4 is not None:
self._validate_access_ipv4(access_ip_v4)
        access_ip_v6 = server_dict.get('accessIPv6')
if access_ip_v6 is not None:
self._validate_access_ipv6(access_ip_v6)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
# optional openstack extensions:
key_name = None
if self.ext_mgr.is_loaded('os-keypairs'):
key_name = server_dict.get('key_name')
user_data = None
if self.ext_mgr.is_loaded('os-user-data'):
user_data = server_dict.get('user_data')
self._validate_user_data(user_data)
availability_zone = None
if self.ext_mgr.is_loaded('os-availability-zone'):
availability_zone = server_dict.get('availability_zone')
block_device_mapping = None
block_device_mapping_v2 = None
legacy_bdm = True
if self.ext_mgr.is_loaded('os-volumes'):
block_device_mapping = server_dict.get('block_device_mapping', [])
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
block_device.validate_and_default_volume_size(bdm)
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot'):
# Consider the new data format for block device mapping
block_device_mapping_v2 = server_dict.get(
'block_device_mapping_v2', [])
# NOTE (ndipanov): Disable usage of both legacy and new
# block device format in the same request
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
# Assume legacy format
legacy_bdm = not bool(block_device_mapping_v2)
try:
block_device_mapping_v2 = [
block_device.BlockDeviceDict.from_api(bdm_dict)
for bdm_dict in block_device_mapping_v2]
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
block_device_mapping = (block_device_mapping or
block_device_mapping_v2)
ret_resv_id = False
# min_count and max_count are optional. If they exist, they may come
# in as strings. Verify that they are valid integers and > 0.
# Also, we want to default 'min_count' to 1, and default
# 'max_count' to be 'min_count'.
min_count = 1
max_count = 1
if self.ext_mgr.is_loaded('os-multiple-create'):
ret_resv_id = server_dict.get('return_reservation_id', False)
min_count = server_dict.get('min_count', 1)
max_count = server_dict.get('max_count', min_count)
try:
min_count = utils.validate_integer(
min_count, "min_count", min_value=1)
max_count = utils.validate_integer(
max_count, "max_count", min_value=1)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if min_count > max_count:
msg = _('min_count must be <= max_count')
raise exc.HTTPBadRequest(explanation=msg)
auto_disk_config = False
if self.ext_mgr.is_loaded('OS-DCF'):
auto_disk_config = server_dict.get('auto_disk_config')
scheduler_hints = {}
if self.ext_mgr.is_loaded('OS-SCH-HNT'):
scheduler_hints = server_dict.get('scheduler_hints', {})
try:
_get_inst_type = flavors.get_flavor_by_flavor_id
inst_type = _get_inst_type(flavor_id, ctxt=context,
read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
key_name=key_name,
metadata=server_dict.get('metadata', {}),
access_ip_v4=access_ip_v4,
access_ip_v6=access_ip_v6,
injected_files=injected_files,
admin_password=password,
min_count=min_count,
max_count=max_count,
requested_networks=requested_networks,
security_group=sg_names,
user_data=user_data,
availability_zone=availability_zone,
config_drive=config_drive,
block_device_mapping=block_device_mapping,
auto_disk_config=auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound as error:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % unicode(error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.MultiplePortsNotApplicable,
exception.NetworkNotFound,
exception.PortNotFound,
exception.SecurityGroupNotFound,
exception.InvalidBDM,
exception.PortRequiresFixedIP,
exception.NetworkRequiresSubnet,
exception.InstanceUserDataMalformed) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if ret_resv_id:
return wsgi.ResponseObject({'reservation_id': resv_id},
xml=ServerMultipleCreateTemplate)
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
@wsgi.serializers(xml=ServerTemplate)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
ctxt = req.environ['nova.context']
update_dict = {}
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
if 'accessIPv4' in body['server']:
access_ipv4 = body['server']['accessIPv4']
if access_ipv4:
self._validate_access_ipv4(access_ipv4)
update_dict['access_ip_v4'] = (
access_ipv4 and access_ipv4.strip() or None)
if 'accessIPv6' in body['server']:
access_ipv6 = body['server']['accessIPv6']
if access_ipv6:
self._validate_access_ipv6(access_ipv6)
update_dict['access_ip_v6'] = (
access_ipv6 and access_ipv6.strip() or None)
if 'auto_disk_config' in body['server']:
auto_disk_config = strutils.bool_from_string(
body['server']['auto_disk_config'])
update_dict['auto_disk_config'] = auto_disk_config
if 'hostId' in body['server']:
msg = _("HostId cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if 'personality' in body['server']:
msg = _("Personality cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(ctxt, id,
want_objects=True)
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(req, instance)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize')
return exc.HTTPNoContent()
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
if not valid_reboot_types.count(reboot_type):
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _image_ref_from_req_data(self, data):
try:
return unicode(data['server']['imageRef'])
except (TypeError, KeyError):
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
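    # For illustration (hypothetical href): an imageRef generated by the API,
    #     http://glance.example.com/v2/images/7d5f0f2c-5c59-4a6e-9b1c-8d6c2c4b5e1f
    # is reduced to its trailing segment by split('/').pop(); a bare UUID passes
    # through unchanged, and anything that does not end in a UUID raises
    # HTTPBadRequest.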
def _image_from_req_data(self, data):
"""Get image data from the request or raise appropriate
exceptions
If no image is supplied - checks to see if there is
block devices set and proper extesions loaded.
"""
image_ref = data['server'].get('imageRef')
bdm = data['server'].get('block_device_mapping')
bdm_v2 = data['server'].get('block_device_mapping_v2')
if (not image_ref and (
(bdm and self.ext_mgr.is_loaded('os-volumes')) or
(bdm_v2 and
self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot')))):
return ''
else:
image_href = self._image_ref_from_req_data(data)
image_uuid = self._image_uuid_from_href(image_href)
return image_uuid
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavorRef']
except (TypeError, KeyError):
msg = _("Missing flavorRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('changePassword')
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
        if ('changePassword' not in body
or 'adminPass' not in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = self._get_server_admin_password(body['changePassword'])
server = self._get_server(context, req, id)
try:
self.compute_api.set_admin_password(context, server, password)
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
def _validate_metadata(self, metadata):
"""Ensure that we can work with the metadata given."""
try:
metadata.iteritems()
except AttributeError:
msg = _("Unable to parse metadata key/value pairs.")
LOG.debug(msg)
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
try:
flavor_ref = str(body["resize"]["flavorRef"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
kwargs = {}
if 'auto_disk_config' in body['resize']:
kwargs['auto_disk_config'] = body['resize']['auto_disk_config']
return self._resize(req, id, flavor_ref, **kwargs)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
body = body['rebuild']
try:
image_href = body["imageRef"]
except (KeyError, TypeError):
msg = _("Could not parse imageRef from request.")
raise exc.HTTPBadRequest(explanation=msg)
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(body)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {
'personality': 'files_to_inject',
'name': 'display_name',
'accessIPv4': 'access_ip_v4',
'accessIPv6': 'access_ip_v6',
'metadata': 'metadata',
'auto_disk_config': 'auto_disk_config',
}
kwargs = {}
# take the preserve_ephemeral value into account only when the
# corresponding extension is active
if (self.ext_mgr.is_loaded('os-preserve-ephemeral-rebuild')
and 'preserve_ephemeral' in body):
kwargs['preserve_ephemeral'] = strutils.bool_from_string(
body['preserve_ephemeral'], strict=True)
if 'accessIPv4' in body:
self._validate_access_ipv4(body['accessIPv4'])
if 'accessIPv6' in body:
self._validate_access_ipv6(body['accessIPv6'])
if 'name' in body:
self._validate_server_name(body['name'])
for request_attribute, instance_attribute in attr_map.items():
try:
kwargs[instance_attribute] = body[request_attribute]
except (KeyError, TypeError):
pass
self._validate_metadata(kwargs.get('metadata', {}))
if 'files_to_inject' in kwargs:
personality = kwargs.pop('files_to_inject')
files_to_inject = self._get_injected_files(personality)
else:
files_to_inject = None
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
files_to_inject=files_to_inject,
**kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild')
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
# Add on the adminPass attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('createImage')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("createImage", {})
image_name = entity.get("name")
if not image_name:
msg = _("createImage entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, req, id)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
img = instance['image_ref']
if not img:
props = bdms.root_metadata(
context, self.compute_api.image_service,
self.compute_api.volume_api)
image_meta = {'properties': props}
else:
src_image = self.compute_api.image_service.\
show(context, img)
image_meta = dict(src_image)
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_meta,
image_name,
extra_properties=props)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage')
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
url_prefix = self._view_builder._update_glance_link_prefix(
req.application_url)
image_ref = os.path.join(url_prefix,
context.project_id,
'images',
image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
return password
def _validate_admin_password(self, password):
if not isinstance(password, six.string_types):
raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
def create_resource(ext_mgr):
return wsgi.Resource(Controller(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug(_("Removing options '%s' from query"),
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
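# For illustration (hypothetical values): with a non-admin context and
#     search_options = {'name': 'vm1', 'host': 'compute1'}
#     allowed_search_options = ('name',)
# remove_invalid_options() pops 'host' in place and leaves {'name': 'vm1'};
# an admin context keeps every option untouched.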
| nkrinner/nova | nova/api/openstack/compute/servers.py | Python | apache-2.0 | 61,851 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import fixtures
import time
from oslo_config import cfg
from nova import context
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova import utils
from nova.virt import fake
CONF = cfg.CONF
class TestEvacuateResourceTrackerRace(
test.TestCase, integrated_helpers.InstanceHelperMixin,
):
"""Demonstrate bug #1896463.
Trigger a race condition between an almost finished evacuation that is
dropping the migration context, and the _update_available_resource()
    periodic task that already loaded the instance list but hasn't loaded the
migration list yet. The result is that the PCI allocation made by the
evacuation is deleted by the overlapping periodic task run and the instance
will not have PCI allocation after the evacuation.
"""
def setUp(self):
super().setUp()
self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.useFixture(fixtures.MockPatch(
'nova.pci.utils.get_mac_by_pci_address',
return_value='52:54:00:1e:59:c6'))
self.useFixture(fixtures.MockPatch(
'nova.pci.utils.get_vf_num_by_pci_address',
return_value=1))
self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = 'latest'
self.api = self.admin_api
self.start_service('conductor')
self.start_service('scheduler')
self.flags(compute_driver='fake.FakeDriverWithPciResources')
self.useFixture(
fake.FakeDriverWithPciResources.
FakeDriverWithPciResourcesConfigFixture())
self.compute1 = self._start_compute('host1')
self.compute1_id = self._get_compute_node_id_by_host('host1')
self.compute1_service_id = self.admin_api.get_services(
host='host1', binary='nova-compute')[0]['id']
self.compute2 = self._start_compute('host2')
self.compute2_id = self._get_compute_node_id_by_host('host2')
self.compute2_service_id = self.admin_api.get_services(
host='host2', binary='nova-compute')[0]['id']
# add extra ports and the related network to the neutron fixture
# specifically for these tests. It cannot be added globally in the
        # fixture init as it adds a second network that makes auto-allocation
        # based tests fail due to ambiguous networks.
self.neutron._ports[self.neutron.sriov_port['id']] = \
copy.deepcopy(self.neutron.sriov_port)
self.neutron._networks[
self.neutron.network_2['id']] = self.neutron.network_2
self.neutron._subnets[
self.neutron.subnet_2['id']] = self.neutron.subnet_2
self.ctxt = context.get_admin_context()
def _get_compute_node_id_by_host(self, host):
# we specifically need the integer id of the node not the UUID so we
# need to use the old microversion
with utils.temporary_mutation(self.admin_api, microversion='2.52'):
hypers = self.admin_api.api_get(
'os-hypervisors').body['hypervisors']
for hyper in hypers:
if hyper['hypervisor_hostname'] == host:
return hyper['id']
self.fail('Hypervisor with hostname=%s not found' % host)
def _assert_pci_device_allocated(
self, instance_uuid, compute_node_id, num=1):
"""Assert that a given number of PCI devices are allocated to the
instance on the given host.
"""
devices = objects.PciDeviceList.get_by_instance_uuid(
self.ctxt, instance_uuid)
devices_on_host = [dev for dev in devices
if dev.compute_node_id == compute_node_id]
self.assertEqual(num, len(devices_on_host))
def test_evacuate_races_with_update_available_resource(self):
# Create a server with a direct port to have PCI allocation
server = self._create_server(
name='test-server-for-bug-1896463',
networks=[{'port': self.neutron.sriov_port['id']}],
host='host1'
)
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(
server['id'], self.compute2_id, num=0)
# stop and force down the compute the instance is on to allow
# evacuation
self.compute1.stop()
self.admin_api.put_service(
self.compute1_service_id, {'forced_down': 'true'})
# Inject some sleeps both in the Instance.drop_migration_context and
# the MigrationList.get_in_progress_and_error code to make them
# overlap.
# We want to create the following execution scenario:
# 1) The evacuation makes a move claim on the dest including the PCI
# claim. This means there is a migration context. But the evacuation
# is not complete yet so the instance.host does not point to the
# dest host.
# 2) The dest resource tracker starts an _update_available_resource()
# periodic task and this task loads the list of instances on its
# host from the DB. Our instance is not in this list due to #1.
# 3) The evacuation finishes, the instance.host is set to the dest host
# and the migration context is deleted.
        # 4) The periodic task now loads the list of in-progress migrations from
        #    the DB to check for incoming or outgoing migrations. However due
# to #3 our instance is not in this list either.
# 5) The periodic task cleans up every lingering PCI claim that is not
# connected to any instance collected above from the instance list
# and from the migration list. As our instance is not in either of
# the lists, the resource tracker cleans up the PCI allocation for
# the already finished evacuation of our instance.
#
# Unfortunately we cannot reproduce the above situation without sleeps.
        # We need the evac to start first, then the periodic to start but not
        # finish, then the evac to finish, then the periodic to finish. If we
        # trigger and run the whole periodic in a wrapper of
        # drop_migration_context then we could not reproduce the situation
        # described at #4). In general
# it is not
#
# evac
# |
# |
# | periodic
# | |
# | |
# | x
# |
# |
# x
#
# but
#
# evac
# |
# |
# | periodic
# | |
# | |
# | |
# x |
# |
# x
#
        # that is needed.
#
# Starting the periodic from the test in a separate thread at
# drop_migration_context() might work but that is an extra complexity
# in the test code. Also it might need a sleep still to make the
# reproduction stable but only one sleep instead of two.
orig_drop = objects.Instance.drop_migration_context
def slow_drop(*args, **kwargs):
time.sleep(1)
return orig_drop(*args, **kwargs)
self.useFixture(
fixtures.MockPatch(
'nova.objects.instance.Instance.drop_migration_context',
new=slow_drop))
orig_get_mig = objects.MigrationList.get_in_progress_and_error
def slow_get_mig(*args, **kwargs):
time.sleep(2)
return orig_get_mig(*args, **kwargs)
self.useFixture(
fixtures.MockPatch(
'nova.objects.migration.MigrationList.'
'get_in_progress_and_error',
new=slow_get_mig))
self.admin_api.post_server_action(server['id'], {'evacuate': {}})
# we trigger the _update_available_resource periodic to overlap with
# the already started evacuation
self._run_periodics()
self._wait_for_server_parameter(
server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'})
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(server['id'], self.compute2_id)
| openstack/nova | nova/tests/functional/regressions/test_bug_1896463.py | Python | apache-2.0 | 9,311 |
from pgshovel.interfaces.common_pb2 import (
Column,
Row,
Snapshot,
Timestamp,
)
from pgshovel.utilities.conversions import (
RowConverter,
to_snapshot,
to_timestamp,
)
from tests.pgshovel.streams.fixtures import reserialize
def test_row_conversion():
converter = RowConverter(sorted=True) # maintain sort order for equality checks
row = reserialize(
Row(
columns=[
Column(name='active', boolean=True),
Column(name='biography'),
Column(name='id', integer64=9223372036854775807),
Column(name='reputation', float=1.0),
Column(name='username', string='bob'),
],
),
)
decoded = converter.to_python(row)
assert decoded == {
'id': 9223372036854775807,
'username': 'bob',
'active': True,
'reputation': 1.0,
'biography': None,
}
assert converter.to_protobuf(decoded) == row
def test_snapshot_conversion():
assert to_snapshot('1:10:') == Snapshot(
min=1,
max=10,
)
def test_snapshot_conversion_in_progress():
assert to_snapshot('1:10:2,3,4') == Snapshot(
min=1,
max=10,
active=[2, 3, 4],
)
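# For reference (based on PostgreSQL's textual snapshot format, assumed here):
# the '1:10:...' strings above follow the 'xmin:xmax:xip_list' layout returned
# by txid_current_snapshot(), so to_snapshot() maps xmin -> min, xmax -> max and
# the comma-separated in-progress transaction ids -> active.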
def test_timestamp_conversion():
assert to_timestamp(1438814328.940597) == Timestamp(
seconds=1438814328,
nanos=940597057, # this is different due to floating point arithmetic
)
| fuziontech/pgshovel | tests/pgshovel/utilities/conversions.py | Python | apache-2.0 | 1,467 |
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import matplotlib.pyplot as plt
# ============= enthought library imports =======================
# from __future__ import absolute_import
# from __future__ import print_function
# import csv
#
# import numpy as np
# from six.moves import range
# from six.moves import zip
# ============= standard library imports ========================
# ============= local library imports ==========================
from numpy import exp, pi, sqrt, hstack, arange
#
# def unmix(ages, errors, initial_guess):
# ages_errors = list(zip(ages, errors))
#
# ts = initial_guess[0]
# pis = initial_guess[1]
#
# niterations = 20
# for _ in range(niterations):
# tis_n = []
# pis_n = []
# for pi, tj in zip(pis, ts):
# pn, tn = _unmix(ages_errors, pi, tj, pis, ts)
# tis_n.append(tn)
# pis_n.append(pn)
# pis = pis_n
# ts = tis_n
# # print ts, pis
# return ts, pis
#
#
# def _unmix(ages_errors, pi_j, tj_o, pis, ts):
# n = len(ages_errors)
# s = sum([pi_j * fij(ai_ei, tj_o) / Si(pis, ai_ei, ts)
# for ai_ei in ages_errors])
#
# pi_j = 1 / float(n) * s
#
# a = sum([pi_j * ai_ei[0] * fij(ai_ei, tj_o) / (ai_ei[1] ** 2 * Si(pis, ai_ei, ts))
# for ai_ei in ages_errors])
# b = sum([pi_j * fij(ai_ei, tj_o) / (ai_ei[1] ** 2 * Si(pis, ai_ei, ts))
# for ai_ei in ages_errors])
# tj = a / b
# return pi_j, tj
#
#
# def fij(ai_ei, tj):
# ai, ei = ai_ei
# return 1 / (ei * (2 * np.pi) ** 0.5) * np.exp(-(ai - tj) ** 2 / (2 * ei ** 2))
#
#
# def Si(pis, ai_ei, ts):
# return sum([pik * fij(ai_ei, tk) for pik, tk in zip(pis, ts)])
from numpy.random.mtrand import normal
def unmix(ages, ps, ts):
"""
ages = list of 2-tuples (age, 1sigma )
:param ages:
:param ps:
:param ts:
:return:
"""
niterations = 20
for _ in range(niterations):
tis_n = []
pis_n = []
for pi, ti in zip(ps, ts):
pn, tn = tj(ages, pi, ti, ps, ts)
tis_n.append(tn)
pis_n.append(pn)
ps = pis_n
ts = tis_n
return ps, ts
def si(ai, ei, ps, ts):
return sum([pk * fij(ai, ei, tk) for pk, tk in zip(ps, ts)])
def tj(ages, pj, to, ps, ts):
n = len(ages)
    pj = 1.0 / n * sum([pj * fij(ai, ei, to) / si(ai, ei, ps, ts) for ai, ei in ages])
a = [pj * ai * fij(ai, ei, to) / (ei ** 2 * si(ai, ei, ps, ts)) for ai, ei in ages]
b = [pj * fij(ai, ei, to) / (ei ** 2 * si(ai, ei, ps, ts)) for ai, ei in ages]
return pj, sum(a) / sum(b)
def fij(ai, ei, tj):
return 1 / (ei * sqrt(2 * pi)) * exp(-((ai - tj) ** 2) / (2 * ei ** 2))
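# For reference: fij() above is the normal density N(ai; tj, ei**2),
#     fij = 1 / (ei * sqrt(2 * pi)) * exp(-(ai - tj)**2 / (2 * ei**2)),
# si() is the mixture density at one data point, and tj() produces the updated
# proportion and weighted-mean age for a single component, so unmix() is
# effectively an EM-style iteration for a Gaussian mixture with fixed
# per-point sigmas. A minimal sketch of a call (hypothetical values):
#     ages = [(35.0, 0.1), (35.1, 0.1), (35.5, 0.1), (35.6, 0.1)]
#     ps, ts = unmix(ages, ps=[0.5, 0.5], ts=[35.0, 35.5])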
if __name__ == "__main__":
# [35.27,36.27] [0.59, 0.41]
# p = '/Users/ross/Sandbox/unmix_data.txt'
# with open(p, 'U') as rfile:
# reader = csv.reader(rfile, delimiter='\t')
# ages, errors = [], []
#
# for line in reader:
# age = float(line[0])
# error = float(line[1])
# ages.append(age)
# errors.append(error)
# a = np.random.normal(35, 1, 10)
# b = np.random.normal(35, 1, 10)
# c = np.random.normal(35, 1, 10)
# for ai, aj, ak in zip(a, b, c):
# ps = np.random.random_sample(3)
# t = ps.sum()
# ps = ps / t
#
# initial_guess = [[ai, aj, ak], ps]
# # print 'initial', initial_guess
# # initial_guess = [[30, 40], [0.9, 0.1]]
# print(unmix(ages, errors, initial_guess))
a = normal(35, 0.1, 10)
b = normal(35.5, 0.1, 10)
ages = hstack((a, b))
errors = [0.1] * 20
ts = [35, 35.5]
ps = [0.9, 0.1]
plt.plot(sorted(a), arange(10), "bo")
plt.plot(sorted(b), arange(10, 20, 1), "ro")
    print(unmix(list(zip(ages, errors)), ps, ts))
plt.show()
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/processing/unmix.py | Python | apache-2.0 | 4,643 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import subprocess
import tempfile
import time
import unittest
from unittest import mock
import psutil
import pytest
from airflow import settings
from airflow.cli import cli_parser
from airflow.cli.commands import webserver_command
from airflow.cli.commands.webserver_command import GunicornMonitor
from airflow.utils.cli import setup_locations
from tests.test_utils.config import conf_vars
class TestGunicornMonitor(unittest.TestCase):
def setUp(self) -> None:
self.monitor = GunicornMonitor(
gunicorn_master_pid=1,
num_workers_expected=4,
master_timeout=60,
worker_refresh_interval=60,
worker_refresh_batch_size=2,
reload_on_plugin_change=True,
)
mock.patch.object(self.monitor, '_generate_plugin_state', return_value={}).start()
mock.patch.object(self.monitor, '_get_num_ready_workers_running', return_value=4).start()
mock.patch.object(self.monitor, '_get_num_workers_running', return_value=4).start()
mock.patch.object(self.monitor, '_spawn_new_workers', return_value=None).start()
mock.patch.object(self.monitor, '_kill_old_workers', return_value=None).start()
mock.patch.object(self.monitor, '_reload_gunicorn', return_value=None).start()
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_wait_for_workers_to_start(self, mock_sleep):
self.monitor._get_num_ready_workers_running.return_value = 0
self.monitor._get_num_workers_running.return_value = 4
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_kill_excess_workers(self, mock_sleep):
self.monitor._get_num_ready_workers_running.return_value = 10
self.monitor._get_num_workers_running.return_value = 10
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_called_once_with(2) # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_start_new_workers_when_missing(self, mock_sleep):
self.monitor._get_num_ready_workers_running.return_value = 2
self.monitor._get_num_workers_running.return_value = 2
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_called_once_with(2) # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_start_new_workers_when_refresh_interval_has_passed(self, mock_sleep):
self.monitor._last_refresh_time -= 200
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_called_once_with(2) # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
self.assertAlmostEqual(self.monitor._last_refresh_time, time.monotonic(), delta=5)
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_reload_when_plugin_has_been_changed(self, mock_sleep):
self.monitor._generate_plugin_state.return_value = {'AA': 12}
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
self.monitor._generate_plugin_state.return_value = {'AA': 32}
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
self.monitor._generate_plugin_state.return_value = {'AA': 32}
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_called_once_with() # pylint: disable=no-member
self.assertAlmostEqual(self.monitor._last_refresh_time, time.monotonic(), delta=5)
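    # Note on the behaviour exercised above: a plugin-state change is acted on
    # only once it is observed again on a subsequent check, so the first
    # _check_workers() call after the change does nothing and the following one
    # triggers _reload_gunicorn().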
class TestGunicornMonitorGeneratePluginState(unittest.TestCase):
@staticmethod
def _prepare_test_file(filepath: str, size: int):
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w") as file:
file.write("A" * size)
file.flush()
def test_should_detect_changes_in_directory(self):
with tempfile.TemporaryDirectory() as tempdir, mock.patch(
"airflow.cli.commands.webserver_command.settings.PLUGINS_FOLDER", tempdir
):
self._prepare_test_file(f"{tempdir}/file1.txt", 100)
self._prepare_test_file(f"{tempdir}/nested/nested/nested/nested/file2.txt", 200)
self._prepare_test_file(f"{tempdir}/file3.txt", 300)
monitor = GunicornMonitor(
gunicorn_master_pid=1,
num_workers_expected=4,
master_timeout=60,
worker_refresh_interval=60,
worker_refresh_batch_size=2,
reload_on_plugin_change=True,
)
# When the files have not changed, the result should be constant
state_a = monitor._generate_plugin_state()
state_b = monitor._generate_plugin_state()
self.assertEqual(state_a, state_b)
self.assertEqual(3, len(state_a))
# Should detect new file
self._prepare_test_file(f"{tempdir}/file4.txt", 400)
state_c = monitor._generate_plugin_state()
self.assertNotEqual(state_b, state_c)
self.assertEqual(4, len(state_c))
# Should detect changes in files
self._prepare_test_file(f"{tempdir}/file4.txt", 450)
state_d = monitor._generate_plugin_state()
self.assertNotEqual(state_c, state_d)
self.assertEqual(4, len(state_d))
# Should support large files
self._prepare_test_file(f"{tempdir}/file4.txt", 4000000)
state_d = monitor._generate_plugin_state()
self.assertNotEqual(state_c, state_d)
self.assertEqual(4, len(state_d))
class TestCLIGetNumReadyWorkersRunning(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
def setUp(self):
self.children = mock.MagicMock()
self.child = mock.MagicMock()
self.process = mock.MagicMock()
self.monitor = GunicornMonitor(
gunicorn_master_pid=1,
num_workers_expected=4,
master_timeout=60,
worker_refresh_interval=60,
worker_refresh_batch_size=2,
reload_on_plugin_change=True,
)
def test_ready_prefix_on_cmdline(self):
self.child.cmdline.return_value = [settings.GUNICORN_WORKER_READY_PREFIX]
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 1)
def test_ready_prefix_on_cmdline_no_children(self):
self.process.children.return_value = []
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
def test_ready_prefix_on_cmdline_zombie(self):
self.child.cmdline.return_value = []
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
def test_ready_prefix_on_cmdline_dead_process(self):
self.child.cmdline.side_effect = psutil.NoSuchProcess(11347)
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
class TestCliWebServer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
def setUp(self) -> None:
self._check_processes()
self._clean_pidfiles()
def _check_processes(self, ignore_running=False):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
exit_code_pgrep_webserver = subprocess.Popen(["pgrep", "-c", "-f", "airflow webserver"]).wait()
exit_code_pgrep_gunicorn = subprocess.Popen(["pgrep", "-c", "-f", "gunicorn"]).wait()
if exit_code_pgrep_webserver != 1 or exit_code_pgrep_gunicorn != 1:
subprocess.Popen(["ps", "-ax"]).wait()
if exit_code_pgrep_webserver != 1:
subprocess.Popen(["pkill", "-9", "-f", "airflow webserver"]).wait()
if exit_code_pgrep_gunicorn != 1:
subprocess.Popen(["pkill", "-9", "-f", "gunicorn"]).wait()
if not ignore_running:
raise AssertionError(
"Background processes are running that prevent the test from passing successfully."
)
def tearDown(self) -> None:
self._check_processes(ignore_running=True)
self._clean_pidfiles()
def _clean_pidfiles(self):
pidfile_webserver = setup_locations("webserver")[0]
pidfile_monitor = setup_locations("webserver-monitor")[0]
if os.path.exists(pidfile_webserver):
os.remove(pidfile_webserver)
if os.path.exists(pidfile_monitor):
os.remove(pidfile_monitor)
def _wait_pidfile(self, pidfile):
start_time = time.monotonic()
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception: # pylint: disable=broad-except
                if time.monotonic() - start_time > 60:
raise
time.sleep(1)
def test_cli_webserver_foreground(self):
with mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
# Run webserver in foreground and terminate it.
proc = subprocess.Popen(["airflow", "webserver"])
self.assertEqual(None, proc.poll())
# Wait for process
time.sleep(10)
# Terminate webserver
proc.terminate()
# -15 - the server was stopped before it started
# 0 - the server terminated correctly
self.assertIn(proc.wait(60), (-15, 0))
def test_cli_webserver_foreground_with_pid(self):
with tempfile.TemporaryDirectory(prefix='tmp-pid') as tmpdir:
pidfile = f"{tmpdir}/pidfile"
with mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
proc = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
self.assertEqual(None, proc.poll())
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
proc.terminate()
self.assertEqual(0, proc.wait(60))
@pytest.mark.quarantined
def test_cli_webserver_background(self):
with tempfile.TemporaryDirectory(prefix="gunicorn") as tmpdir, mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
pidfile_webserver = f"{tmpdir}/pidflow-webserver.pid"
pidfile_monitor = f"{tmpdir}/pidflow-webserver-monitor.pid"
stdout = f"{tmpdir}/airflow-webserver.out"
stderr = f"{tmpdir}/airflow-webserver.err"
logfile = f"{tmpdir}/airflow-webserver.log"
try:
# Run webserver as daemon in background. Note that the wait method is not called.
proc = subprocess.Popen(
[
"airflow",
"webserver",
"--daemon",
"--pid",
pidfile_webserver,
"--stdout",
stdout,
"--stderr",
stderr,
"--log-file",
logfile,
]
)
self.assertEqual(None, proc.poll())
pid_monitor = self._wait_pidfile(pidfile_monitor)
self._wait_pidfile(pidfile_webserver)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(
0, subprocess.Popen(["pgrep", "-f", "-c", "airflow webserver --daemon"]).wait()
)
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "-f", "gunicorn: master"]).wait())
# Terminate monitor process.
proc = psutil.Process(pid_monitor)
proc.terminate()
self.assertIn(proc.wait(120), (0, None))
self._check_processes()
except Exception:
# List all logs
subprocess.Popen(["ls", "-lah", tmpdir]).wait()
# Dump all logs
subprocess.Popen(["bash", "-c", f"ls {tmpdir}/* | xargs -n 1 -t cat"]).wait()
raise
# Patch for causing webserver timeout
@mock.patch(
"airflow.cli.commands.webserver_command.GunicornMonitor._get_num_workers_running", return_value=0
)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten timeout so that this test doesn't take too long
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
webserver_command.webserver(args)
self.assertEqual(e.exception.code, 1)
def test_cli_webserver_debug(self):
env = os.environ.copy()
proc = psutil.Popen(["airflow", "webserver", "--debug"], env=env)
time.sleep(3) # wait for webserver to start
return_code = proc.poll()
self.assertEqual(
None, return_code, f"webserver terminated with return code {return_code} in debug mode"
)
proc.terminate()
self.assertEqual(-15, proc.wait(60))
def test_cli_webserver_access_log_format(self):
# json access log format
access_logformat = (
"{\"ts\":\"%(t)s\",\"remote_ip\":\"%(h)s\",\"request_id\":\"%({"
"X-Request-Id}i)s\",\"code\":\"%(s)s\",\"request_method\":\"%(m)s\","
"\"request_path\":\"%(U)s\",\"agent\":\"%(a)s\",\"response_time\":\"%(D)s\","
"\"response_length\":\"%(B)s\"} "
)
with tempfile.TemporaryDirectory() as tmpdir, mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
access_logfile = f"{tmpdir}/access.log"
# Run webserver in foreground and terminate it.
proc = subprocess.Popen(
[
"airflow",
"webserver",
"--access-logfile",
access_logfile,
"--access-logformat",
access_logformat,
]
)
self.assertEqual(None, proc.poll())
# Wait for webserver process
time.sleep(10)
proc2 = subprocess.Popen(["curl", "http://localhost:8080"])
proc2.wait(10)
try:
file = open(access_logfile)
log = json.loads(file.read())
self.assertEqual('127.0.0.1', log.get('remote_ip'))
self.assertEqual(len(log), 9)
self.assertEqual('GET', log.get('request_method'))
except OSError:
print("access log file not found at " + access_logfile)
# Terminate webserver
proc.terminate()
# -15 - the server was stopped before it started
# 0 - the server terminated correctly
self.assertIn(proc.wait(60), (-15, 0))
self._check_processes()
| DinoCow/airflow | tests/cli/commands/test_webserver_command.py | Python | apache-2.0 | 18,370 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.async import async
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class TimerEvent(async.DelayedCall):
def __init__(self, actor_id, delay, trigger_loop, repeats=False):
super(TimerEvent, self).__init__(delay, callback=self.trigger)
self._actor_id = actor_id
self._triggered = False
self.trigger_loop = trigger_loop
self.repeats = repeats
_log.debug("Set calvinsys timer %f %s on %s" % (delay, "repeat" if self.repeats else "", self._actor_id))
@property
def triggered(self):
return self._triggered
def ack(self):
self._triggered = False
def trigger(self):
_log.debug("Trigger calvinsys timer on %s" % (self._actor_id))
self._triggered = True
if self.repeats:
self.reset()
self.trigger_loop(actor_ids=[self._actor_id])
class TimerHandler(object):
def __init__(self, node, actor):
super(TimerHandler, self).__init__()
self._actor = actor
self.node = node
def once(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop)
def repeat(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop, repeats=True)
def register(node, actor, events=None):
"""
    register() is called when the Event-system object is created.
    Place an object in the event object - in this case the
    node's only timer object.
Also register any hooks for actor migration.
@TODO: Handle migration (automagically and otherwise.)
"""
return TimerHandler(node=node, actor=actor)
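# A minimal usage sketch (hypothetical actor code): an actor that received a
# handler from register() could do
#     timer = handler.repeat(0.5)   # re-arms itself and wakes the actor every 0.5 s
#     if timer.triggered:
#         timer.ack()               # clear the flag until the next firing
# TimerEvent.trigger() sets the flag, calls reset() when repeats=True and wakes
# the actor via the scheduler's trigger_loop.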
| les69/calvin-base | calvin/calvinsys/events/timer.py | Python | apache-2.0 | 2,309 |
#!/usr/bin/python
import io
import os
import unittest
import logging
import uuid
from mediafire import MediaFireApi, MediaFireUploader, UploadSession
from mediafire.uploader import UPLOAD_SIMPLE_LIMIT_BYTES
APP_ID = '42511'
MEDIAFIRE_EMAIL = os.environ.get('MEDIAFIRE_EMAIL')
MEDIAFIRE_PASSWORD = os.environ.get('MEDIAFIRE_PASSWORD')
class MediaFireSmokeBaseTestCase(object):
"""Smoke tests for API"""
class BaseTest(unittest.TestCase):
def setUp(self):
# Reset logging to info to avoid leaking credentials
logger = logging.getLogger('mediafire.api')
logger.setLevel(logging.INFO)
self.api = MediaFireApi()
session = self.api.user_get_session_token(
app_id=APP_ID, email=MEDIAFIRE_EMAIL,
password=MEDIAFIRE_PASSWORD)
self.api.session = session
@unittest.skipIf('CI' not in os.environ, "Running outside CI environment")
class MediaFireSmokeSimpleTest(MediaFireSmokeBaseTestCase.BaseTest):
"""Simple tests"""
def test_user_get_info(self):
result = self.api.user_get_info()
self.assertEqual(result["user_info"]["display_name"],
u"Coalmine Smoketest")
@unittest.skipIf('CI' not in os.environ, "Running outside CI environment")
class MediaFireSmokeWithDirectoryTest(MediaFireSmokeBaseTestCase.BaseTest):
"""Smoke tests requiring temporary directory"""
def setUp(self):
super(MediaFireSmokeWithDirectoryTest, self).setUp()
folder_uuid = str(uuid.uuid4())
result = self.api.folder_create(foldername=folder_uuid)
self.folder_key = result["folder_key"]
def tearDown(self):
self.api.folder_purge(self.folder_key)
def test_upload_small(self):
"""Test simple upload"""
# make sure we most likely will get upload/simple
data = b'This is a tiny file content: ' + os.urandom(32)
fd = io.BytesIO(data)
uploader = MediaFireUploader(self.api)
with UploadSession(self.api):
result = uploader.upload(fd, 'smallfile.txt',
folder_key=self.folder_key)
self.assertIsNotNone(result.quickkey)
self.assertEqual(result.action, 'upload/simple')
def test_upload_large(self):
"""Test large file upload"""
# make sure we will get upload/resumable, prefix + 4MiB
data = b'Long line is long: ' + os.urandom(UPLOAD_SIMPLE_LIMIT_BYTES)
fd = io.BytesIO(data)
uploader = MediaFireUploader(self.api)
with UploadSession(self.api):
result = uploader.upload(fd, 'bigfile.txt',
folder_key=self.folder_key)
self.assertIsNotNone(result.quickkey)
self.assertEqual(result.action, 'upload/resumable')
if __name__ == "__main__":
unittest.main()
| MediaFire/mediafire-python-open-sdk | tests/test_smoke.py | Python | bsd-2-clause | 2,879 |
from nose.tools import (assert_is_none, assert_is_instance, assert_in,
assert_is_not_none, assert_true, assert_false,
assert_equal)
from datetime import datetime
from mongoengine import connect
from qirest_client.model.subject import Subject
from qirest_client.model.uom import Weight
from qirest_client.model.clinical import (Biopsy, Surgery, Drug)
from qirest.test.helpers import seed
MODELING_RESULT_PARAMS = ['fxl_k_trans', 'fxr_k_trans', 'delta_k_trans', 'v_e', 'tau_i']
"""The test seed modeling result parameters."""
class TestSeed(object):
"""
This TestSeed class tests the seed helper utility.
    Note: this test drops the ``qiprofile_test`` Mongo database
at the beginning and end of execution.
"""
def setup(self):
self._connection = connect(db='qiprofile_test')
self._connection.drop_database('qiprofile_test')
self._subjects = seed.seed()
def tearDown(self):
self._connection.drop_database('qiprofile_test')
def test_serialization(self):
for saved_sbj in self._subjects:
query = dict(project=saved_sbj.project,
collection=saved_sbj.collection,
number=saved_sbj.number)
fetched_sbj = Subject.objects.get(**query)
self._validate_subject(fetched_sbj)
SESSION_CNT = dict(
Breast=4,
Sarcoma=3
)
def test_reseed(self):
subjects = seed.seed()
expected = set(str(sbj) for sbj in self._subjects)
actual = set(str(sbj) for sbj in subjects)
assert_equal(actual, expected, "Reseed result is incorrect -"
"\nexpected:\n%s\nfound:\n%s" %
(expected, actual))
def _validate_subject(self, subject):
collections = ((coll.name for coll in seed.COLLECTION_BUILDERS))
assert_in(subject.collection, collections,
"Collection is invalid: %s" % subject.collection)
self._validate_demographics(subject)
self._validate_clincal_data(subject)
self._validate_sessions(subject)
def _validate_demographics(self, subject):
assert_is_not_none(subject.gender, "%s is missing gender" % subject)
def _validate_clincal_data(self, subject):
# There are three treatments.
self._validate_treatments(subject)
# Validate the clinical encounters.
self._validate_clinical_encounters(subject)
def _validate_treatments(self, subject):
# There are three treatments.
treatments = subject.treatments
assert_is_not_none(treatments, "%s has no treatments" % subject)
assert_equal(len(treatments), 3,
"%s Subject %d treatments count is incorrect: %d" %
(subject.collection, subject.number, len(treatments)))
# Breast has neoadjuvant drugs.
if subject.collection == 'Breast':
self._validate_breast_treatments(subject, treatments)
def _validate_breast_treatments(self, subject, treatments):
# Breast has neoadjuvant drugs.
neo_rx = next(((trt for trt in treatments if trt.treatment_type == 'Neoadjuvant')),
None)
assert_is_not_none(neo_rx, ("%s Subject %d is missing a neodjuvant" +
" treatment") % (subject.collection, subject.number))
dosages = neo_rx.dosages
assert_equal(len(dosages), 2,
(("%s session %d neoadjuvant treatment dosage count is" +
" incorrect: %d") % (subject.collection, subject.number, len(dosages))))
# Validate the agent type and dosage unit.
for dosage in dosages:
agent = dosage.agent
assert_is_instance(agent, Drug,
"%s Subject %d neoadjuvant agent is not a drug" %
(subject.collection, subject.number))
amount = dosage.amount
assert_is_not_none(amount, ("%s Subject %d is missing a neodjuvant drug" +
" dosage amount") % (subject.collection, subject.number))
def _validate_clinical_encounters(self, subject):
# There are two clinical encounters.
cln_encs = list(subject.clinical_encounters)
assert_is_not_none(cln_encs, "%s has no encounters" % subject)
assert_equal(len(cln_encs), 2,
"%s Subject %d encounter count is incorrect: %d" %
(subject.collection, subject.number, len(cln_encs)))
# Each encounter has a subject weight.
for enc in cln_encs:
assert_is_not_none(enc.weight, "%s encounter %s is missing the"
" subject weight" % (subject, enc))
assert_is_instance(enc.weight, int,
"%s encounter %s weight type is incorrect: %s" %
(subject, enc, enc.weight.__class__))
# There is a biopsy with a pathology report.
biopsy = next((enc for enc in cln_encs if isinstance(enc, Biopsy)),
None)
assert_is_not_none(biopsy, "%s Subject %d is missing a biopsy" %
(subject.collection, subject.number))
self._validate_pathology(subject, biopsy.pathology)
# Breast pre-neoadjuvant biopsy does not have a RCB.
if subject.collection == 'Breast':
tumor_pathology = biopsy.pathology.tumors[0]
assert_is_none(tumor_pathology.rcb,
"%s biopsy pathology report incorrectly has a RCB"
" status" % subject)
# There is a surgery with a pathology report.
surgery = next((enc for enc in cln_encs if isinstance(enc, Surgery)),
None)
assert_is_not_none(surgery, "%s Subject %d is missing a surgery" %
(subject.collection, subject.number))
assert_is_not_none(surgery.pathology,
"%s surgery is missing a pathology report" % subject)
self._validate_pathology(subject, surgery.pathology)
# Surgery has a RCB.
if subject.collection == 'Breast':
tumor_pathology = surgery.pathology.tumors[0]
assert_is_not_none(tumor_pathology.rcb,
"%s surgery pathology report is missing a"
" RCB status" % subject)
def _validate_pathology(self, subject, pathology_report):
assert_is_not_none(pathology_report, "%s is missing a pathology"
" report" % subject)
assert_false(len(pathology_report.tumors) == 0,
"%s has no pathology tumor report")
for tumor_pathology in pathology_report.tumors:
self._validate_tnm(subject, tumor_pathology.tnm)
# The tumor-specific tests.
if subject.collection == 'Breast':
self._validate_breast_pathology(subject, tumor_pathology)
elif subject.collection == 'Sarcoma':
self._validate_sarcoma_pathology(subject, tumor_pathology)
def _validate_tnm(self, subject, tnm):
assert_is_not_none(tnm, "%s is missing a TNM" % subject)
assert_is_not_none(tnm.tumor_type,
"%s TNM is missing the tumor type" % subject)
assert_is_not_none(tnm.grade,
"%s TNM is missing the grade" % subject)
assert_is_not_none(tnm.size,
"%s TNM is missing the composite size object" %
subject)
assert_is_not_none(tnm.size.tumor_size,
"%s TNM is missing the size score" % subject)
assert_is_not_none(tnm.lymph_status,
"%s TNM is missing the lymph status" % subject)
assert_is_not_none(tnm.lymphatic_vessel_invasion,
"%s TNM is missing the lymphati vessel invasion"
% subject)
assert_is_not_none(tnm.metastasis,
"%s TNM is missing the metastasis" % subject)
def _validate_breast_pathology(self, subject, pathology):
estrogen = next((hr for hr in pathology.hormone_receptors
if hr.hormone == 'estrogen'),
None)
assert_is_not_none(estrogen, "%s pathology report is missing"
" an estrogen status" % subject)
progesterone = next((hr for hr in pathology.hormone_receptors
if hr.hormone == 'progesterone'),
None)
assert_is_not_none(progesterone, "%s pathology report is missing a"
" progesterone status" % subject)
assert_is_not_none(pathology.genetic_expression,
"%s pathology report is missing a genetic"
" expression status" % subject)
assert_is_not_none(pathology.genetic_expression.her2_neu_ihc,
"%s pathology report is missing a"
" HER2 NEU IHC status" % subject)
assert_is_not_none(pathology.genetic_expression.her2_neu_fish,
"%s pathology report is missing a"
" HER2 NEU FISH status" % subject)
assert_is_not_none(pathology.genetic_expression.ki67,
"%s pathology report is missing a"
" Ki67 status" % subject)
# The first breast subject has value overrides.
if subject.number == 1:
assert_true(estrogen.positive, "The first Breast subject is not"
" estrogen-receptor-positive")
assert_equal(pathology.tnm.lymph_status, 0,
"The first Breast subject lymph status is incorrect")
# A subject who is estrogen-receptor-positive and has no lymph nodes
# has a normalized assay.
if estrogen.positive and not pathology.tnm.lymph_status:
assay = pathology.genetic_expression.normalized_assay
assert_is_not_none(assay, "%s pathology report with HER2"
" positive and no lymph nodes is missing"
" a normalized assay" % subject)
assert_is_not_none(assay.gstm1, "%s pathology report"
" normalized assay is missing"
" a GSTM1 result" % subject)
assert_is_not_none(assay.cd68, "%s pathology report"
" normalized assay is missing"
" a CD68 result" % subject)
assert_is_not_none(assay.bag1, "%s pathology report"
" normalized assay is missing"
" a BAG1 result" % subject)
assert_is_not_none(assay.her2, "%s pathology report"
" normalized assay is missing"
" the HER2 group" % subject)
assert_is_not_none(assay.estrogen, "%s pathology report"
" normalized assay is missing"
" the estrogen group" % subject)
assert_is_not_none(assay.proliferation, "%s pathology report"
" normalized assay is"
" missing the proliferation"
" group" % subject)
assert_is_not_none(assay.invasion, "%s pathology report"
" normalized assay is missing"
" the invasion group" % subject)
def _validate_sarcoma_pathology(self, subject, pathology):
assert_is_not_none(pathology.location,
"%s pathology report is missing a tumor location" % subject)
def _validate_sessions(self, subject):
sessions = list(subject.sessions)
assert_is_not_none(sessions, "%s has no sessions" % subject)
session_cnt = TestSeed.SESSION_CNT[subject.collection]
assert_equal(len(sessions), session_cnt, "%s session count is incorrect: %d" %
(subject, len(sessions)))
for i, session in enumerate(sessions):
# Set a session number for reporting.
session.number = i + 1
self._validate_session(subject, session)
def _validate_session(self, subject, session):
assert_is_not_none(session.date,
"%s session %d is missing the acquisition date" %
(subject, session.number))
assert_is_instance(session.date, datetime,
"%s session %d acquisition date type is incorrect: %s" %
(subject, session.number, session.date.__class__))
self._validate_modeling(subject, session)
self._validate_session_detail(subject, session)
def _validate_modeling(self, subject, session):
# The registration is modeled.
assert_equal(len(session.modelings), 1,
"%s session %d modeling length is incorrect: %d" %
(subject, session.number, len(session.modelings)))
modeling = session.modelings[0]
assert_is_not_none(modeling.resource,
"%s session %d is missing the modeling resource" %
(subject, session.number))
assert_is_not_none(modeling.protocol,
"%s session %d modeling %s is missing the protocol" %
(subject, session.number, modeling.resource))
assert_is_not_none(modeling.source,
"%s session %d modeling %s is missing the source" %
(subject, session.number, modeling.resource))
# Validate the modeling result.
for param in MODELING_RESULT_PARAMS:
value = modeling.result[param]
assert_is_not_none(value,
"%s Subject %d modeling %s is missing a %s parameter" %
(subject.collection, subject.number, modeling.resource, param))
assert_is_not_none(value.image,
"%s Subject %d modeling %s is missing a %s image" %
(subject.collection, subject.number, modeling.resource, param))
metadata = value.image.metadata
assert_is_not_none(metadata,
"%s Subject %d modeling %s is missing %s metadata" %
(subject.collection, subject.number, modeling.resource, param))
avg = metadata.get('average_intensity')
assert_is_not_none(avg,
"%s Subject %d modeling %s is missing %s intensity" %
(subject.collection, subject.number, modeling.resource, param))
# The delta Ktrans result has an overlay.
label_map = modeling.result['delta_k_trans'].label_map
assert_is_not_none(label_map,
"%s Subject %d modeling is missing a label_map" %
(subject.collection, subject.number))
assert_is_not_none(label_map.name,
"%s Subject %d modeling label map is missing a file name" %
(subject.collection, subject.number))
assert_is_not_none(label_map.color_table,
"%s Subject %d modeling label map is missing a color table" %
(subject.collection, subject.number))
def _validate_session_detail(self, subject, session):
assert_is_not_none(session.detail, "%s session %d is missing detail" %
(subject, session.number))
# Validate the scans.
scans = session.detail.scans
assert_equal(len(scans), 2, "%s session %d scan count is incorrect: %d" %
(subject, session.number, len(scans)))
# The T1 scan.
scan = scans[0]
coll = seed.builder_for(subject.collection)
expected_volume_cnt = coll.options.volume_count
assert_equal(len(scan.volumes.images), expected_volume_cnt,
"%s session %d scan %d volumes count is incorrect: %d" %
(subject, session.number, scan.number, len(scan.volumes.images)))
for i, image in enumerate(scan.volumes.images):
assert_is_not_none(image.metadata,
"%s session %d scan %d volume %d is missing metadata" %
(subject, session.number, scan.number, i + 1))
avg = image.metadata.get('average_intensity')
assert_is_not_none(avg,
"%s session %d scan %d volume %d is missing an intensity" %
(subject, session.number, scan.number, i + 1))
# Verify that intensities are floats.
assert_true(isinstance(avg, float),
"%s session %d scan %d volume %d intensity type is"
" incorrect for value %s: %s" %
(subject, session.number, scan.number, i + 1, avg, avg.__class__))
# Validate the registration.
regs = scan.registrations
assert_equal(len(regs), 1, "%s session %d scan %d registration count"
" is incorrect: %d" %
(subject, session.number, scan.number, len(regs)))
for reg in regs:
for i, image in enumerate(reg.volumes.images):
assert_is_not_none(image.metadata,
"%s session %d scan %d registration %s volume %d"
" is missing metadata" %
(subject, session.number, scan.number,
reg.volumes.name, i + 1))
avg = image.metadata.get('average_intensity')
assert_is_not_none(avg,
"%s session %d scan %d registration %s volume %d"
" is missing an intensity" %
(subject, session.number, scan.number,
reg.volumes.name, i + 1))
assert_true(isinstance(avg, float),
"%s session %d scan %d registration %s volume %d intensity"
" type is incorrect for value %s: %s" %
(subject, session.number, scan.number, reg.volumes.name,
i + 1, avg, avg.__class__))
# The T2 scan has one volume without an intensity value.
scan = scans[1]
assert_equal(len(scan.volumes.images), 1,
"%s session %d scan %d volumes count is incorrect: %d" %
(subject, session.number, scan.number, len(scan.volumes.images)))
image = scan.volumes.images[0]
assert_true(not image.metadata,
"%s session %d scan %d volume incorrectly has metadata" %
(subject, session.number, scan.number))
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
| ohsu-qin/qirest | qirest/test/unit/test_seed.py | Python | bsd-2-clause | 19,873 |
import rppy
import numpy as np
import matplotlib.pyplot as plt
vp1 = 3000
vs1 = 1500
p1 = 2000
e1_1 = 0.0
d1_1 = 0.0
y1_1 = 0.0
e2_1 = 0.0
d2_1 = 0.0
y2_1 = 0.0
d3_1 = 0.0
chi1 = 0.0
C1 = rppy.reflectivity.Cij(vp1, vs1, p1, e1_1, d1_1, y1_1, e2_1, d2_1, y2_1, d3_1)
vp2 = 4000
vs2 = 2000
p2 = 2200
e1_2 = 0.0
d1_2 = 0.0
y1_2 = 0.0
e2_2 = 0.0
d2_2 = 0.0
y2_2 = 0.0
d3_2 = 0.0
chi2 = 0.0
C2 = rppy.reflectivity.Cij(vp2, vs2, p2, e1_2, d1_2, y1_2, e2_2, d2_2, y2_2, d3_2)
phi = np.arange(0, 90, 1)
theta = np.arange(0, 90, 1)
loopang = phi
theta = np.array([30])
rphti = np.zeros(np.shape(loopang))
rpzoe = np.zeros(np.shape(loopang))
rprug = np.zeros(np.shape(loopang))
for aid, val in enumerate(loopang):
rphti[aid] = rppy.reflectivity.exact_ortho(C1, p1, C2, p2, chi1, chi2, loopang[aid], theta)
rprug[aid] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e2_1, d2_1, y2_1, vp2, vs2, p2, e2_2, d2_2, y2_2, np.radians(theta), np.radians(loopang[aid]))
rpzoe[aid] = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, np.radians(theta))
plt.figure(1)
plt.plot(loopang, rphti, loopang, rprug, loopang, rpzoe)
plt.legend(['hti', 'ruger', 'zoe'])
plt.show() | shear/rppy | temp_test_ortho.py | Python | bsd-2-clause | 1,170 |
"""
WSGI config for skeleton project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "skeleton.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| universalcore/unicore-cms-django | project/wsgi.py | Python | bsd-2-clause | 1,424 |
from invoke import task, Collection
@task
def toplevel(ctx):
pass
@task
def subtask(ctx):
pass
ns = Collection(
toplevel,
Collection('a', subtask,
Collection('nother', subtask)
)
)
| mkusz/invoke | tests/_support/deeper_ns_list.py | Python | bsd-2-clause | 212 |
from cStringIO import StringIO
from datetime import datetime
from unidecode import unidecode
from handler import Patobj, PatentHandler
import re
import uuid
import xml.sax
import xml_util
import xml_driver
xml_string = 'ipg050104.xml'
xh = xml_driver.XMLHandler()
parser = xml_driver.make_parser()
parser.setContentHandler(xh)
parser.setFeature(xml_driver.handler.feature_external_ges, False)
l = xml.sax.xmlreader.Locator()
xh.setDocumentLocator(l)
#parser.parse(StringIO(xml_string))
parser.parse(xml_string)
print "parsing done"
#print type(xh.root.us_bibliographic_data_grant.publication_reference.contents_of('document_id', '', as_string=False))
print xh.root.claims.contents_of('claim', '', as_string=True, upper=False)
#print type(xh.root.us_bibliographic_data_grant.publication_reference.contents_of('document_id', '', as_string=True))
#print xh.root.us_bibliographic_data_grant.publication_reference.contents_of('document_id', '', as_string=True)
| namunu/MBS_Patent | parser_test/test.py | Python | bsd-2-clause | 962 |
# --------------------------------------------------------------------------------------
# Copyright 2016, Benedikt J. Daurer, Filipe R.N.C. Maia, Max F. Hantke, Carl Nettelblad
# Hummingbird is distributed under the terms of the Simplified BSD License.
# -------------------------------------------------------------------------
"""Translates between LCLS events and Hummingbird ones"""
from __future__ import print_function # Compatibility with python 2 and 3
import os
import logging
from backend.event_translator import EventTranslator
from backend.record import Record, add_record
import psana
import numpy
import datetime
from pytz import timezone
from . import ureg
from backend import Worker
import ipc
from hummingbird import parse_cmdline_args
_argparser = None
def add_cmdline_args():
global _argparser
from utils.cmdline_args import argparser
_argparser = argparser
group = _argparser.add_argument_group('LCLS', 'Options for the LCLS event translator')
group.add_argument('--lcls-run-number', metavar='lcls_run_number', nargs='?',
help="run number",
type=int)
group.add_argument('--lcls-number-of-frames', metavar='lcls_number_of_frames', nargs='?',
help="number of frames to be processed",
type=int)
# ADUthreshold for offline analysis
#group.add_argument('--ADUthreshold', metavar='ADUthreshold', nargs='?',
# help="ADU threshold",
# type=int)
# Hitscore threshold for offline analysis
#group.add_argument('--hitscore-thr', metavar='hitscore_thr', nargs='?',
# help="Hitscore threshold",
# type=int)
# Output directory for offline analysis
#group.add_argument('--out-dir', metavar='out_dir', nargs='?',
# help="Output directory",
# type=str)
# Reduce output from offline analysis
#group.add_argument('--reduced-output',
# help="Write only very few data to output file",
# action='store_true')
PNCCD_IDS = ['pnccdFront', 'pnccdBack']
ACQ_IDS = [('ACQ%i' % i) for i in range(1,4+1)]
class LCLSTranslator(object):
"""Translate between LCLS events and Hummingbird ones"""
def __init__(self, state):
self.timestamps = None
self.library = 'psana'
config_file = None
if('LCLS/PsanaConf' in state):
config_file = os.path.abspath(state['LCLS/PsanaConf'])
elif('LCLS' in state and 'PsanaConf' in state['LCLS']):
config_file = os.path.abspath(state['LCLS']['PsanaConf'])
if(config_file is not None):
if(not os.path.isfile(config_file)):
raise RuntimeError("Could not find [LCLS][PsanaConf]: %s" %
(config_file))
logging.info("Info: Found configuration file %s.", config_file)
psana.setConfigFile(config_file)
if 'LCLS/CalibDir' in state:
calibdir = state['LCLS/CalibDir']
logging.info("Setting calib-dir to %s" % calibdir)
psana.setOption('psana.calib-dir', calibdir)
elif('LCLS' in state and 'CalibDir' in state['LCLS']):
calibdir = state['LCLS']['CalibDir']
logging.info("Setting calib-dir to %s" % calibdir)
psana.setOption('psana.calib-dir', calibdir)
if('LCLS/DataSource' in state):
dsrc = state['LCLS/DataSource']
elif('LCLS' in state and 'DataSource' in state['LCLS']):
dsrc = state['LCLS']['DataSource']
else:
raise ValueError("You need to set the '[LCLS][DataSource]'"
" in the configuration")
cmdline_args = _argparser.parse_args()
self.N = cmdline_args.lcls_number_of_frames
if cmdline_args.lcls_run_number is not None:
dsrc += ":run=%i" % cmdline_args.lcls_run_number
# Cache times of events that shall be extracted from XTC (does not work for stream)
self.event_slice = slice(0,None,1)
if 'times' in state or 'fiducials' in state:
if not ('times' in state and 'fiducials' in state):
raise ValueError("Times or fiducials missing in state."
" Extraction of selected events expects both event identifiers")
if dsrc[:len('exp=')] != 'exp=':
raise ValueError("Extraction of events with given times and fiducials"
" only works when reading from XTC with index files")
if dsrc[-len(':idx'):] != ':idx':
dsrc += ':idx'
self.times = state['times']
self.fiducials = state['fiducials']
self.i = 0
self.data_source = psana.DataSource(dsrc)
self.run = self.data_source.runs().next()
elif 'indexing' in state:
if dsrc[-len(':idx'):] != ':idx':
dsrc += ':idx'
if 'index_offset' in state:
self.i = state['index_offset'] / ipc.mpi.nr_event_readers()
else:
self.i = 0
self.data_source = psana.DataSource(dsrc)
self.run = self.data_source.runs().next()
self.timestamps = self.run.times()
if self.N is not None:
self.timestamps = self.timestamps[:self.N]
self.timestamps = self.timestamps[ipc.mpi.event_reader_rank()::ipc.mpi.nr_event_readers()]
else:
self.times = None
self.fiducials = None
self.i = 0
if not dsrc.startswith('shmem='):
self.event_slice = slice(ipc.mpi.event_reader_rank(), None, ipc.mpi.nr_event_readers())
self.data_source = psana.DataSource(dsrc)
self.run = None
# Define how to translate between LCLS types and Hummingbird ones
self._n2c = {}
self._n2c[psana.Bld.BldDataFEEGasDetEnergy] = 'pulseEnergies'
self._n2c[psana.Bld.BldDataFEEGasDetEnergyV1] = 'pulseEnergies'
self._n2c[psana.Lusi.IpmFexV1] = 'pulseEnergies'
self._n2c[psana.Camera.FrameV1] = 'camera'
# Guard against old(er) psana versions
try:
self._n2c[psana.Bld.BldDataEBeamV1] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV2] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV3] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV4] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV5] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV6] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV7] = 'photonEnergies'
except AttributeError:
pass
# CXI (CsPad)
self._n2c[psana.CsPad.DataV2] = 'photonPixelDetectors'
self._n2c[psana.CsPad2x2.ElementV1] = 'photonPixelDetectors'
# CXI (OffAxis Cam)
#self._n2c[psana.Camera.FrameV1] = 'photonPixelDetectors'
# AMO (pnCCD)
self._n2c[psana.PNCCD.FullFrameV1] = 'photonPixelDetectors'
self._n2c[psana.PNCCD.FramesV1] = 'photonPixelDetectors'
# --
self._n2c[psana.Acqiris.DataDescV1] = 'ionTOFs'
self._n2c[psana.EventId] = 'eventID'
# Guard against old(er) psana versions
try:
self._n2c[psana.EvrData.DataV3] = 'eventCodes'
self._n2c[psana.EvrData.DataV4] = 'eventCodes'
except AttributeError:
pass
# Calculate the inverse mapping
self._c2n = {}
for k, v in self._n2c.iteritems():
self._c2n[v] = self._c2n.get(v, [])
self._c2n[v].append(k)
# Define how to translate between LCLS sources and Hummingbird ones
self._s2c = {}
# CXI (OnAxis Cam)
self._s2c['DetInfo(CxiEndstation.0:Opal4000.1)'] = 'Sc2Questar'
# CXI (OffAxis Cam)
self._s2c['DetInfo(CxiEndstation.0.Opal11000.0)'] = 'Sc2Offaxis'
# CXI (CsPad)
self._s2c['DetInfo(CxiDs1.0:Cspad.0)'] = 'CsPad Ds1'
self._s2c['DetInfo(CxiDsd.0:Cspad.0)'] = 'CsPad Dsd'
self._s2c['DetInfo(CxiDs2.0:Cspad.0)'] = 'CsPad Ds2'
self._s2c['DetInfo(CxiDg3.0:Cspad2x2.0)'] = 'CsPad Dg3'
self._s2c['DetInfo(CxiDg2.0:Cspad2x2.0)'] = 'CsPad Dg2'
# AMO (pnCCD)
self._s2c['DetInfo(Camp.0:pnCCD.1)'] = 'pnccdBack'
self._s2c['DetInfo(Camp.0:pnCCD.0)'] = 'pnccdFront'
# ToF detector
self._s2c['DetInfo(AmoEndstation.0:Acqiris.0)'] = 'Acqiris 0'
self._s2c['DetInfo(AmoEndstation.0:Acqiris.1)'] = 'Acqiris 1'
self._s2c['DetInfo(AmoEndstation.0:Acqiris.2)'] = 'Acqiris 2'
# AMO (Acqiris)
self._s2c['DetInfo(AmoETOF.0:Acqiris.0)'] = 'Acqiris 0'
self._s2c['DetInfo(AmoETOF.0:Acqiris.1)'] = 'Acqiris 1'
self._s2c['DetInfo(AmoITOF.0:Acqiris.0)'] = 'Acqiris 2'
self._s2c['DetInfo(AmoITOF.0:Acqiris.1)'] = 'Acqiris 3'
# MCP Camera
self._s2c['DetInfo(AmoEndstation.0:Opal1000.1)'] = 'OPAL1'
# CXI (Acqiris)
self._s2c['DetInfo(CxiEndstation.0:Acqiris.0)'] = 'Acqiris 0'
self._s2c['DetInfo(CxiEndstation.0:Acqiris.1)'] = 'Acqiris 1'
self.init_detectors(state)
#print("Detectors:" , psana.DetNames())
def init_detectors(self, state):
# New psana call pattern
self._detectors = {}
self._c2id_detectors = {}
if 'detectors' in state:
for detid, det_dict in state['detectors'].items():
if detid in PNCCD_IDS:
self._detectors[detid] = {}
self._detectors[detid]['id'] = det_dict['id']
self._detectors[detid]['type'] = det_dict['type']
self._detectors[detid]['key'] = det_dict['key']
obj = psana.Detector(det_dict['id'])
self._detectors[detid]['obj'] = obj
meth = det_dict['data_method']
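                    # The data_method string selects how much calibration the
                    # psana Detector interface applies before the frame reaches
                    # Hummingbird (see the branches below):
                    #   raw       - uncalibrated ADUs
                    #   calib_pc  - pedestal-subtracted
                    #   calib_cmc - pedestal-subtracted + common-mode corrected
                    #   calib_gc  - pedestal + common-mode + per-pixel gain
                    #   calib / image - psana's own calib()/image() methods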
if meth == "image":
f = lambda obj, evt: obj.image(evt)
elif meth == "calib":
f = lambda obj, evt: obj.calib(evt)
elif meth == "raw":
def f(obj, evt):
#obj = self._detectors[detid]['obj']
raw = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True)
return raw
elif meth == "calib_pc":
def f(obj, evt):
cdata = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True) - obj.pedestals(evt)
return cdata
elif meth == "calib_cmc":
def f(obj, evt):
#obj = self._detectors[detid]['obj']
rnum = obj.runnum(evt)
cdata = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True) - obj.pedestals(evt)
obj.common_mode_apply(rnum, cdata, cmpars=None)
return cdata
elif meth == "calib_gc":
def f(obj, evt):
#obj = self._detectors[detid]['obj']
rnum = obj.runnum(evt)
cdata = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True) - obj.pedestals(evt)
obj.common_mode_apply(rnum, cdata, cmpars=None)
gain = obj.gain(evt)
cdata *= gain
return cdata
else:
raise RuntimeError('data_method = %s not supported' % meth)
self._detectors[detid]['data_method'] = f
self._c2id_detectors[det_dict['type']] = detid
print("Set data method for detector id %s to %s." % (det_dict['id'], meth))
elif detid in ACQ_IDS:
self._detectors[detid] = {}
self._detectors[detid]['id'] = det_dict['id']
self._detectors[detid]['type'] = det_dict['type']
self._detectors[detid]['keys'] = det_dict['keys']
obj = psana.Detector(det_dict['id'])
self._detectors[detid]['obj'] = obj
self._c2id_detectors[det_dict['type']] = detid
else:
raise RuntimeError('Detector type = %s not implememented for ID %s' % (det_dict['type'], detid))
def next_event(self):
"""Grabs the next event and returns the translated version"""
if self.timestamps:
try:
evt = self.run.event(self.timestamps[self.i])
except (IndexError, StopIteration) as e:
#if 'end_of_run' in dir(Worker.conf):
# Worker.conf.end_of_run()
#ipc.mpi.slave_done()
return None
self.i += 1
elif self.times is not None:
evt = None
while self.i < len(self.times) and evt is None:
time = psana.EventTime(int(self.times[self.i]), self.fiducials[self.i])
self.i += 1
evt = self.run.event(time)
if evt is None:
print("Unable to find event listed in index file")
# We got to the end without a valid event, time to call it a day
if evt is None:
#if 'end_of_run' in dir(Worker.conf):
# Worker.conf.end_of_run()
#ipc.mpi.slave_done()
return None
else:
try:
while (self.i % self.event_slice.step) != self.event_slice.start:
evt = self.data_source.events().next()
self.i += 1
if self.N is not None and self.i >= self.N:
raise StopIteration
evt = self.data_source.events().next()
self.i += 1
except StopIteration:
#if 'end_of_run' in dir(Worker.conf):
# Worker.conf.end_of_run()
#ipc.mpi.slave_done()
return None
return EventTranslator(evt, self)
def event_keys(self, evt):
"""Returns the translated keys available"""
native_keys = evt.keys()
common_keys = set()
for k in native_keys:
for c in self._native_to_common(k):
common_keys.add(c)
# parameters corresponds to the EPICS values, analysis is for values added later on
return list(common_keys)+['parameters']+['analysis']
def _native_to_common(self, key):
"""Translates a native key to a hummingbird one"""
if(key.type() in self._n2c):
return [self._n2c[key.type()]]
else:
return []
def event_native_keys(self, evt):
"""Returns the native keys available"""
return evt.keys()
def translate(self, evt, key):
"""Returns a dict of Records that match a given humminbird key"""
values = {}
if(key in self._c2id_detectors):
return self.translate_object(evt, key)
elif(key in self._c2n):
return self.translate_core(evt, key)
elif(key == 'parameters'):
return self._tr_epics()
elif(key == 'analysis'):
return {}
elif(key == 'stream'):
return {}
else:
# check if the key matches any of the existing keys in the event
event_keys = evt.keys()
values = {}
found = False
for event_key in event_keys:
if(event_key.key() == key):
obj = evt.get(event_key.type(), event_key.src(), event_key.key())
found = True
add_record(values, 'native', '%s[%s]' % (self._s2c[str(event_key.src())], key),
obj, ureg.ADU)
if(found):
return values
else:
print('%s not found in event' % (key))
def translate_object(self, evt, key):
values = {}
detid = self._c2id_detectors[key]
if detid in PNCCD_IDS:
det = self._detectors[detid]
obj = self._detectors[detid]['obj']
data_nda = det['data_method'](obj, evt)
if data_nda is None:
image = None
elif len(data_nda.shape) <= 2:
image = data_nda
elif len(data_nda.shape) == 3:
image = numpy.hstack([numpy.vstack([data_nda[0],data_nda[1][::-1,::-1]]),
numpy.vstack([data_nda[3],data_nda[2][::-1,::-1]])])
add_record(values, det['type'], det['key'], image, ureg.ADU)
elif detid in ACQ_IDS:
det = self._detectors[detid]
# waveforms are in Volts, times are in Seconds
obj = det['obj']
waveforms = obj.waveform(evt)
#print("waveforms", waveforms)
#times = obj.wftime(evt)
for i, wf in enumerate(waveforms):
add_record(values, det['type'], det['keys'][i], wf, ureg.V)
else:
raise RuntimeError('%s not yet supported' % key)
return values
def translate_core(self, evt, key):
"""Returns a dict of Records that matchs a core Hummingbird key.
Core keys include all except: parameters, any psana create key,
any native key."""
values = {}
native_keys = self._c2n[key]
event_keys = evt.keys()
for k in event_keys:
if(k.type() in native_keys):
obj = evt.get(k.type(), k.src(), k.key())
if(isinstance(obj, psana.Bld.BldDataFEEGasDetEnergy) or
isinstance(obj, psana.Bld.BldDataFEEGasDetEnergyV1)):
self._tr_bld_data_fee_gas_det_energy(values, obj)
elif(isinstance(obj, psana.Lusi.IpmFexV1)):
self._tr_lusi_ipm_fex(values, obj, k)
elif(key == 'photonEnergies'):
self._tr_bld_data_ebeam(values, obj)
elif(isinstance(obj, psana.CsPad2x2.ElementV1)):
self._tr_cspad2x2(values, obj)
elif(isinstance(obj, psana.CsPad.DataV2)):
self._tr_cspad(values, obj, k)
# AMO
elif(isinstance(obj, psana.PNCCD.FullFrameV1)):
self._tr_pnccdFullFrame(values, obj, k)
elif(isinstance(obj, psana.PNCCD.FramesV1)):
self._tr_pnccdFrames(values, obj, k)
# --
elif(isinstance(obj, psana.Acqiris.DataDescV1)):
self._tr_acqiris(values, obj, k)
elif(isinstance(obj, psana.Camera.FrameV1)):
self._tr_camera(values, obj)
elif(isinstance(obj, psana.EventId)):
self._tr_event_id(values, obj)
elif(isinstance(obj, psana.EvrData.DataV3) or
isinstance(obj, psana.EvrData.DataV4)):
self._tr_event_codes(values, obj)
else:
print(type(obj))
print(k)
raise RuntimeError('%s not yet supported' % (type(obj)))
return values
def event_id(self, evt):
"""Returns an id which should be unique for each
shot and increase monotonically"""
return self.translate(evt, 'eventID')['Timestamp'].timestamp
def event_id2(self, evt):
"""Returns the LCLS time, a 64-bit integer as an alterative ID"""
return self.translate(evt, 'eventID')['Timestamp'].timestamp2
def _tr_bld_data_ebeam(self, values, obj):
"""Translates BldDataEBeam to hummingbird photon energy and other beam properties"""
try:
photon_energy_ev = obj.ebeamPhotonEnergy()
except AttributeError:
peak_current = obj.ebeamPkCurrBC2()
dl2_energy_gev = 0.001*obj.ebeamL3Energy()
ltu_wake_loss = 0.0016293*peak_current
# Spontaneous radiation loss per segment
sr_loss_per_segment = 0.63*dl2_energy_gev
# wakeloss in an undulator segment
wake_loss_per_segment = 0.0003*peak_current
# energy loss per segment
energy_loss_per_segment = (sr_loss_per_segment +
wake_loss_per_segment)
# energy in first active undulator segment [GeV]
energy_profile = (dl2_energy_gev - 0.001*ltu_wake_loss -
0.0005*energy_loss_per_segment)
# Calculate the resonant photon energy of the first active segment
photon_energy_ev = 44.42*energy_profile*energy_profile
add_record(values, 'photonEnergies', 'photonEnergy', photon_energy_ev, ureg.eV)
try:
ebeam_ang_x = obj.ebeamLTUAngX()
ebeam_ang_y = obj.ebeamLTUAngY()
ebeam_pos_x = obj.ebeamLTUPosX()
ebeam_pos_y = obj.ebeamLTUPosY()
ebeam_charge = obj.ebeamCharge()
add_record(values, 'photonEnergies', 'angX', ebeam_ang_x)
add_record(values, 'photonEnergies', 'angY', ebeam_ang_y)
add_record(values, 'photonEnergies', 'posX', ebeam_pos_x)
add_record(values, 'photonEnergies', 'posY', ebeam_pos_y)
add_record(values, 'photonEnergies', 'charge', ebeam_charge)
except AttributeError:
print("Couldn't translate electron beam properties from BldDataEBeam")
def _tr_bld_data_fee_gas_det_energy(self, values, obj):
"""Translates gas monitor detector to hummingbird pulse energy"""
# convert from mJ to J
add_record(values, 'pulseEnergies', 'f_11_ENRC', obj.f_11_ENRC(), ureg.mJ)
add_record(values, 'pulseEnergies', 'f_12_ENRC', obj.f_12_ENRC(), ureg.mJ)
add_record(values, 'pulseEnergies', 'f_21_ENRC', obj.f_21_ENRC(), ureg.mJ)
add_record(values, 'pulseEnergies', 'f_22_ENRC', obj.f_22_ENRC(), ureg.mJ)
def _tr_lusi_ipm_fex(self, values, obj, evt_key):
"""Translates Ipm relative pulse energy monitor
to hummingbird pulse energy"""
add_record(values, 'pulseEnergies', 'IpmFex - '+str(evt_key.src()), obj.sum(), ureg.ADU)
def _tr_cspad2x2(self, values, obj):
"""Translates CsPad2x2 to hummingbird numpy array"""
try:
add_record(values, 'photonPixelDetectors', 'CsPad2x2S', obj.data(), ureg.ADU)
except AttributeError:
add_record(values, 'photonPixelDetectors', 'CsPad2x2', obj.data16(), ureg.ADU)
def _tr_camera(self, values, obj):
"""Translates Camera frame to hummingbird numpy array"""
#if obj.depth == 16 or obj.depth() == 12:
# data = obj.data16()
# print(data.shape)
#else:
# data = obj.data8()
# print(data.shape)
data = obj.data16()
# off Axis cam at CXI
#if data.shape == (1024,1024):
# add_record(values, 'camera', 'offAxis', data, ureg.ADU)
# MCP (PNCCD replacement) at AMO (June 2016)
if data.shape == (1024,1024):
add_record(values, 'camera', 'mcp', data, ureg.ADU)
if data.shape == (1752,2336):
add_record(values, 'camera', 'onAxis', data, ureg.ADU)
def _tr_cspad(self, values, obj, evt_key):
"""Translates CsPad to hummingbird numpy array, quad by quad"""
n_quads = obj.quads_shape()[0]
for i in range(0, n_quads):
add_record(values, 'photonPixelDetectors', '%sQuad%d' % (self._s2c[str(evt_key.src())], i),
obj.quads(i).data(), ureg.ADU)
def _tr_pnccdFullFrame(self, values, obj, evt_key):
"""Translates full pnCCD frame to hummingbird numpy array"""
add_record(values, 'photonPixelDetectors', '%sfullFrame' % self._s2c[str(evt_key.src())], obj.data(), ureg.ADU)
def _tr_pnccdFrames(self, values, obj, evt_key):
"""Translates pnCCD frames to hummingbird numpy array, frame by frame"""
n_frames = obj.frame_shape()[0]
for i in range(0, n_frames):
add_record(values, 'photonPixelDetectors', '%sFrame%d' % (self._s2c[str(evt_key.src())], i),
obj.frame(i).data(), ureg.ADU)
def _tr_acqiris(self, values, obj, evt_key):
"""Translates Acqiris TOF data to hummingbird numpy array"""
config_store = self.data_source.env().configStore()
acq_config = config_store.get(psana.Acqiris.ConfigV1, evt_key.src())
samp_interval = acq_config.horiz().sampInterval()
n_channels = obj.data_shape()[0]
for i in range(0, n_channels):
vert = acq_config.vert()[i]
elem = obj.data(i)
timestamp = elem.timestamp()[0].value()
raw = elem.waveforms()[0]
if(elem.nbrSamplesInSeg() == 0):
logging.warning("Warning: TOF data for "
"detector %s is missing.", evt_key)
data = raw*vert.slope() - vert.offset()
rec = Record('%s Channel %d' %(self._s2c[str(evt_key.src())], i),
data, ureg.V)
rec.time = (timestamp +
samp_interval * numpy.arange(0, elem.nbrSamplesInSeg()))
values[rec.name] = rec
def _tr_event_id(self, values, obj):
"""Translates LCLS eventID into a hummingbird one"""
timestamp = obj.time()[0]+obj.time()[1]*1e-9
time = datetime.datetime.fromtimestamp(timestamp, tz=timezone('utc'))
time = time.astimezone(tz=timezone('US/Pacific'))
rec = Record('Timestamp', time, ureg.s)
time = datetime.datetime.fromtimestamp(obj.time()[0])
rec.datetime64 = numpy.datetime64(time, 'ns')+obj.time()[1]
rec.fiducials = obj.fiducials()
rec.run = obj.run()
rec.ticks = obj.ticks()
rec.vector = obj.vector()
rec.timestamp = timestamp
rec.timestamp2 = obj.time()[0] << 32 | obj.time()[1]
values[rec.name] = rec
def _tr_event_codes(self, values, obj):
"""Translates LCLS event codes into a hummingbird ones"""
codes = []
for fifo_event in obj.fifoEvents():
codes.append(fifo_event.eventCode())
add_record(values, 'eventCodes', 'EvrEventCodes', codes)
def _tr_epics(self):
"""Returns an EPICSdict that provides access to EPICS parameters.
Check the EPICSdict class for more details.
"""
return EPICSdict(self.data_source.env().epicsStore())
class EPICSdict(object):
"""Provides a dict-like interface to EPICS parameters.
    Translating all the parameters up front is too slow.
    Instead, parameters are only translated as they are needed,
    when they are accessed, using this class.
"""
def __init__(self, epics):
self.epics = epics
self._cache = {}
self._keys = None
def keys(self):
"""Returns available EPICS names"""
if self._keys is None:
self._keys = self.epics.pvNames() + self.epics.aliases()
return self._keys
def len(self):
"""Returns the length of the dictionary"""
return len(self.keys())
def __getitem__(self, key):
"""Calls psana to retrieve and translate the EPICS item"""
if(key not in self._cache):
pv = self.epics.getPV(key)
if(pv is None):
raise KeyError('%s is not a valid EPICS key' %(key))
rec = Record(key, pv.value(0))
rec.pv = pv
self._cache[key] = rec
return self._cache[key]
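# Illustrative usage sketch (comments only; the PV name is hypothetical). EPICS
# values are reached through the 'parameters' key of the translator, e.g.
#
#     params = translator.translate(evt, 'parameters')   # an EPICSdict
#     motor = params['CXI:DS1:MMS:06.RBV']                # translated on first access
#
# Only the PVs that are actually read get converted into Records, and each one
# is cached for later lookups, which is the point of this lazy wrapper.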
| SPIhub/hummingbird | src/backend/lcls.py | Python | bsd-2-clause | 28,140 |
plot_data.apply(transform_utm_to_wgs, axis=1) | jorisvandenbossche/DS-python-data-analysis | notebooks/_solutions/case2_observations_processing20.py | Python | bsd-3-clause | 45 |
"""
Test basic DataFrame functionality.
"""
import pandas as pd
import pytest
import weld.grizzly as gr
def get_frames(cls, strings):
"""
Returns two DataFrames for testing binary operators.
The DataFrames have columns of overlapping/different names, types, etc.
"""
df1 = pd.DataFrame({
'name': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'Smith', 'Narayanan', 'Thomas', 'Thaker'],
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = pd.DataFrame({
'firstName': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'smith', 'narayanan', 'Thomas', 'thaker'],
'age': [25, 30, 45, 20, 60, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
if not strings:
df1 = df1.drop(['name', 'lastName'], axis=1)
df2 = df2.drop(['firstName', 'lastName'], axis=1)
return (cls(df1), cls(df2))
def _test_binop(pd_op, gr_op, strings=True):
"""
Test a binary operator.
Binary operators align on column name. For columns that don't exist in both
    DataFrames, the column is filled with NaN (for non-comparison operations)
    or False (for comparison operations).
If the RHS is a Series, the Series should be added to all columns.
"""
df1, df2 = get_frames(pd.DataFrame, strings)
gdf1, gdf2 = get_frames(gr.GrizzlyDataFrame, strings)
expect = pd_op(df1, df2)
result = gr_op(gdf1, gdf2).to_pandas()
assert expect.equals(result)
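def _column_alignment_example():
    """Illustrative sketch (not collected by pytest): shows, with plain pandas,
    the column alignment behaviour that _test_binop relies on. Columns present
    in only one frame come back filled with NaN for arithmetic operations."""
    a = pd.DataFrame({'x': [1, 2], 'y': [3.0, 4.0]})
    b = pd.DataFrame({'x': [10, 20], 'z': [30.0, 40.0]})
    out = a.add(b)
    # 'x' is added element-wise; 'y' and 'z' exist in only one frame each,
    # so the result carries them as all-NaN columns.
    assert list(out.columns) == ['x', 'y', 'z']
    assert out['y'].isna().all() and out['z'].isna().all()
    return out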
def test_evaluation():
# Test to make sure that evaluating a DataFrame once caches the result/
# doesn't cause another evaluation.
df1 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df3 = (df1 + df2) * df2 + df1 / df2
assert not df3.is_value
df3.evaluate()
assert df3.is_value
weld_value = df3.weld_value
df3.evaluate()
# The same weld_value should be returned.
assert weld_value is df3.weld_value
def test_add():
_test_binop(pd.DataFrame.add, gr.GrizzlyDataFrame.add, strings=False)
def test_sub():
_test_binop(pd.DataFrame.sub, gr.GrizzlyDataFrame.sub, strings=False)
def test_mul():
_test_binop(pd.DataFrame.mul, gr.GrizzlyDataFrame.mul, strings=False)
def test_div():
_test_binop(pd.DataFrame.div, gr.GrizzlyDataFrame.div, strings=False)
def test_eq():
_test_binop(pd.DataFrame.eq, gr.GrizzlyDataFrame.eq, strings=True)
def test_ne():
_test_binop(pd.DataFrame.ne, gr.GrizzlyDataFrame.ne, strings=True)
def test_le():
_test_binop(pd.DataFrame.le, gr.GrizzlyDataFrame.le, strings=False)
def test_lt():
_test_binop(pd.DataFrame.lt, gr.GrizzlyDataFrame.lt, strings=False)
def test_ge():
_test_binop(pd.DataFrame.ge, gr.GrizzlyDataFrame.ge, strings=False)
def test_gt():
_test_binop(pd.DataFrame.gt, gr.GrizzlyDataFrame.gt, strings=False)
| weld-project/weld | weld-python/tests/grizzly/core/test_frame.py | Python | bsd-3-clause | 3,167 |
"""
compressible-specific boundary conditions. Here, in particular, we
implement an HSE BC in the vertical direction.
Note: the pyro BC routines operate on a single variable at a time, so
some work will necessarily be repeated.
Also note: we may come in here with the aux_data (source terms), so
we'll do a special case for them.
"""
import compressible.eos as eos
from util import msg
import math
import numpy as np
def user(bc_name, bc_edge, variable, ccdata):
"""
A hydrostatic boundary. This integrates the equation of HSE into
the ghost cells to get the pressure and density under the assumption
that the specific internal energy is constant.
Upon exit, the ghost cells for the input variable will be set
Parameters
----------
bc_name : {'hse'}
The descriptive name for the boundary condition -- this allows
for pyro to have multiple types of user-supplied boundary
conditions. For this module, it needs to be 'hse'.
bc_edge : {'ylb', 'yrb'}
The boundary to update: ylb = lower y boundary; yrb = upper y
boundary.
variable : {'density', 'x-momentum', 'y-momentum', 'energy'}
The variable whose ghost cells we are filling
ccdata : CellCenterData2d object
The data object
"""
myg = ccdata.grid
if bc_name == "hse":
if bc_edge == "ylb":
# lower y boundary
# we will take the density to be constant, the velocity to
# be outflow, and the pressure to be in HSE
if variable in ["density", "x-momentum", "y-momentum", "ymom_src", "E_src", "fuel", "ash"]:
v = ccdata.get_var(variable)
j = myg.jlo-1
while j >= 0:
v[:, j] = v[:, myg.jlo]
j -= 1
elif variable == "energy":
dens = ccdata.get_var("density")
xmom = ccdata.get_var("x-momentum")
ymom = ccdata.get_var("y-momentum")
ener = ccdata.get_var("energy")
grav = ccdata.get_aux("grav")
gamma = ccdata.get_aux("gamma")
dens_base = dens[:, myg.jlo]
ke_base = 0.5*(xmom[:, myg.jlo]**2 + ymom[:, myg.jlo]**2) / \
dens[:, myg.jlo]
eint_base = (ener[:, myg.jlo] - ke_base)/dens[:, myg.jlo]
pres_base = eos.pres(gamma, dens_base, eint_base)
# we are assuming that the density is constant in this
# formulation of HSE, so the pressure comes simply from
# differencing the HSE equation
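                # i.e. integrate dp/dy = rho*g downward one zone at a time:
                #   p_{j-1} = p_j - dens_base*grav*dy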
j = myg.jlo-1
while j >= 0:
pres_below = pres_base - grav*dens_base*myg.dy
rhoe = eos.rhoe(gamma, pres_below)
ener[:, j] = rhoe + ke_base
pres_base = pres_below.copy()
j -= 1
else:
raise NotImplementedError("variable not defined")
elif bc_edge == "yrb":
# upper y boundary
# we will take the density to be constant, the velocity to
# be outflow, and the pressure to be in HSE
if variable in ["density", "x-momentum", "y-momentum", "ymom_src", "E_src", "fuel", "ash"]:
v = ccdata.get_var(variable)
for j in range(myg.jhi+1, myg.jhi+myg.ng+1):
v[:, j] = v[:, myg.jhi]
elif variable == "energy":
dens = ccdata.get_var("density")
xmom = ccdata.get_var("x-momentum")
ymom = ccdata.get_var("y-momentum")
ener = ccdata.get_var("energy")
grav = ccdata.get_aux("grav")
gamma = ccdata.get_aux("gamma")
dens_base = dens[:, myg.jhi]
ke_base = 0.5*(xmom[:, myg.jhi]**2 + ymom[:, myg.jhi]**2) / \
dens[:, myg.jhi]
eint_base = (ener[:, myg.jhi] - ke_base)/dens[:, myg.jhi]
pres_base = eos.pres(gamma, dens_base, eint_base)
# we are assuming that the density is constant in this
# formulation of HSE, so the pressure comes simply from
# differencing the HSE equation
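                # i.e. integrate dp/dy = rho*g upward one zone at a time:
                #   p_{j+1} = p_j + dens_base*grav*dy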
for j in range(myg.jhi+1, myg.jhi+myg.ng+1):
pres_above = pres_base + grav*dens_base*myg.dy
rhoe = eos.rhoe(gamma, pres_above)
ener[:, j] = rhoe + ke_base
pres_base = pres_above.copy()
else:
raise NotImplementedError("variable not defined")
else:
msg.fail("error: hse BC not supported for xlb or xrb")
elif bc_name == "ramp":
# Boundary conditions for double Mach reflection problem
gamma = ccdata.get_aux("gamma")
if bc_edge == "xlb":
# lower x boundary
# inflow condition with post shock setup
v = ccdata.get_var(variable)
i = myg.ilo - 1
if variable in ["density", "x-momentum", "y-momentum", "energy"]:
val = inflow_post_bc(variable, gamma)
while i >= 0:
v[i, :] = val
i = i - 1
else:
v[:, :] = 0.0 # no source term
elif bc_edge == "ylb":
# lower y boundary
# for x > 1./6., reflective boundary
# for x < 1./6., inflow with post shock setup
if variable in ["density", "x-momentum", "y-momentum", "energy"]:
v = ccdata.get_var(variable)
j = myg.jlo - 1
jj = 0
while j >= 0:
xcen_l = myg.x < 1.0/6.0
xcen_r = myg.x >= 1.0/6.0
v[xcen_l, j] = inflow_post_bc(variable, gamma)
if variable == "y-momentum":
v[xcen_r, j] = -1.0*v[xcen_r, myg.jlo+jj]
else:
v[xcen_r, j] = v[xcen_r, myg.jlo+jj]
j = j - 1
jj = jj + 1
else:
v = ccdata.get_var(variable)
v[:, :] = 0.0 # no source term
elif bc_edge == "yrb":
# upper y boundary
            # time-dependent boundary: the shock front moves at Mach 10, making an
            # angle of 30 degrees (clockwise) with the x-axis.
            # The x coordinate of the grid is used to judge whether a cell lies in
            # the pure post-shock region, the pure pre-shock region, or the mixed region.
if variable in ["density", "x-momentum", "y-momentum", "energy"]:
v = ccdata.get_var(variable)
for j in range(myg.jhi+1, myg.jhi+myg.ng+1):
shockfront_up = 1.0/6.0 + (myg.y[j] + 0.5*myg.dy*math.sqrt(3))/math.tan(math.pi/3.0) \
+ (10.0/math.sin(math.pi/3.0))*ccdata.t
shockfront_down = 1.0/6.0 + (myg.y[j] - 0.5*myg.dy*math.sqrt(3))/math.tan(math.pi/3.0) \
+ (10.0/math.sin(math.pi/3.0))*ccdata.t
shockfront = np.array([shockfront_down, shockfront_up])
for i in range(myg.ihi+myg.ng+1):
v[i, j] = 0.0
cx_down = myg.x[i] - 0.5*myg.dx*math.sqrt(3)
cx_up = myg.x[i] + 0.5*myg.dx*math.sqrt(3)
cx = np.array([cx_down, cx_up])
for sf in shockfront:
for x in cx:
if x < sf:
v[i, j] = v[i, j] + 0.25*inflow_post_bc(variable, gamma)
else:
v[i, j] = v[i, j] + 0.25*inflow_pre_bc(variable, gamma)
else:
v = ccdata.get_var(variable)
v[:, :] = 0.0 # no source term
else:
msg.fail("error: bc type %s not supported" % (bc_name))
def inflow_post_bc(var, g):
# inflow boundary condition with post shock setup
r_l = 8.0
u_l = 7.1447096
v_l = -4.125
p_l = 116.5
if var == "density":
vl = r_l
elif var == "x-momentum":
vl = r_l*u_l
elif var == "y-momentum":
vl = r_l*v_l
elif var == "energy":
vl = p_l/(g - 1.0) + 0.5*r_l*(u_l*u_l + v_l*v_l)
else:
vl = 0.0
return vl
def inflow_pre_bc(var, g):
# pre shock setup
r_r = 1.4
u_r = 0.0
v_r = 0.0
p_r = 1.0
if var == "density":
vl = r_r
elif var == "x-momentum":
vl = r_r*u_r
elif var == "y-momentum":
vl = r_r*v_r
elif var == "energy":
vl = p_r/(g - 1.0) + 0.5*r_r*(u_r*u_r + v_r*v_r)
else:
vl = 0.0
return vl
| zingale/pyro2 | compressible/BC.py | Python | bsd-3-clause | 8,919 |
#!/usr/bin/env vpython3
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for owners_finder.py."""
import os
import sys
import unittest
if sys.version_info.major == 2:
import mock
else:
from unittest import mock
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support import filesystem_mock
import owners_finder
import owners_client
ben = '[email protected]'
brett = '[email protected]'
darin = '[email protected]'
jochen = '[email protected]'
john = '[email protected]'
ken = '[email protected]'
peter = '[email protected]'
tom = '[email protected]'
nonowner = '[email protected]'
def owners_file(*email_addresses, **kwargs):
s = ''
if kwargs.get('comment'):
s += '# %s\n' % kwargs.get('comment')
if kwargs.get('noparent'):
s += 'set noparent\n'
return s + '\n'.join(email_addresses) + '\n'
class TestClient(owners_client.OwnersClient):
def __init__(self):
super(TestClient, self).__init__()
self.owners_by_path = {
'DEPS': [ken, peter, tom],
'base/vlog.h': [ken, peter, tom],
'chrome/browser/defaults.h': [brett, ben, ken, peter, tom],
'chrome/gpu/gpu_channel.h': [ken, ben, brett, ken, peter, tom],
'chrome/renderer/gpu/gpu_channel_host.h': [peter, ben, brett, ken, tom],
'chrome/renderer/safe_browsing/scorer.h': [peter, ben, brett, ken, tom],
'content/content.gyp': [john, darin],
'content/bar/foo.cc': [john, darin],
'content/baz/froboz.h': [brett, john, darin],
'content/baz/ugly.cc': [brett, john, darin],
'content/baz/ugly.h': [brett, john, darin],
'content/common/common.cc': [jochen, john, darin],
'content/foo/foo.cc': [jochen, john, darin],
'content/views/pie.h': [ben, john, self.EVERYONE],
}
def ListOwners(self, path):
path = path.replace(os.sep, '/')
return self.owners_by_path[path]
class OutputInterceptedOwnersFinder(owners_finder.OwnersFinder):
def __init__(
self, files, author, reviewers, client, disable_color=False):
super(OutputInterceptedOwnersFinder, self).__init__(
files, author, reviewers, client, disable_color=disable_color)
self.output = []
self.indentation_stack = []
def resetText(self):
self.output = []
self.indentation_stack = []
def indent(self):
self.indentation_stack.append(self.output)
self.output = []
def unindent(self):
block = self.output
self.output = self.indentation_stack.pop()
self.output.append(block)
def writeln(self, text=''):
self.output.append(text)
class _BaseTestCase(unittest.TestCase):
default_files = [
'base/vlog.h',
'chrome/browser/defaults.h',
'chrome/gpu/gpu_channel.h',
'chrome/renderer/gpu/gpu_channel_host.h',
'chrome/renderer/safe_browsing/scorer.h',
'content/content.gyp',
'content/bar/foo.cc',
'content/baz/ugly.cc',
'content/baz/ugly.h',
'content/views/pie.h'
]
def ownersFinder(self, files, author=nonowner, reviewers=None):
reviewers = reviewers or []
return OutputInterceptedOwnersFinder(
files, author, reviewers, TestClient(), disable_color=True)
def defaultFinder(self):
return self.ownersFinder(self.default_files)
class OwnersFinderTests(_BaseTestCase):
def test_constructor(self):
self.assertNotEqual(self.defaultFinder(), None)
def test_skip_files_owned_by_reviewers(self):
files = [
'chrome/browser/defaults.h', # owned by brett
'content/bar/foo.cc', # not owned by brett
]
finder = self.ownersFinder(files, reviewers=[brett])
self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})
def test_skip_files_owned_by_author(self):
files = [
'chrome/browser/defaults.h', # owned by brett
'content/bar/foo.cc', # not owned by brett
]
finder = self.ownersFinder(files, author=brett)
self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})
def test_native_path_sep(self):
# Create a path with backslashes on Windows to make sure these are handled.
# This test is a harmless duplicate on other platforms.
native_slashes_path = 'chrome/browser/defaults.h'.replace('/', os.sep)
files = [
native_slashes_path, # owned by brett
'content/bar/foo.cc', # not owned by brett
]
finder = self.ownersFinder(files, reviewers=[brett])
self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})
@mock.patch('owners_client.OwnersClient.ScoreOwners')
def test_reset(self, mockScoreOwners):
mockScoreOwners.return_value = [brett, darin, john, peter, ken, ben, tom]
finder = self.defaultFinder()
for _ in range(2):
expected = [brett, darin, john, peter, ken, ben, tom]
self.assertEqual(finder.owners_queue, expected)
self.assertEqual(finder.unreviewed_files, {
'base/vlog.h',
'chrome/browser/defaults.h',
'chrome/gpu/gpu_channel.h',
'chrome/renderer/gpu/gpu_channel_host.h',
'chrome/renderer/safe_browsing/scorer.h',
'content/content.gyp',
'content/bar/foo.cc',
'content/baz/ugly.cc',
'content/baz/ugly.h'
})
self.assertEqual(finder.selected_owners, set())
self.assertEqual(finder.deselected_owners, set())
self.assertEqual(finder.reviewed_by, {})
self.assertEqual(finder.output, [])
finder.select_owner(john)
finder.reset()
finder.resetText()
@mock.patch('owners_client.OwnersClient.ScoreOwners')
def test_select(self, mockScoreOwners):
mockScoreOwners.return_value = [brett, darin, john, peter, ken, ben, tom]
finder = self.defaultFinder()
finder.select_owner(john)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {john})
self.assertEqual(finder.deselected_owners, {darin})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': john,
'content/baz/ugly.cc': john,
'content/baz/ugly.h': john,
'content/content.gyp': john})
self.assertEqual(finder.output,
['Selected: ' + john, 'Deselected: ' + darin])
finder = self.defaultFinder()
finder.select_owner(darin)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {darin})
self.assertEqual(finder.deselected_owners, {john})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': darin,
'content/baz/ugly.cc': darin,
'content/baz/ugly.h': darin,
'content/content.gyp': darin})
self.assertEqual(finder.output,
['Selected: ' + darin, 'Deselected: ' + john])
finder = self.defaultFinder()
finder.select_owner(brett)
expected = [darin, john, peter, ken, tom]
self.assertEqual(finder.owners_queue, expected)
self.assertEqual(finder.selected_owners, {brett})
self.assertEqual(finder.deselected_owners, {ben})
self.assertEqual(finder.reviewed_by,
{'chrome/browser/defaults.h': brett,
'chrome/gpu/gpu_channel.h': brett,
'chrome/renderer/gpu/gpu_channel_host.h': brett,
'chrome/renderer/safe_browsing/scorer.h': brett,
'content/baz/ugly.cc': brett,
'content/baz/ugly.h': brett})
self.assertEqual(finder.output,
['Selected: ' + brett, 'Deselected: ' + ben])
@mock.patch('owners_client.OwnersClient.ScoreOwners')
def test_deselect(self, mockScoreOwners):
mockScoreOwners.return_value = [brett, darin, john, peter, ken, ben, tom]
finder = self.defaultFinder()
finder.deselect_owner(john)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {darin})
self.assertEqual(finder.deselected_owners, {john})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': darin,
'content/baz/ugly.cc': darin,
'content/baz/ugly.h': darin,
'content/content.gyp': darin})
self.assertEqual(finder.output,
['Deselected: ' + john, 'Selected: ' + darin])
def test_print_file_info(self):
finder = self.defaultFinder()
finder.print_file_info('chrome/browser/defaults.h')
self.assertEqual(finder.output, ['chrome/browser/defaults.h [5]'])
finder.resetText()
finder.print_file_info('chrome/renderer/gpu/gpu_channel_host.h')
self.assertEqual(finder.output,
['chrome/renderer/gpu/gpu_channel_host.h [5]'])
def test_print_file_info_detailed(self):
finder = self.defaultFinder()
finder.print_file_info_detailed('chrome/browser/defaults.h')
self.assertEqual(finder.output,
['chrome/browser/defaults.h',
[ben, brett, ken, peter, tom]])
finder.resetText()
finder.print_file_info_detailed('chrome/renderer/gpu/gpu_channel_host.h')
self.assertEqual(finder.output,
['chrome/renderer/gpu/gpu_channel_host.h',
[ben, brett, ken, peter, tom]])
if __name__ == '__main__':
unittest.main()
| CoherentLabs/depot_tools | tests/owners_finder_test.py | Python | bsd-3-clause | 9,645 |
from activitystreams import Activity, Object, MediaLink, ActionLink, Link
import re
import datetime
import time
class AtomActivity(Activity):
pass
# A simple enum-like helper: each parse mode is a distinct object with a readable repr.
class ObjectParseMode(object):
def __init__(self, reprstring):
self.reprstring = reprstring
def __repr__(self):
return self.reprstring
ObjectParseMode.ATOM_ENTRY = ObjectParseMode("ObjectParseMode.ATOM_ENTRY")
ObjectParseMode.ATOM_AUTHOR = ObjectParseMode("ObjectParseMode.ATOM_AUTHOR")
ObjectParseMode.ACTIVITY_OBJECT = ObjectParseMode("ObjectParseMode.ACTIVITY_OBJECT")
ATOM_PREFIX = "{http://www.w3.org/2005/Atom}"
ACTIVITY_PREFIX = "{http://activitystrea.ms/spec/1.0/}"
MEDIA_PREFIX = "{http://purl.org/syndication/atommedia}"
ATOM_FEED = ATOM_PREFIX + "feed"
ATOM_ENTRY = ATOM_PREFIX + "entry"
ATOM_ID = ATOM_PREFIX + "id"
ATOM_AUTHOR = ATOM_PREFIX + "author"
ATOM_SOURCE = ATOM_PREFIX + "source"
ATOM_TITLE = ATOM_PREFIX + "title"
ATOM_SUMMARY = ATOM_PREFIX + "summary"
ATOM_CONTENT = ATOM_PREFIX + "content"
ATOM_LINK = ATOM_PREFIX + "link"
ATOM_PUBLISHED = ATOM_PREFIX + "published"
ATOM_NAME = ATOM_PREFIX + "name"
ATOM_URI = ATOM_PREFIX + "uri"
ATOM_GENERATOR = ATOM_PREFIX + "generator"
ATOM_ICON = ATOM_PREFIX + "icon"
ACTIVITY_SUBJECT = ACTIVITY_PREFIX + "subject"
ACTIVITY_OBJECT = ACTIVITY_PREFIX + "object"
ACTIVITY_OBJECT_TYPE = ACTIVITY_PREFIX + "object-type"
ACTIVITY_VERB = ACTIVITY_PREFIX + "verb"
ACTIVITY_TARGET = ACTIVITY_PREFIX + "target"
ACTIVITY_ACTOR = ACTIVITY_PREFIX + "actor"
POST_VERB = "http://activitystrea.ms/schema/1.0/post"
MEDIA_WIDTH = MEDIA_PREFIX + "width"
MEDIA_HEIGHT = MEDIA_PREFIX + "height"
MEDIA_DURATION = MEDIA_PREFIX + "duration"
MEDIA_DESCRIPTION = MEDIA_PREFIX + "description"
def make_activities_from_feed(et):
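    """Returns a flat list of Activity objects, one or more for each
    atom:entry in the parsed feed ElementTree."""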
feed_elem = et.getroot()
entry_elems = feed_elem.findall(ATOM_ENTRY)
activities = []
for entry_elem in entry_elems:
activities.extend(make_activities_from_entry(entry_elem, feed_elem))
return activities
def make_activities_from_entry(entry_elem, feed_elem):
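    """Builds the Activity objects described by a single atom:entry.

    If the entry has no activity:object children, the entry itself is
    treated as the object of an implied activity."""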
object_elems = entry_elem.findall(ACTIVITY_OBJECT)
activity_is_implied = False
if len(object_elems) == 0:
# Implied activity, so the entry itself represents the object.
activity_is_implied = True
object_elems = [ entry_elem ]
author_elem = entry_elem.find(ATOM_AUTHOR)
if author_elem is None:
source_elem = entry_elem.find(ATOM_SOURCE)
if source_elem is not None:
author_elem = source_elem.find(ATOM_AUTHOR)
if author_elem is None:
author_elem = feed_elem.find(ATOM_AUTHOR)
target_elem = entry_elem.find(ACTIVITY_TARGET)
published_elem = entry_elem.find(ATOM_PUBLISHED)
published_datetime = None
if published_elem is not None:
published_w3cdtf = published_elem.text
published_datetime = _parse_date_w3cdtf(published_w3cdtf)
verb_elem = entry_elem.find(ACTIVITY_VERB)
verb = None
if verb_elem is not None:
verb = verb_elem.text
else:
verb = POST_VERB
generator_elem = entry_elem.find(ATOM_GENERATOR)
icon_url = None
icon_elem = entry_elem.find(ATOM_ICON)
if icon_elem is not None:
icon_url = icon_elem.text
target = None
    if target_elem is not None:
target = make_object_from_elem(target_elem, feed_elem, ObjectParseMode.ACTIVITY_OBJECT)
actor = None
    if author_elem is not None:
actor = make_object_from_elem(author_elem, feed_elem, ObjectParseMode.ATOM_AUTHOR)
activities = []
for object_elem in object_elems:
if activity_is_implied:
object = make_object_from_elem(object_elem, feed_elem, ObjectParseMode.ATOM_ENTRY)
else:
object = make_object_from_elem(object_elem, feed_elem, ObjectParseMode.ACTIVITY_OBJECT)
activity = Activity(object=object, actor=actor, target=target, verb=verb, time=published_datetime, icon_url=icon_url)
activities.append(activity)
return activities
def make_object_from_elem(object_elem, feed_elem, mode):
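    """Builds an Object from an element, honoring the given ObjectParseMode.

    The mode decides which child elements supply the name and URL:
    atom:name/atom:uri in ATOM_AUTHOR mode, atom:title and the
    link rel="alternate" href otherwise."""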
id = None
id_elem = object_elem.find(ATOM_ID)
if id_elem is not None:
id = id_elem.text
summary = None
summary_elem = object_elem.find(ATOM_SUMMARY)
if summary_elem is not None:
summary = summary_elem.text
name_tag_name = ATOM_TITLE
# The ATOM_AUTHOR parsing mode looks in atom:name instead of atom:title
if mode == ObjectParseMode.ATOM_AUTHOR:
name_tag_name = ATOM_NAME
name = None
name_elem = object_elem.find(name_tag_name)
if name_elem is not None:
name = name_elem.text
url = None
image = None
for link_elem in object_elem.findall(ATOM_LINK):
type = link_elem.get("type")
rel = link_elem.get("rel")
if rel is None or rel == "alternate":
if type is None or type == "text/html":
url = link_elem.get("href")
if rel == "preview":
if type is None or type == "image/jpeg" or type == "image/gif" or type == "image/png":
# FIXME: Should pull out the width/height/duration attributes from AtomMedia too.
image = MediaLink(url=link_elem.get("href"))
# In the atom:author parse mode we fall back on atom:uri if there's no link rel="alternate"
if url is None and mode == ObjectParseMode.ATOM_AUTHOR:
uri_elem = object_elem.find(ATOM_URI)
if uri_elem is not None:
url = uri_elem.text
object_type_elem = object_elem.find(ACTIVITY_OBJECT_TYPE)
object_type = None
if object_type_elem is not None:
object_type = object_type_elem.text
return Object(id=id, name=name, url=url, object_type=object_type, image=image, summary=summary)
# This is pilfered from Universal Feed Parser.
def _parse_date_w3cdtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(float(seconds))
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<julian>\d\d\d)'
'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+ __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return datetime.datetime.utcfromtimestamp(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
| lmorchard/badger | libs/activitystreams/atom.py | Python | bsd-3-clause | 8,938 |
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/\
Johnson%E2%80%93Lindenstrauss_lemma
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils.fixes import parse_version
# `normed` is being deprecated in favor of `density` in histograms
if parse_version(matplotlib.__version__) >= parse_version('2.1'):
density_param = {'density': True}
else:
density_param = {'normed': True}
# %%
# Theoretical bounds
# ==================
# The distortion introduced by a random projection `p` is bounded by the
# fact that `p` defines an eps-embedding with good probability, as
# defined by:
#
# .. math::
# (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
#
# Where u and v are any rows taken from a dataset of shape [n_samples,
# n_features] and p is a projection by a random Gaussian N(0, 1) matrix
# with shape [n_components, n_features] (or a sparse Achlioptas matrix).
#
# The minimum number of components that guarantees the eps-embedding is
# given by:
#
# .. math::
# n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
#
#
# The first plot shows that with an increasing number of samples ``n_samples``,
# the minimal number of dimensions ``n_components`` increases logarithmically
# in order to guarantee an ``eps``-embedding.
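# As a quick, purely illustrative check (not part of the original example):
# the bound depends only on n_samples and eps, never on the number of
# features, so 1000 samples at eps=0.5 need roughly 331 dimensions.
print("min n_components for 1000 samples at eps=0.5: %d"
      % johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.5))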
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
plt.show()
# %%
# The second plot shows that increasing the admissible
# distortion ``eps`` drastically reduces the minimal number of
# dimensions ``n_components`` for a given number of samples ``n_samples``.
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
plt.show()
# %%
# Empirical validation
# ====================
#
# We validate the above bounds on the 20 newsgroups text document
# (TF-IDF word frequencies) dataset or on the digits dataset:
#
# - for the 20 newsgroups dataset some 500 documents with 100k
# features in total are projected using a sparse random matrix to smaller
# euclidean spaces with various values for the target number of dimensions
# ``n_components``.
#
# - for the digits dataset, some 8x8 gray level pixels data for 500
# handwritten digits pictures are randomly projected to spaces for various
# larger number of dimensions ``n_components``.
#
# The default dataset is the 20 newsgroups dataset. To run the example on the
# digits dataset, pass the ``--use-digits-dataset`` command line argument to
# this script.
if '--use-digits-dataset' in sys.argv:
data = load_digits().data[:500]
else:
data = fetch_20newsgroups_vectorized().data[:500]
# %%
# For each value of ``n_components``, we plot:
#
# - 2D distribution of sample pairs with pairwise distances in original
# and projected spaces as x and y axis respectively.
#
# - 1D histogram of the ratio of those distances (projected / original).
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
min_dist = min(projected_dists.min(), dists.min())
max_dist = max(projected_dists.max(), dists.max())
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu,
extent=[min_dist, max_dist, min_dist, max_dist])
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, range=(0., 2.), edgecolor='k', **density_param)
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
# %%
# We can see that for low values of ``n_components`` the distribution is wide
# with many distorted pairs and is skewed (due to the hard limit of zero
# ratio on the left, as distances are always positive),
# while for larger values of n_components the distortion is controlled
# and the distances are well preserved by the random projection.
# %%
# Remarks
# =======
#
# According to the JL lemma, projecting 500 samples without too much distortion
# will require at least several thousand dimensions, irrespective of the
# number of features of the original dataset.
#
# Hence using random projections on the digits dataset which only has 64
# features in the input space does not make sense: it does not allow
# for dimensionality reduction in this case.
#
# On the 20 newsgroups dataset, on the other hand, the dimensionality can be
# decreased from 56436 down to 10000 while reasonably preserving
# pairwise distances.
| bnaul/scikit-learn | examples/miscellaneous/plot_johnson_lindenstrauss_bound.py | Python | bsd-3-clause | 7,785 |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
import sys
from socket import error
from twisted.internet import reactor
from rosbridge_server import RosbridgeUdpSocket,RosbridgeUdpFactory
def shutdown_hook():
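    """Stops the Twisted reactor when the ROS node is shut down."""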
reactor.stop()
if __name__ == "__main__":
rospy.init_node("rosbridge_websocket")
rospy.on_shutdown(shutdown_hook) # register shutdown hook to stop the server
##################################################
# Parameter handling #
##################################################
# get RosbridgeProtocol parameters
RosbridgeUdpSocket.fragment_timeout = rospy.get_param('~fragment_timeout',
RosbridgeUdpSocket.fragment_timeout)
RosbridgeUdpSocket.delay_between_messages = rospy.get_param('~delay_between_messages',
RosbridgeUdpSocket.delay_between_messages)
RosbridgeUdpSocket.max_message_size = rospy.get_param('~max_message_size',
RosbridgeUdpSocket.max_message_size)
if RosbridgeUdpSocket.max_message_size == "None":
RosbridgeUdpSocket.max_message_size = None
# if authentication should be used
RosbridgeUdpSocket.authenticate = rospy.get_param('~authenticate', False)
port = rospy.get_param('~port', 9090)
interface = rospy.get_param('~interface', "")
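    # Command line arguments, when present, override the ROS parameters above
    # (e.g. ``rosbridge_udp.py --port 9090 --interface 127.0.0.1``).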
if "--port" in sys.argv:
idx = sys.argv.index("--port")+1
if idx < len(sys.argv):
port = int(sys.argv[idx])
else:
print "--port argument provided without a value."
sys.exit(-1)
if "--interface" in sys.argv:
idx = sys.argv.index("--interface")+1
if idx < len(sys.argv):
            interface = sys.argv[idx]  # the interface is a hostname/IP string, not an int
else:
print "--interface argument provided without a value."
sys.exit(-1)
if "--fragment_timeout" in sys.argv:
idx = sys.argv.index("--fragment_timeout") + 1
if idx < len(sys.argv):
RosbridgeUdpSocket.fragment_timeout = int(sys.argv[idx])
else:
print "--fragment_timeout argument provided without a value."
sys.exit(-1)
if "--delay_between_messages" in sys.argv:
idx = sys.argv.index("--delay_between_messages") + 1
if idx < len(sys.argv):
RosbridgeUdpSocket.delay_between_messages = float(sys.argv[idx])
else:
print "--delay_between_messages argument provided without a value."
sys.exit(-1)
if "--max_message_size" in sys.argv:
idx = sys.argv.index("--max_message_size") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeUdpSocket.max_message_size = None
else:
RosbridgeUdpSocket.max_message_size = int(value)
else:
print "--max_message_size argument provided without a value. (can be None or <Integer>)"
sys.exit(-1)
##################################################
# Done with parameter handling #
##################################################
rospy.loginfo("Rosbridge UDP server started on port %d", port)
reactor.listenUDP(port, RosbridgeUdpFactory(), interface=interface)
reactor.run()
| vladrotea/rosbridge_suite | rosbridge_server/scripts/rosbridge_udp.py | Python | bsd-3-clause | 4,996 |
from bson import ObjectId
import simplejson as json
from eve.tests import TestBase
from eve.tests.test_settings import MONGO_DBNAME
from eve.tests.utils import DummyEvent
from eve import STATUS_OK, LAST_UPDATED, ID_FIELD, ISSUES, STATUS, ETAG
from eve.methods.patch import patch_internal
class TestPatch(TestBase):
def test_patch_to_resource_endpoint(self):
_, status = self.patch(self.known_resource_url, data={})
self.assert405(status)
def test_readonly_resource(self):
_, status = self.patch(self.readonly_id_url, data={})
self.assert405(status)
def test_unknown_id(self):
_, status = self.patch(self.unknown_item_id_url,
data={"key1": 'value1'})
self.assert404(status)
def test_unknown_id_different_resource(self):
# patching a 'user' with a valid 'contact' id will 404
_, status = self.patch('%s/%s/' % (self.different_resource,
self.item_id),
data={"key1": "value1"})
self.assert404(status)
# of course we can still patch a 'user'
_, status = self.patch('%s/%s/' % (self.different_resource,
self.user_id),
data={'key1': '{"username": "username1"}'},
headers=[('If-Match', self.user_etag)])
self.assert200(status)
def test_by_name(self):
_, status = self.patch(self.item_name_url, data={'key1': 'value1'})
self.assert405(status)
def test_ifmatch_missing(self):
_, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert403(status)
def test_ifmatch_disabled(self):
self.app.config['IF_MATCH'] = False
r, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert200(status)
self.assertTrue(ETAG not in r)
def test_ifmatch_bad_etag(self):
_, status = self.patch(self.item_id_url,
data={'key1': 'value1'},
headers=[('If-Match', 'not-quite-right')])
self.assert412(status)
def test_unique_value(self):
# TODO
# for the time being we are happy with testing only Eve's custom
# validation. We rely on Cerberus' own test suite for other validation
# unit tests. This test also makes sure that response status is
        # syntactically correct in case of validation issues.
# We should probably test every single case as well (seems overkill).
r, status = self.patch(self.item_id_url,
data={"ref": "%s" % self.alt_ref},
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'ref': "value '%s' is not unique" %
self.alt_ref})
def test_patch_string(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_integer(self):
field = "prog"
test_value = 9999
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_list_as_array(self):
field = "role"
test_value = ["vendor", "client"]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertTrue(set(test_value).issubset(db_value))
def test_patch_rows(self):
field = "rows"
test_value = [
{'sku': 'AT1234', 'price': 99},
{'sku': 'XF9876', 'price': 9999}
]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
for test_item in test_value:
self.assertTrue(test_item in db_value)
def test_patch_list(self):
field = "alist"
test_value = ["a_string", 99]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_dict(self):
field = "location"
test_value = {'address': 'an address', 'city': 'a city'}
changes = {field: test_value}
original_city = []
def keep_original_city(resource_name, updates, original):
original_city.append(original['location']['city'])
self.app.on_update += keep_original_city
self.app.on_updated += keep_original_city
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
self.assertEqual(original_city[0], original_city[1])
def test_patch_datetime(self):
field = "born"
test_value = "Tue, 06 Nov 2012 10:33:31 GMT"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_objectid(self):
field = "tid"
test_value = "4f71c129c88e2018d4000000"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_null_objectid(self):
# verify that #341 is fixed.
field = "tid"
test_value = None
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_defaults(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title', r)
def test_patch_defaults_with_post_override(self):
field = "ref"
test_value = "1234567890123456789012345"
r = self.perform_patch_with_post_override(field, test_value)
self.assert200(r.status_code)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title',
json.loads(r.get_data()))
def test_patch_multiple_fields(self):
fields = ['ref', 'prog', 'role']
test_values = ["9876543210987654321054321", 123, ["agent"]]
changes = {"ref": test_values[0], "prog": test_values[1],
"role": test_values[2]}
r = self.perform_patch(changes)
db_values = self.compare_patch_with_get(fields, r)
for i in range(len(db_values)):
self.assertEqual(db_values[i], test_values[i])
def test_patch_with_post_override(self):
# a POST request with PATCH override turns into a PATCH request
r = self.perform_patch_with_post_override('prog', 1)
self.assert200(r.status_code)
def test_patch_internal(self):
# test that patch_internal is available and working properly.
test_field = 'ref'
test_value = "9876543210987654321098765"
data = {test_field: test_value}
with self.app.test_request_context(self.item_id_url):
r, _, _, status = patch_internal(
self.known_resource, data, concurrency_check=False,
**{'_id': self.item_id})
db_value = self.compare_patch_with_get(test_field, r)
self.assertEqual(db_value, test_value)
self.assert200(status)
def test_patch_etag_header(self):
        # test that Etag is always included in the response header. See #562.
changes = {"ref": "1234567890123456789012345"}
headers = [('Content-Type', 'application/json'),
('If-Match', self.item_etag)]
r = self.test_client.patch(self.item_id_url,
data=json.dumps(changes),
headers=headers)
self.assertTrue('Etag' in r.headers)
def test_patch_nested(self):
changes = {'location.city': 'a nested city',
'location.address': 'a nested address'}
r = self.perform_patch(changes)
values = self.compare_patch_with_get('location', r)
self.assertEqual(values['city'], 'a nested city')
self.assertEqual(values['address'], 'a nested address')
def perform_patch(self, changes):
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
return r
def perform_patch_with_post_override(self, field, value):
headers = [('X-HTTP-Method-Override', 'PATCH'),
('If-Match', self.item_etag),
('Content-Type', 'application/json')]
return self.test_client.post(self.item_id_url,
data=json.dumps({field: value}),
headers=headers)
def compare_patch_with_get(self, fields, patch_response):
raw_r = self.test_client.get(self.item_id_url)
r, status = self.parse_response(raw_r)
self.assert200(status)
self.assertEqual(raw_r.headers.get('ETag'),
patch_response[ETAG])
if isinstance(fields, str):
return r[fields]
else:
return [r[field] for field in fields]
def test_patch_allow_unknown(self):
changes = {"unknown": "unknown"}
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'unknown': 'unknown field'})
self.app.config['DOMAIN'][self.known_resource]['allow_unknown'] = True
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
def test_patch_x_www_form_urlencoded(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
headers = [('If-Match', self.item_etag)]
r, status = self.parse_response(self.test_client.patch(
self.item_id_url, data=changes, headers=headers))
self.assert200(status)
self.assertTrue('OK' in r[STATUS])
def test_patch_referential_integrity(self):
data = {"person": self.unknown_item_id}
headers = [('If-Match', self.invoice_etag)]
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assertValidationErrorStatus(status)
expected = ("value '%s' must exist in resource '%s', field '%s'" %
(self.unknown_item_id, 'contacts',
self.app.config['ID_FIELD']))
self.assertValidationError(r, {'person': expected})
data = {"person": self.item_id}
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(r, self.invoice_id)
def test_patch_write_concern_success(self):
# 0 and 1 are the only valid values for 'w' on our mongod instance (1
# is the default)
self.domain['contacts']['mongo_write_concern'] = {'w': 0}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
def test_patch_write_concern_fail(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert500(status)
def test_patch_missing_standard_date_fields(self):
"""Documents created outside the API context could be lacking the
LAST_UPDATED and/or DATE_CREATED fields.
"""
        # directly insert a document, without DATE_CREATED and LAST_UPDATED
# values.
contacts = self.random_contacts(1, False)
ref = 'test_update_field'
contacts[0]['ref'] = ref
_db = self.connection[MONGO_DBNAME]
_db.contacts.insert(contacts)
        # now retrieve the same document via API and get its etag, which is
        # supposed to be computed on default DATE_CREATED and LAST_UPDATED
# values.
response, status = self.get(self.known_resource, item=ref)
etag = response[ETAG]
_id = response['_id']
# attempt a PATCH with the new etag.
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch('%s/%s' % (self.known_resource_url, _id),
data=changes, headers=[('If-Match', etag)])
self.assert200(status)
def test_patch_subresource(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# GET all invoices by new contact
response, status = self.get('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id))
etag = response[ETAG]
data = {"inv_number": "new_number"}
headers = [('If-Match', etag)]
response, status = self.patch('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id),
data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(response, self.invoice_id)
def test_patch_bandwidth_saver(self):
changes = {'ref': '1234567890123456789012345'}
# bandwidth_saver is on by default
self.assertTrue(self.app.config['BANDWIDTH_SAVER'])
r = self.perform_patch(changes)
self.assertFalse('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
self.item_etag = r[self.app.config['ETAG']]
# test return all fields (bandwidth_saver off)
self.app.config['BANDWIDTH_SAVER'] = False
r = self.perform_patch(changes)
self.assertTrue('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
def test_patch_readonly_field_with_previous_document(self):
schema = self.domain['contacts']['schema']
del(schema['ref']['required'])
# disable read-only on the field so we can store a value which is
# also different form its default value.
schema['read_only_field']['readonly'] = False
changes = {'read_only_field': 'value'}
r = self.perform_patch(changes)
# resume read-only status for the field
self.domain['contacts']['schema']['read_only_field']['readonly'] = True
# test that if the read-only field is included with the payload and its
# value is equal to the one stored with the document, validation
# succeeds (#479).
etag = r['_etag']
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
# test that if the read-only field is included with the payload and its
# value is different from the stored document, validation fails.
etag = r['_etag']
changes = {'read_only_field': 'another value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert422(status)
self.assertTrue('is read-only' in r['_issues']['read_only_field'])
def test_patch_nested_document_not_overwritten(self):
""" Test that nested documents are not overwritten on PATCH and #519
is fixed.
"""
schema = {
'sensor': {
"type": "dict",
"schema": {
"name": {"type": "string"},
"lon": {"type": "float"},
"lat": {"type": "float"},
"value": {"type": "float", "default": 10.3},
"dict": {
'type': 'dict',
'schema': {
'string': {'type': 'string'},
'int': {'type': 'integer'},
}
}
}
},
'test': {
'type': 'string',
'readonly': True,
'default': 'default'
}
}
self.app.config['BANDWIDTH_SAVER'] = False
self.app.register_resource('sensors', {'schema': schema})
changes = {
'sensor': {
'name': 'device_name',
'lon': 43.4,
'lat': 1.31,
'dict': {'int': 99}
}
}
r, status = self.post("sensors", data=changes)
self.assert201(status)
id, etag, value, test, int = (
r[ID_FIELD],
r[ETAG],
r['sensor']['value'],
r['test'],
r['sensor']['dict']['int']
)
changes = {
'sensor': {
'lon': 10.0,
'dict': {'string': 'hi'}
}
}
r, status = self.patch(
"/%s/%s" % ('sensors', id),
data=changes,
headers=[('If-Match', etag)]
)
self.assert200(status)
etag, value, int = (
r[ETAG],
r['sensor']['value'],
r['sensor']['dict']['int']
)
self.assertEqual(value, 10.3)
self.assertEqual(test, 'default')
self.assertEqual(int, 99)
def test_patch_nested_document_nullable_missing(self):
schema = {
'sensor': {
'type': 'dict',
'schema': {
'name': {'type': 'string'},
},
'default': None,
},
'other': {
'type': 'dict',
'schema': {
'name': {'type': 'string'},
},
}
}
self.app.config['BANDWIDTH_SAVER'] = False
self.app.register_resource('sensors', {'schema': schema})
changes = {}
r, status = self.post("sensors", data=changes)
self.assert201(status)
id, etag = r[ID_FIELD], r[ETAG]
self.assertTrue('sensor' in r)
self.assertEqual(r['sensor'], None)
self.assertFalse('other' in r)
changes = {
'sensor': {'name': 'device_name'},
'other': {'name': 'other_name'},
}
r, status = self.patch(
"/%s/%s" % ('sensors', id),
data=changes,
headers=[('If-Match', etag)]
)
self.assert200(status)
self.assertEqual(r['sensor'], {'name': 'device_name'})
self.assertEqual(r['other'], {'name': 'other_name'})
def test_patch_dependent_field_on_origin_document(self):
""" Test that when patching a field which is dependent on another and
this other field is not provided with the patch but is still present
on the target document, the patch will be accepted. See #363.
"""
# this will fail as dependent field is missing even in the
# document we are trying to update.
del(self.domain['contacts']['schema']['dependency_field1']['default'])
del(self.domain['contacts']['defaults']['dependency_field1'])
changes = {'dependency_field2': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert422(status)
# update the stored document by adding dependency field.
changes = {'dependency_field1': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
# now the field2 update will be accepted as the dependency field is
# present in the stored document already.
etag = r['_etag']
changes = {'dependency_field2': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
def assertPatchResponse(self, response, item_id):
self.assertTrue(STATUS in response)
self.assertTrue(STATUS_OK in response[STATUS])
self.assertFalse(ISSUES in response)
self.assertTrue(ID_FIELD in response)
self.assertEqual(response[ID_FIELD], item_id)
self.assertTrue(LAST_UPDATED in response)
self.assertTrue(ETAG in response)
self.assertTrue('_links' in response)
self.assertItemLink(response['_links'], item_id)
def patch(self, url, data, headers=[]):
headers.append(('Content-Type', 'application/json'))
r = self.test_client.patch(url,
data=json.dumps(data),
headers=headers)
return self.parse_response(r)
class TestEvents(TestBase):
new_ref = "0123456789012345678901234"
def test_on_pre_PATCH(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_pre_PATCH_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_PATCH_dynamic_filter(self):
def filter_this(resource, request, lookup):
lookup["_id"] = self.unknown_item_id
self.app.on_pre_PATCH += filter_this
# Would normally patch the known document; will return 404 instead.
r, s = self.parse_response(self.patch())
self.assert404(s)
def test_on_post_PATCH(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(200, devent.called[2].status_code)
self.assertEqual(3, len(devent.called))
def test_on_post_PATCH_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH_contacts += devent
self.patch()
self.assertEqual(200, devent.called[1].status_code)
self.assertEqual(2, len(devent.called))
def test_on_update(self):
devent = DummyEvent(self.before_update)
self.app.on_update += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_update_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_update_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_updated(self):
devent = DummyEvent(self.after_update)
self.app.on_updated += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_updated_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_updated_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def before_update(self):
db = self.connection[MONGO_DBNAME]
contact = db.contacts.find_one(ObjectId(self.item_id))
return contact['ref'] == self.item_name
def after_update(self):
return not self.before_update()
def patch(self):
headers = [('Content-Type', 'application/json'),
('If-Match', self.item_etag)]
data = json.dumps({"ref": self.new_ref})
return self.test_client.patch(
self.item_id_url, data=data, headers=headers)
| jzorrof/eve | eve/tests/methods/patch.py | Python | bsd-3-clause | 25,507 |
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import datetime
import email.mime.text
import getpass
import os
import re
import smtplib
import subprocess
import sys
import tempfile
import urllib2
BUILD_DIR = os.path.dirname(__file__)
NACL_DIR = os.path.dirname(BUILD_DIR)
TOOLCHAIN_REV_DIR = os.path.join(NACL_DIR, 'toolchain_revisions')
PKG_VER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
PKGS = ['pnacl_newlib', 'pnacl_translator']
REV_FILES = [os.path.join(TOOLCHAIN_REV_DIR, '%s.json' % package)
for package in PKGS]
def ParseArgs(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Update pnacl_newlib.json PNaCl version.
LLVM and other projects are checked-in to the NaCl repository, but their
head isn't necessarily the one that we currently use in PNaCl. The
pnacl_newlib.json and pnacl_translator.json files point at subversion
revisions to use for tools such as LLVM. Our build process then
downloads pre-built tool tarballs from the toolchain build waterfall.
git repository before running this script:
______________________
| |
v |
...----A------B------C------D------ NaCl HEAD
^ ^ ^ ^
| | | |__ Latest pnacl_{newlib,translator}.json update.
| | |
| | |__ A newer LLVM change (LLVM repository HEAD).
| |
| |__ Oldest LLVM change since this PNaCl version.
|
|__ pnacl_{newlib,translator}.json points at an older LLVM change.
git repository after running this script:
_______________
| |
v |
...----A------B------C------D------E------ NaCl HEAD
Note that there could be any number of non-PNaCl changes between each of
these changelists, and that the user can also decide to update the
pointer to B instead of C.
There is further complication when toolchain builds are merged.
""")
parser.add_argument('--email', metavar='ADDRESS', type=str,
default=getpass.getuser()+'@chromium.org',
help="Email address to send errors to.")
parser.add_argument('--svn-id', metavar='SVN_ID', type=int, default=0,
help="Update to a specific SVN ID instead of the most "
"recent SVN ID with a PNaCl change. This value must "
"be more recent than the one in the current "
"pnacl_newlib.json. This option is useful when multiple "
"changelists' toolchain builds were merged, or when "
"too many PNaCl changes would be pulled in at the "
"same time.")
parser.add_argument('--dry-run', default=False, action='store_true',
help="Print the changelist that would be sent, but "
"don't actually send anything to review.")
# TODO(jfb) The following options come from download_toolchain.py and
# should be shared in some way.
parser.add_argument('--filter_out_predicates', default=[],
help="Toolchains to filter out.")
return parser.parse_args()
def ExecCommand(command):
try:
return subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
sys.stderr.write('\nRunning `%s` returned %i, got:\n%s\n' %
(' '.join(e.cmd), e.returncode, e.output))
raise
def GetCurrentRevision():
return [ExecCommand([sys.executable, PKG_VER,
'getrevision',
'--revision-package', package]).strip()
for package in PKGS]
def SetCurrentRevision(revision_num):
for package in PKGS:
ExecCommand([sys.executable, PKG_VER] +
# TODO(dschuff) pnacl_newlib shouldn't use cloud-bucket
# once we switch fully to toolchain_build.
(['--cloud-bucket', 'nativeclient-archive2/pnacl_buildsh'] if
package == 'pnacl_newlib' else []) +
['setrevision',
'--revision-package', package,
'--revision', str(revision_num)])
def GitCurrentBranch():
return ExecCommand(['git', 'symbolic-ref', 'HEAD', '--short']).strip()
def GitStatus():
"""List of statuses, one per path, of paths in the current git branch.
Ignores untracked paths."""
out = ExecCommand(['git', 'status', '--porcelain']).strip().split('\n')
return [f.strip() for f in out if not re.match('^\?\? (.*)$', f.strip())]
def SyncSources():
"""Assumes a git-svn checkout of NaCl. See:
www.chromium.org/nativeclient/how-tos/how-to-use-git-svn-with-native-client
"""
ExecCommand(['gclient', 'sync'])
def GitCommitInfo(info='', obj=None, num=None, extra=[]):
"""Commit information, where info is one of the shorthands in git_formats.
obj can be a path or a hash.
num is the number of results to return.
extra is a list of optional extra arguments."""
# Shorthands for git's pretty formats.
# See PRETTY FORMATS format:<string> in `git help log`.
git_formats = {
'': '',
'hash': '%H',
'date': '%ci',
'author': '%aN',
'subject': '%s',
'body': '%b',
}
cmd = ['git', 'log', '--format=format:%s' % git_formats[info]] + extra
if num: cmd += ['-n'+str(num)]
if obj: cmd += [obj]
return ExecCommand(cmd).strip()
def GitCommitsSince(date):
"""List of commit hashes since a particular date,
in reverse chronological order."""
return GitCommitInfo(info='hash',
extra=['--since="%s"' % date]).split('\n')
def GitFilesChanged(commit_hash):
"""List of files changed in a commit."""
return GitCommitInfo(obj=commit_hash, num=1,
extra=['--name-only']).split('\n')
def GitChangesPath(commit_hash, path):
"""Returns True if the commit changes a file under the given path."""
return any([
re.search('^' + path, f.strip()) for f in
GitFilesChanged(commit_hash)])
def GitBranchExists(name):
return len(ExecCommand(['git', 'branch', '--list', name]).strip()) != 0
def GitCheckout(branch, force=False):
"""Checkout an existing branch.
force throws away local changes."""
ExecCommand(['git', 'checkout'] +
(['--force'] if force else []) +
[branch])
def GitCheckoutNewBranch(branch):
"""Create and checkout a new git branch."""
ExecCommand(['git', 'checkout', '-b', branch])
def GitDeleteBranch(branch, force=False):
"""Force-delete a branch."""
ExecCommand(['git', 'branch', '-D' if force else '-d', branch])
def GitAdd(file):
ExecCommand(['git', 'add', file])
def GitCommit(message):
with tempfile.NamedTemporaryFile() as tmp:
tmp.write(message)
tmp.flush()
ExecCommand(['git', 'commit', '--file=%s' % tmp.name])
def UploadChanges():
"""Upload changes, don't prompt."""
# TODO(jfb) Using the commit queue and avoiding git try + manual commit
# would be much nicer. See '--use-commit-queue'
return ExecCommand(['git', 'cl', 'upload', '--send-mail', '-f'])
def GitTry():
return ExecCommand(['git', 'try'])
def FindCommitWithGitSvnId(git_svn_id):
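  """Returns the git commit hash corresponding to the given SVN revision."""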
while True:
    # This command needs to retry because git-svn partially rebuilds its
# revision map for every commit. Asking it a second time fixes the
# issue.
out = ExecCommand(['git', 'svn', 'find-rev', 'r' + git_svn_id]).strip()
if not re.match('^Partial-rebuilding ', out):
break
return out
def CommitMessageToCleanDict(commit_message):
"""Extract and clean commit message fields that follow the NaCl commit
message convention. Don't repeat them as-is, to avoid confusing our
infrastructure."""
res = {}
fields = [
['git svn id', ('\s*git-svn-id: '
'svn://[^@]+@([0-9]+) [a-f0-9\-]+'), '<none>'],
['reviewers tbr', '\s*TBR=([^\n]+)', ''],
['reviewers', '\s*R=([^\n]+)', ''],
['review url', '\s*Review URL: *([^\n]+)', '<none>'],
['bug', '\s*BUG=([^\n]+)', '<none>'],
['test', '\s*TEST=([^\n]+)', '<none>'],
]
for key, regex, none in fields:
found = re.search(regex, commit_message)
if found:
commit_message = commit_message.replace(found.group(0), '')
res[key] = found.group(1).strip()
else:
res[key] = none
res['body'] = commit_message.strip()
return res
def SendEmail(user_email, out):
if user_email:
sys.stderr.write('\nSending email to %s.\n' % user_email)
msg = email.mime.text.MIMEText(out)
msg['Subject'] = '[PNaCl revision updater] failure!'
msg['From'] = '[email protected]'
msg['To'] = user_email
s = smtplib.SMTP('localhost')
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
else:
sys.stderr.write('\nNo email address specified.')
def DryRun(out):
sys.stdout.write("DRY RUN: " + out + "\n")
def Done(out):
sys.stdout.write(out)
sys.exit(0)
class CLInfo:
"""Changelist information: sorted dictionary of NaCl-standard fields."""
def __init__(self, desc):
self._desc = desc
self._vals = collections.OrderedDict([
('git svn id', None),
('hash', None),
('author', None),
('date', None),
('subject', None),
('commits since', None),
('bug', None),
('test', None),
('review url', None),
('reviewers tbr', None),
('reviewers', None),
('body', None),
])
def __getitem__(self, key):
return self._vals[key]
def __setitem__(self, key, val):
assert key in self._vals.keys()
self._vals[key] = str(val)
def __str__(self):
"""Changelist to string.
A short description of the change, e.g.:
r12345: ([email protected]) Subject of the change.
If the change is itself pulling in other changes from
sub-repositories then take its relevant description and append it to
the string. These sub-directory updates are also script-generated
and therefore have a predictable format. e.g.:
r12345: ([email protected]) Subject of the change.
| dead123: ([email protected]) Other change in another repository.
| beef456: ([email protected]) Yet another cross-repository change.
"""
desc = (' r' + self._vals['git svn id'] + ': (' +
self._vals['author'] + ') ' +
self._vals['subject'])
if GitChangesPath(self._vals['hash'], 'pnacl/COMPONENT_REVISIONS'):
git_hash_abbrev = '[0-9a-fA-F]{7}'
email = '[^@)]+@[^)]+\.[^)]+'
desc = '\n'.join([desc] + [
' | ' + line for line in self._vals['body'].split('\n') if
re.match('^ *%s: \(%s\) .*$' % (git_hash_abbrev, email), line)])
return desc
def FmtOut(tr_points_at, pnacl_changes, err=[], msg=[]):
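  """Formats the commit message (or an error/status report) for the revision update CL."""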
assert isinstance(err, list)
assert isinstance(msg, list)
old_svn_id = tr_points_at['git svn id']
new_svn_id = pnacl_changes[-1]['git svn id'] if pnacl_changes else '?'
changes = '\n'.join([str(cl) for cl in pnacl_changes])
bugs = '\n'.join(list(set(
['BUG= ' + cl['bug'].strip() if cl['bug'] else '' for
cl in pnacl_changes]) - set([''])))
reviewers = ', '.join(list(set(
[r.strip() for r in
(','.join([
cl['author'] + ',' + cl['reviewers tbr'] + ',' + cl['reviewers']
for cl in pnacl_changes])).split(',')]) - set([''])))
return (('*** ERROR ***\n' if err else '') +
'\n\n'.join(err) +
'\n\n'.join(msg) +
('\n\n' if err or msg else '') +
('Update revision for PNaCl r%s->r%s\n\n'
'Pull the following PNaCl changes into NaCl:\n%s\n\n'
'%s\n'
'R= %s\n'
'TEST=git try\n'
'NOTRY=true\n'
'(Please LGTM this change and tick the "commit" box)\n' %
(old_svn_id, new_svn_id, changes, bugs, reviewers)))
def Main():
args = ParseArgs(sys.argv[1:])
tr_points_at = CLInfo('revision update points at PNaCl version')
pnacl_changes = []
msg = []
branch = GitCurrentBranch()
assert branch == 'master', ('Must be on branch master, currently on %s' %
branch)
try:
status = GitStatus()
assert len(status) == 0, ("Repository isn't clean:\n %s" %
'\n '.join(status))
SyncSources()
# The current revision file points at a specific PNaCl LLVM
# version. LLVM is checked-in to the NaCl repository, but its head
# isn't necessarily the one that we currently use in PNaCl.
(pnacl_revision, translator_revision) = GetCurrentRevision()
tr_points_at['git svn id'] = pnacl_revision
tr_points_at['hash'] = FindCommitWithGitSvnId(tr_points_at['git svn id'])
tr_points_at['date'] = GitCommitInfo(
info='date', obj=tr_points_at['hash'], num=1)
tr_points_at['subject'] = GitCommitInfo(
info='subject', obj=tr_points_at['hash'], num=1)
recent_commits = GitCommitsSince(tr_points_at['date'])
tr_points_at['commits since'] = len(recent_commits)
assert len(recent_commits) > 1
if args.svn_id and args.svn_id <= int(tr_points_at['git svn id']):
Done(FmtOut(tr_points_at, pnacl_changes,
err=["Can't update to SVN ID r%s, the current "
"PNaCl revision's SVN ID (r%s) is more recent." %
(args.svn_id, tr_points_at['git svn id'])]))
# Find the commits changing PNaCl files that follow the previous
# PNaCl revision pointer.
pnacl_pathes = ['pnacl/', 'toolchain_build/']
pnacl_hashes = list(set(reduce(
lambda acc, lst: acc + lst,
[[cl for cl in recent_commits[:-1] if
GitChangesPath(cl, path)] for
path in pnacl_pathes])))
for hash in pnacl_hashes:
cl = CLInfo('PNaCl change ' + hash)
cl['hash'] = hash
for i in ['author', 'date', 'subject']:
cl[i] = GitCommitInfo(info=i, obj=hash, num=1)
for k,v in CommitMessageToCleanDict(
GitCommitInfo(info='body', obj=hash, num=1)).iteritems():
cl[k] = v
pnacl_changes.append(cl)
# The PNaCl hashes weren't ordered chronologically, make sure the
# changes are.
pnacl_changes.sort(key=lambda x: int(x['git svn id']))
if args.svn_id:
pnacl_changes = [cl for cl in pnacl_changes if
int(cl['git svn id']) <= args.svn_id]
if len(pnacl_changes) == 0:
Done(FmtOut(tr_points_at, pnacl_changes,
msg=['No PNaCl change since r%s.' %
tr_points_at['git svn id']]))
new_pnacl_revision = pnacl_changes[-1]['git svn id']
new_branch_name = ('pnacl-revision-update-to-%s' %
new_pnacl_revision)
if GitBranchExists(new_branch_name):
# TODO(jfb) Figure out if git-try succeeded, checkout the branch
# and dcommit.
raise Exception("Branch %s already exists, the change hasn't "
"landed yet.\nPlease check trybots and dcommit it "
"manually." % new_branch_name)
if args.dry_run:
DryRun("Would check out branch: " + new_branch_name)
else:
GitCheckoutNewBranch(new_branch_name)
if args.dry_run:
DryRun("Would update PNaCl revision to: %s" % new_pnacl_revision)
else:
SetCurrentRevision(new_pnacl_revision)
for f in REV_FILES:
GitAdd(f)
GitCommit(FmtOut(tr_points_at, pnacl_changes))
upload_res = UploadChanges()
msg += ['Upload result:\n%s' % upload_res]
try_res = GitTry()
msg += ['Try result:\n%s' % try_res]
GitCheckout('master', force=False)
Done(FmtOut(tr_points_at, pnacl_changes, msg=msg))
except SystemExit as e:
# Normal exit.
raise
except (BaseException, Exception) as e:
# Leave the branch around, if any was created: it'll prevent next
# runs of the cronjob from succeeding until the failure is fixed.
out = FmtOut(tr_points_at, pnacl_changes, msg=msg,
err=['Failed at %s: %s' % (datetime.datetime.now(), e)])
sys.stderr.write(out)
if not args.dry_run:
SendEmail(args.email, out)
GitCheckout('master', force=True)
raise
if __name__ == '__main__':
Main()
| wilsonianb/nacl_contracts | build/update_pnacl_tool_revisions.py | Python | bsd-3-clause | 16,688 |
"""
Copyright (c) 2011, The MITRE Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the author.
4. Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
test
Description: Test module
Initial Version: Feb 23, 2011
@author: Michael Joseph Walsh
"""
def helloworld():
"""
Returns "hello world" annd prints "returning 'hello world'" to the
sys.stdout
"""
print "returning 'hello world'"
return "hello world"
def greaterThanTen(n):
"""
Returns True if 'n' is greater than 10
"""
return n>10
class MyClass(object):
def __init__(self):
self._globals = {}
@property
def globals(self):
return self._globals
@globals.setter
def globals(self, value):
self._globals = value
a = MyClass()
locals = {}
exec("a = 1", a.globals, locals)
print "globals = {0}".format([g for g in a.globals if not g.startswith("__")])
print "locals = {0}".format(locals)
exec("a += 1", a.globals, locals)
print "globals = {0}".format([g for g in a.globals if not g.startswith("__")])
print "locals = {0}".format(locals)
a.globals["b"] = 5
print "globals = {0}".format([g for g in a.globals if not g.startswith("__")])
print "locals = {0}".format(locals)
exec("global b;b += 1", a.globals, locals) | nemonik/Intellect | intellect/examples/testing/Test.py | Python | bsd-3-clause | 2,750 |
from django.views.generic import *
| akx/coffin | coffin/views/generic/__init__.py | Python | bsd-3-clause | 35 |
"""News Tests"""
| hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/news/test/__init__.py | Python | bsd-3-clause | 18 |
from django.contrib.gis.geos import GEOSGeometry, LinearRing, Polygon, Point
from django.contrib.gis.maps.google.gmap import GoogleMapException
from math import pi, sin, cos, log, exp, atan
# Constants used for degree to radian conversion, and vice-versa.
DTOR = pi / 180.
RTOD = 180. / pi
def get_width_height(envelope):
# Getting the lower-left, upper-left, and upper-right
# coordinates of the envelope.
ll = Point(envelope[0][0])
ul = Point(envelope[0][1])
ur = Point(envelope[0][2])
height = ll.distance(ul)
width = ul.distance(ur)
return width, height
class GoogleZoom(object):
"""
GoogleZoom is a utility for performing operations related to the zoom
levels on Google Maps.
This class is inspired by the OpenStreetMap Mapnik tile generation routine
`generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
"Google Maps Hacks" by Rich Gibson and Schuyler Erle.
`generate_tiles.py` may be found at:
http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py
"Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
"""
def __init__(self, num_zoom=19, tilesize=256):
"Initializes the Google Zoom object."
# Google's tilesize is 256x256, square tiles are assumed.
self._tilesize = tilesize
# The number of zoom levels
self._nzoom = num_zoom
# Initializing arrays to hold the parameters for each
# one of the zoom levels.
self._degpp = [] # Degrees per pixel
self._radpp = [] # Radians per pixel
self._npix = [] # 1/2 the number of pixels for a tile at the given zoom level
# Incrementing through the zoom levels and populating the
# parameter arrays.
z = tilesize # The number of pixels per zoom level.
for i in xrange(num_zoom):
            # Getting the degrees and radians per pixel, and 1/2 the number of
            # pixels for every zoom level.
self._degpp.append(z / 360.) # degrees per pixel
            self._radpp.append(z / (2 * pi)) # radians per pixel
self._npix.append(z / 2) # number of pixels to center of tile
# Multiplying `z` by 2 for the next iteration.
z *= 2
def __len__(self):
"Returns the number of zoom levels."
return self._nzoom
def get_lon_lat(self, lonlat):
"Unpacks longitude, latitude from GEOS Points and 2-tuples."
if isinstance(lonlat, Point):
lon, lat = lonlat.coords
else:
lon, lat = lonlat
return lon, lat
def lonlat_to_pixel(self, lonlat, zoom):
"Converts a longitude, latitude coordinate pair for the given zoom level."
# Setting up, unpacking the longitude, latitude values and getting the
# number of pixels for the given zoom level.
lon, lat = self.get_lon_lat(lonlat)
npix = self._npix[zoom]
# Calculating the pixel x coordinate by multiplying the longitude
        # value with the number of degrees/pixel at the given
# zoom level.
px_x = round(npix + (lon * self._degpp[zoom]))
# Creating the factor, and ensuring that 1 or -1 is not passed in as the
# base to the logarithm. Here's why:
# if fac = -1, we'll get log(0) which is undefined;
# if fac = 1, our logarithm base will be divided by 0, also undefined.
fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
# Calculating the pixel y coordinate.
px_y = round(npix + (0.5 * log((1 + fac)/(1 - fac)) * (-1.0 * self._radpp[zoom])))
# Returning the pixel x, y to the caller of the function.
return (px_x, px_y)
def pixel_to_lonlat(self, px, zoom):
"Converts a pixel to a longitude, latitude pair at the given zoom level."
if len(px) != 2:
raise TypeError('Pixel should be a sequence of two elements.')
# Getting the number of pixels for the given zoom level.
npix = self._npix[zoom]
# Calculating the longitude value, using the degrees per pixel.
lon = (px[0] - npix) / self._degpp[zoom]
# Calculating the latitude value.
lat = RTOD * ( 2 * atan(exp((px[1] - npix)/ (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
# Returning the longitude, latitude coordinate pair.
return (lon, lat)
def tile(self, lonlat, zoom):
"""
Returns a Polygon corresponding to the region represented by a fictional
Google Tile for the given longitude/latitude pair and zoom level. This
tile is used to determine the size of a tile at the given point.
"""
# The given lonlat is the center of the tile.
delta = self._tilesize / 2
# Getting the pixel coordinates corresponding to the
# the longitude/latitude.
px = self.lonlat_to_pixel(lonlat, zoom)
# Getting the lower-left and upper-right lat/lon coordinates
# for the bounding box of the tile.
ll = self.pixel_to_lonlat((px[0]-delta, px[1]-delta), zoom)
ur = self.pixel_to_lonlat((px[0]+delta, px[1]+delta), zoom)
# Constructing the Polygon, representing the tile and returning.
return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)
def get_zoom(self, geom):
"Returns the optimal Zoom level for the given geometry."
# Checking the input type.
if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
# Getting the envelope for the geometry, and its associated width, height
# and centroid.
env = geom.envelope
env_w, env_h = get_width_height(env)
center = env.centroid
for z in xrange(self._nzoom):
# Getting the tile at the zoom level.
tile = self.tile(center, z)
tile_w, tile_h = get_width_height(tile)
# When we span more than one tile, this is an approximately good
# zoom level.
if (env_w > tile_w) or (env_h > tile_h):
if z == 0:
raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
return z-1
# Otherwise, we've zoomed in to the max.
return self._nzoom-1
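# --- Added illustrative sketch; not part of the original Django module. ---
# Assumes a Django install with GEOS available (the module already imports
# django.contrib.gis.geos); the coordinates and zoom level are arbitrary.
if __name__ == '__main__':
    _gz = GoogleZoom()
    # Round-trip an arbitrary longitude/latitude pair through pixel space.
    _px = _gz.lonlat_to_pixel((-79.38, 43.65), 10)
    _ll = _gz.pixel_to_lonlat(_px, 10)
    print "pixel %s -> lon/lat %s" % (_px, _ll)
    # A fictional Google tile centred on the same point at the same zoom.
    print _gz.tile((-79.38, 43.65), 10).extent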
| hugs/django | django/contrib/gis/maps/google/zoom.py | Python | bsd-3-clause | 6,509 |
import calendar
import json
from datetime import datetime
from time import gmtime, time
from urlparse import parse_qsl, urlparse
from wsgiref.handlers import format_date_time
import jwt
from browserid.errors import ExpiredSignatureError
from django_statsd.clients import statsd
from receipts import certs
from lib.cef_loggers import receipt_cef
from lib.crypto.receipt import sign
from lib.utils import static_url
from services.utils import settings
from utils import (CONTRIB_CHARGEBACK, CONTRIB_NO_CHARGE, CONTRIB_PURCHASE,
CONTRIB_REFUND, log_configure, log_exception, log_info,
mypool)
# Go configure the log.
log_configure()
# This has to be imported after the settings (utils).
import receipts # NOQA, used for patching in the tests
status_codes = {
200: '200 OK',
405: '405 Method Not Allowed',
500: '500 Internal Server Error',
}
class VerificationError(Exception):
pass
class InvalidReceipt(Exception):
"""
    InvalidReceipt takes a message, which is then displayed back to the app so
    it can understand the failure.
"""
pass
class RefundedReceipt(Exception):
pass
class Verify:
def __init__(self, receipt, environ):
self.receipt = receipt
self.environ = environ
# This is so the unit tests can override the connection.
self.conn, self.cursor = None, None
def check_full(self):
"""
This is the default that verify will use, this will
do the entire stack of checks.
"""
receipt_domain = urlparse(static_url('WEBAPPS_RECEIPT_URL')).netloc
try:
self.decoded = self.decode()
self.check_type('purchase-receipt')
self.check_url(receipt_domain)
self.check_purchase()
except InvalidReceipt, err:
return self.invalid(str(err))
except RefundedReceipt:
return self.refund()
return self.ok_or_expired()
def check_without_purchase(self):
"""
This is what the developer and reviewer receipts do, we aren't
expecting a purchase, but require a specific type and install.
"""
try:
self.decoded = self.decode()
self.check_type('developer-receipt', 'reviewer-receipt')
self.check_url(settings.DOMAIN)
except InvalidReceipt, err:
return self.invalid(str(err))
return self.ok_or_expired()
def check_without_db(self, status):
"""
This is what test receipts do, no purchase or install check.
In this case the return is custom to the caller.
"""
assert status in ['ok', 'expired', 'invalid', 'refunded']
try:
self.decoded = self.decode()
self.check_type('test-receipt')
self.check_url(settings.DOMAIN)
except InvalidReceipt, err:
return self.invalid(str(err))
return getattr(self, status)()
def decode(self):
"""
Verifies that the receipt can be decoded and that the initial
contents of the receipt are correct.
If its invalid, then just return invalid rather than give out any
information.
"""
try:
receipt = decode_receipt(self.receipt)
except:
log_exception({'receipt': '%s...' % self.receipt[:10],
'app': self.get_app_id(raise_exception=False)})
log_info('Error decoding receipt')
raise InvalidReceipt('ERROR_DECODING')
try:
assert receipt['user']['type'] == 'directed-identifier'
except (AssertionError, KeyError):
log_info('No directed-identifier supplied')
raise InvalidReceipt('NO_DIRECTED_IDENTIFIER')
return receipt
def check_type(self, *types):
"""
Verifies that the type of receipt is what we expect.
"""
if self.decoded.get('typ', '') not in types:
log_info('Receipt type not in %s' % ','.join(types))
raise InvalidReceipt('WRONG_TYPE')
def check_url(self, domain):
"""
Verifies that the URL of the verification is what we expect.
:param domain: the domain you expect the receipt to be verified at,
note that "real" receipts are verified at a different domain
from the main marketplace domain.
"""
path = self.environ['PATH_INFO']
parsed = urlparse(self.decoded.get('verify', ''))
if parsed.netloc != domain:
log_info('Receipt had invalid domain')
raise InvalidReceipt('WRONG_DOMAIN')
if parsed.path != path:
log_info('Receipt had the wrong path')
raise InvalidReceipt('WRONG_PATH')
def get_user(self):
"""
Attempt to retrieve the user information from the receipt.
"""
try:
return self.decoded['user']['value']
except KeyError:
# If somehow we got a valid receipt without a uuid
# that's a problem. Log here.
log_info('No user in receipt')
raise InvalidReceipt('NO_USER')
def get_storedata(self):
"""
Attempt to retrieve the storedata information from the receipt.
"""
try:
storedata = self.decoded['product']['storedata']
return dict(parse_qsl(storedata))
except Exception, e:
log_info('Invalid store data: {err}'.format(err=e))
raise InvalidReceipt('WRONG_STOREDATA')
def get_app_id(self, raise_exception=True):
"""
Attempt to retrieve the app id from the storedata in the receipt.
"""
try:
return int(self.get_storedata()['id'])
except Exception, e:
if raise_exception:
# There was some value for storedata but it was invalid.
log_info('Invalid store data for app id: {err}'.format(
err=e))
raise InvalidReceipt('WRONG_STOREDATA')
def get_contribution_id(self):
"""
Attempt to retrieve the contribution id
from the storedata in the receipt.
"""
try:
return int(self.get_storedata()['contrib'])
except Exception, e:
# There was some value for storedata but it was invalid.
log_info('Invalid store data for contrib id: {err}'.format(
err=e))
raise InvalidReceipt('WRONG_STOREDATA')
def get_inapp_id(self):
"""
Attempt to retrieve the inapp id
from the storedata in the receipt.
"""
return self.get_storedata()['inapp_id']
def setup_db(self):
"""
Establish a connection to the database.
All database calls are done at a low level and avoid the
Django ORM.
"""
if not self.cursor:
self.conn = mypool.connect()
self.cursor = self.conn.cursor()
def check_purchase(self):
"""
Verifies that the app or inapp has been purchased.
"""
storedata = self.get_storedata()
if 'contrib' in storedata:
self.check_purchase_inapp()
else:
self.check_purchase_app()
def check_purchase_inapp(self):
"""
Verifies that the inapp has been purchased.
"""
self.setup_db()
sql = """SELECT i.guid, c.type FROM stats_contributions c
JOIN inapp_products i ON i.id=c.inapp_product_id
WHERE c.id = %(contribution_id)s LIMIT 1;"""
self.cursor.execute(
sql,
{'contribution_id': self.get_contribution_id()}
)
result = self.cursor.fetchone()
if not result:
log_info('Invalid in-app receipt, no purchase')
raise InvalidReceipt('NO_PURCHASE')
contribution_inapp_id, purchase_type = result
self.check_purchase_type(purchase_type)
self.check_inapp_product(contribution_inapp_id)
def check_inapp_product(self, contribution_inapp_id):
if contribution_inapp_id != self.get_inapp_id():
log_info('Invalid receipt, inapp_id does not match')
raise InvalidReceipt('NO_PURCHASE')
def check_purchase_app(self):
"""
Verifies that the app has been purchased by the user.
"""
self.setup_db()
sql = """SELECT type FROM addon_purchase
WHERE addon_id = %(app_id)s
AND uuid = %(uuid)s LIMIT 1;"""
self.cursor.execute(sql, {'app_id': self.get_app_id(),
'uuid': self.get_user()})
result = self.cursor.fetchone()
if not result:
log_info('Invalid app receipt, no purchase')
raise InvalidReceipt('NO_PURCHASE')
self.check_purchase_type(result[0])
def check_purchase_type(self, purchase_type):
"""
Verifies that the purchase type is of a valid type.
"""
if purchase_type in (CONTRIB_REFUND, CONTRIB_CHARGEBACK):
log_info('Valid receipt, but refunded')
raise RefundedReceipt
elif purchase_type in (CONTRIB_PURCHASE, CONTRIB_NO_CHARGE):
log_info('Valid receipt')
return
else:
log_info('Valid receipt, but invalid contribution')
raise InvalidReceipt('WRONG_PURCHASE')
def invalid(self, reason=''):
receipt_cef.log(
self.environ,
self.get_app_id(raise_exception=False),
'verify',
'Invalid receipt'
)
return {'status': 'invalid', 'reason': reason}
def ok_or_expired(self):
# This receipt is ok now let's check it's expiry.
# If it's expired, we'll have to return a new receipt
try:
expire = int(self.decoded.get('exp', 0))
except ValueError:
log_info('Error with expiry in the receipt')
return self.expired()
now = calendar.timegm(gmtime()) + 10 # For any clock skew.
if now > expire:
log_info('This receipt has expired: %s UTC < %s UTC'
% (datetime.utcfromtimestamp(expire),
datetime.utcfromtimestamp(now)))
return self.expired()
return self.ok()
def ok(self):
return {'status': 'ok'}
def refund(self):
receipt_cef.log(
self.environ,
self.get_app_id(raise_exception=False),
'verify',
'Refunded receipt'
)
return {'status': 'refunded'}
def expired(self):
receipt_cef.log(
self.environ,
self.get_app_id(raise_exception=False),
'verify',
'Expired receipt'
)
if settings.WEBAPPS_RECEIPT_EXPIRED_SEND:
self.decoded['exp'] = (calendar.timegm(gmtime()) +
settings.WEBAPPS_RECEIPT_EXPIRY_SECONDS)
# Log that we are signing a new receipt as well.
receipt_cef.log(
self.environ,
self.get_app_id(raise_exception=False),
'sign',
'Expired signing request'
)
return {'status': 'expired',
'receipt': sign(self.decoded)}
return {'status': 'expired'}
def get_headers(length):
return [('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Methods', 'POST'),
('Content-Type', 'application/json'),
('Content-Length', str(length)),
('Cache-Control', 'no-cache'),
('Last-Modified', format_date_time(time()))]
def decode_receipt(receipt):
"""
Cracks the receipt using the private key. This will probably change
to using the cert at some point, especially when we get the HSM.
"""
with statsd.timer('services.decode'):
if settings.SIGNING_SERVER_ACTIVE:
verifier = certs.ReceiptVerifier(valid_issuers=
settings.SIGNING_VALID_ISSUERS)
try:
result = verifier.verify(receipt)
except ExpiredSignatureError:
# Until we can do something meaningful with this, just ignore.
return jwt.decode(receipt.split('~')[1], verify=False)
if not result:
raise VerificationError()
return jwt.decode(receipt.split('~')[1], verify=False)
else:
key = jwt.rsa_load(settings.WEBAPPS_RECEIPT_KEY)
raw = jwt.decode(receipt, key)
return raw
def status_check(environ):
output = ''
# Check we can read from the users_install table, should be nice and
# fast. Anything that fails here, connecting to db, accessing table
# will be an error we need to know about.
if not settings.SIGNING_SERVER_ACTIVE:
return 500, 'SIGNING_SERVER_ACTIVE is not set'
try:
conn = mypool.connect()
cursor = conn.cursor()
cursor.execute('SELECT id FROM users_install ORDER BY id DESC LIMIT 1')
except Exception, err:
return 500, str(err)
return 200, output
def receipt_check(environ):
output = ''
with statsd.timer('services.verify'):
data = environ['wsgi.input'].read()
try:
verify = Verify(data, environ)
return 200, json.dumps(verify.check_full())
except:
log_exception('<none>')
return 500, ''
return output
def application(environ, start_response):
body = ''
path = environ.get('PATH_INFO', '')
if path == '/services/status/':
status, body = status_check(environ)
else:
# Only allow POST through as per spec.
if environ.get('REQUEST_METHOD') != 'POST':
status = 405
else:
status, body = receipt_check(environ)
start_response(status_codes[status], get_headers(len(body)))
return [body]
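# --- Added illustrative sketch; not part of the original service. ---
# Assumes the marketplace settings and database used above are importable and
# configured; the environ below is a minimal stand-in for a WSGI server.
if __name__ == '__main__':
    from StringIO import StringIO
    _environ = {'PATH_INFO': '/services/status/',
                'REQUEST_METHOD': 'POST',
                'wsgi.input': StringIO('')}
    def _start_response(status, headers):
        print status
    print application(_environ, _start_response)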
| andymckay/zamboni | services/verify.py | Python | bsd-3-clause | 14,089 |
from math import sqrt
import numpy as np
from scipy._lib._util import _validate_int
from scipy.optimize import brentq
from scipy.special import ndtri
from ._discrete_distns import binom
from ._common import ConfidenceInterval
class BinomTestResult:
"""
Result of `scipy.stats.binomtest`.
Attributes
----------
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
pvalue : float
The p-value of the hypothesis test.
proportion_estimate : float
The estimate of the proportion of successes.
Methods
-------
proportion_ci :
Compute the confidence interval for the estimate of the proportion.
"""
def __init__(self, k, n, alternative, pvalue, proportion_estimate):
self.k = k
self.n = n
self.alternative = alternative
self.proportion_estimate = proportion_estimate
self.pvalue = pvalue
def __repr__(self):
s = ("BinomTestResult("
f"k={self.k}, "
f"n={self.n}, "
f"alternative={self.alternative!r}, "
f"proportion_estimate={self.proportion_estimate}, "
f"pvalue={self.pvalue})")
return s
def proportion_ci(self, confidence_level=0.95, method='exact'):
"""
Compute the confidence interval for the estimated proportion.
Parameters
----------
confidence_level : float, optional
Confidence level for the computed confidence interval
of the estimated proportion. Default is 0.95.
method : {'exact', 'wilson', 'wilsoncc'}, optional
Selects the method used to compute the confidence interval
for the estimate of the proportion:
'exact' :
Use the Clopper-Pearson exact method [1]_.
'wilson' :
Wilson's method, without continuity correction ([2]_, [3]_).
'wilsoncc' :
Wilson's method, with continuity correction ([2]_, [3]_).
Default is ``'exact'``.
Returns
-------
ci : ``ConfidenceInterval`` object
The object has attributes ``low`` and ``high`` that hold the
lower and upper bounds of the confidence interval.
References
----------
.. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
fiducial limits illustrated in the case of the binomial,
Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
.. [2] E. B. Wilson, Probable inference, the law of succession, and
statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
(1927).
.. [3] Robert G. Newcombe, Two-sided confidence intervals for the
single proportion: comparison of seven methods, Statistics
in Medicine, 17, pp 857-872 (1998).
Examples
--------
>>> from scipy.stats import binomtest
>>> result = binomtest(k=7, n=50, p=0.1)
>>> result.proportion_estimate
0.14
>>> result.proportion_ci()
ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846)
"""
if method not in ('exact', 'wilson', 'wilsoncc'):
raise ValueError("method must be one of 'exact', 'wilson' or "
"'wilsoncc'.")
if not (0 <= confidence_level <= 1):
raise ValueError('confidence_level must be in the interval '
'[0, 1].')
if method == 'exact':
low, high = _binom_exact_conf_int(self.k, self.n,
confidence_level,
self.alternative)
else:
# method is 'wilson' or 'wilsoncc'
low, high = _binom_wilson_conf_int(self.k, self.n,
confidence_level,
self.alternative,
correction=method == 'wilsoncc')
return ConfidenceInterval(low=low, high=high)
def _findp(func):
try:
p = brentq(func, 0, 1)
except RuntimeError:
raise RuntimeError('numerical solver failed to converge when '
'computing the confidence limits') from None
except ValueError as exc:
raise ValueError('brentq raised a ValueError; report this to the '
'SciPy developers') from exc
return p
def _binom_exact_conf_int(k, n, confidence_level, alternative):
"""
    Compute the exact confidence interval for the binomial test.
    Returns plow, phigh.
"""
if alternative == 'two-sided':
alpha = (1 - confidence_level) / 2
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'less':
alpha = 1 - confidence_level
plow = 0.0
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'greater':
alpha = 1 - confidence_level
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
phigh = 1.0
return plow, phigh
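# Added illustration, not part of SciPy: a small wrapper showing how the
# exact (Clopper-Pearson) helper above is called; the expected numbers come
# from the proportion_ci() docstring example earlier in this file.
def _demo_exact_interval():
    # For k=7, n=50 at 95% confidence the limits are roughly
    # (0.0582, 0.2674), i.e. the roots of binom.sf(k-1, n, p) - alpha and
    # binom.cdf(k, n, p) - alpha.
    return _binom_exact_conf_int(7, 50, 0.95, 'two-sided')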
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
# This function assumes that the arguments have already been validated.
# In particular, `alternative` must be one of 'two-sided', 'less' or
# 'greater'.
p = k / n
if alternative == 'two-sided':
z = ndtri(0.5 + 0.5*confidence_level)
else:
z = ndtri(confidence_level)
# For reference, the formulas implemented here are from
# Newcombe (1998) (ref. [3] in the proportion_ci docstring).
denom = 2*(n + z**2)
center = (2*n*p + z**2)/denom
q = 1 - p
if correction:
if alternative == 'less' or k == 0:
lo = 0.0
else:
dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom
lo = center - dlo
if alternative == 'greater' or k == n:
hi = 1.0
else:
dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom
hi = center + dhi
else:
delta = z/denom * sqrt(4*n*p*q + z**2)
if alternative == 'less' or k == 0:
lo = 0.0
else:
lo = center - delta
if alternative == 'greater' or k == n:
hi = 1.0
else:
hi = center + delta
return lo, hi
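# Added illustration, not part of SciPy: the analogous call for Wilson's
# method without continuity correction; the numbers quoted are approximate.
def _demo_wilson_interval():
    # For the same k=7, n=50 at 95% confidence Wilson's method gives roughly
    # (0.07, 0.26), a slightly narrower interval than the exact method.
    return _binom_wilson_conf_int(7, 50, 0.95, 'two-sided', correction=False)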
def binomtest(k, n, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
The binomial test [1]_ is a test of the null hypothesis that the
probability of success in a Bernoulli experiment is `p`.
Details of the test can be found in many texts on statistics, such
as section 24.5 of [2]_.
Parameters
----------
k : int
The number of successes.
n : int
The number of trials.
p : float, optional
The hypothesized probability of success, i.e. the expected
proportion of successes. The value must be in the interval
``0 <= p <= 1``. The default value is ``p = 0.5``.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
result : `~scipy.stats._result_classes.BinomTestResult` instance
The return value is an object with the following attributes:
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
pvalue : float
The p-value of the hypothesis test.
proportion_estimate : float
The estimate of the proportion of successes.
The object has the following methods:
proportion_ci(confidence_level=0.95, method='exact') :
Compute the confidence interval for ``proportion_estimate``.
Notes
-----
.. versionadded:: 1.7.0
References
----------
.. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
.. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
Prentice Hall, Upper Saddle River, New Jersey USA (2010)
Examples
--------
>>> from scipy.stats import binomtest
A car manufacturer claims that no more than 10% of their cars are unsafe.
15 cars are inspected for safety, 3 were found to be unsafe. Test the
manufacturer's claim:
>>> result = binomtest(3, n=15, p=0.1, alternative='greater')
>>> result.pvalue
0.18406106910639114
The null hypothesis cannot be rejected at the 5% level of significance
because the returned p-value is greater than the critical value of 5%.
The estimated proportion is simply ``3/15``:
>>> result.proportion_estimate
0.2
We can use the `proportion_ci()` method of the result to compute the
confidence interval of the estimate:
>>> result.proportion_ci(confidence_level=0.95)
ConfidenceInterval(low=0.05684686759024681, high=1.0)
"""
k = _validate_int(k, 'k', minimum=0)
n = _validate_int(n, 'n', minimum=1)
if k > n:
raise ValueError('k must not be greater than n.')
if not (0 <= p <= 1):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized; \n"
"must be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = binom.cdf(k, n, p)
elif alternative == 'greater':
pval = binom.sf(k-1, n, p)
else:
# alternative is 'two-sided'
d = binom.pmf(k, n, p)
rerr = 1 + 1e-7
if k == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif k < p * n:
ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p),
-d*rerr, np.ceil(p * n), n)
# y is the number of terms between mode and n that are <= d*rerr.
# ix gave us the first term where a(ix) <= d*rerr < a(ix-1)
# if the first equality doesn't hold, y=n-ix. Otherwise, we
# need to include ix as well as the equality holds. Note that
# the equality will hold in very very rare situations due to rerr.
y = n - ix + int(d*rerr == binom.pmf(ix, n, p))
pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
else:
ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p),
d*rerr, 0, np.floor(p * n))
# y is the number of terms between 0 and mode that are <= d*rerr.
# we need to add a 1 to account for the 0 index.
# For comparing this with old behavior, see
# tst_binary_srch_for_binom_tst method in test_morestats.
y = ix + 1
pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
pval = min(1.0, pval)
result = BinomTestResult(k=k, n=n, alternative=alternative,
proportion_estimate=k/n, pvalue=pval)
return result
def _binary_search_for_binom_tst(a, d, lo, hi):
"""
Conducts an implicit binary search on a function specified by `a`.
Meant to be used on the binomial PMF for the case of two-sided tests
to obtain the value on the other side of the mode where the tail
probability should be computed. The values on either side of
the mode are always in order, meaning binary search is applicable.
Parameters
----------
a : callable
The function over which to perform binary search. Its values
for inputs lo and hi should be in ascending order.
d : float
The value to search.
lo : int
The lower end of range to search.
hi : int
The higher end of the range to search.
Returns
----------
int
The index, i between lo and hi
such that a(i)<=d<a(i+1)
"""
while lo < hi:
mid = lo + (hi-lo)//2
midval = a(mid)
if midval < d:
lo = mid+1
elif midval > d:
hi = mid-1
else:
return mid
if a(lo) <= d:
return lo
else:
return lo-1
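# Added illustration, not part of SciPy: the helper is an ordinary binary
# search over an implicit ascending sequence; the list below is arbitrary.
if __name__ == '__main__':
    _a = [0, 1, 4, 9, 16, 25]
    _ix = _binary_search_for_binom_tst(lambda i: _a[i], 10, 0, len(_a) - 1)
    print(_ix)  # prints 3, since _a[3] <= 10 < _a[4]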
| e-q/scipy | scipy/stats/_binomtest.py | Python | bsd-3-clause | 13,154 |
import re
from collections import namedtuple
import sqlparse
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models import Index
from django.utils.regex_helper import _lazy_re_compile
FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('pk',))
field_size_re = _lazy_re_compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'bigint unsigned': 'PositiveBigIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'varchar': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower().split('(', 1)[0].strip()
return self.base_data_types_reverse[key]
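# Added illustration, not part of Django: the lookup strips any size suffix
# before mapping a column type to a Django field name.
def _demo_field_lookup():
    lookup = FlexibleFieldLookupDict()
    # 'varchar(30)' -> 'CharField'; 'integer unsigned' -> 'PositiveIntegerField'
    return lookup['varchar(30)'], lookup['integer unsigned']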
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.pk and field_type in {'BigIntegerField', 'IntegerField', 'SmallIntegerField'}:
# No support for BigAutoField or SmallAutoField as SQLite treats
# all integer primary keys as signed 64-bit integers.
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(table_name))
return [
FieldInfo(
name, data_type, None, get_field_size(data_type), None, None,
not notnull, default, pk == 1,
)
for cid, name, data_type, notnull, default, pk in cursor.fetchall()
]
def get_sequences(self, cursor, table_name, table_fields=()):
pk_col = self.get_primary_key_column(cursor, table_name)
return [{'table': table_name, 'column': pk_col}]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
create_sql, table_type = cursor.fetchone()
if table_type == 'view':
# It might be a view, then no results will be returned
return relations
results = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_primary_key_column(self, cursor, table_name):
"""Return the column name of the primary key for the given table."""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
create_sql, table_type = row
if table_type == 'view':
# Views don't have a primary key.
return None
fields_sql = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
for field_desc in fields_sql.split(','):
field_desc = field_desc.strip()
m = re.match(r'(?:(?:["`\[])(.*)(?:["`\]])|(\w+)).*PRIMARY KEY.*', field_desc)
if m:
return m.group(1) if m.group(1) else m.group(2)
return None
def _get_foreign_key_constraints(self, cursor, table_name):
constraints = {}
cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# Remaining on_update/on_delete/match values are of no interest.
id_, _, table, from_, to = row[:5]
constraints['fk_%d' % id_] = {
'columns': [from_],
'primary_key': False,
'unique': False,
'foreign_key': (table, to),
'check': False,
'index': False,
}
return constraints
def _parse_column_or_constraint_definition(self, tokens, columns):
token = None
is_constraint_definition = None
field_name = None
constraint_name = None
unique = False
unique_columns = []
check = False
check_columns = []
braces_deep = 0
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, '('):
braces_deep += 1
elif token.match(sqlparse.tokens.Punctuation, ')'):
braces_deep -= 1
if braces_deep < 0:
# End of columns and constraints for table definition.
break
elif braces_deep == 0 and token.match(sqlparse.tokens.Punctuation, ','):
# End of current column or constraint definition.
break
# Detect column or constraint definition by first token.
if is_constraint_definition is None:
is_constraint_definition = token.match(sqlparse.tokens.Keyword, 'CONSTRAINT')
if is_constraint_definition:
continue
if is_constraint_definition:
# Detect constraint name by second token.
if constraint_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
constraint_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
constraint_name = token.value[1:-1]
# Start constraint columns parsing after UNIQUE keyword.
if token.match(sqlparse.tokens.Keyword, 'UNIQUE'):
unique = True
unique_braces_deep = braces_deep
elif unique:
if unique_braces_deep == braces_deep:
if unique_columns:
# Stop constraint parsing.
unique = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
unique_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
unique_columns.append(token.value[1:-1])
else:
# Detect field name by first token.
if field_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
field_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
field_name = token.value[1:-1]
if token.match(sqlparse.tokens.Keyword, 'UNIQUE'):
unique_columns = [field_name]
# Start constraint columns parsing after CHECK keyword.
if token.match(sqlparse.tokens.Keyword, 'CHECK'):
check = True
check_braces_deep = braces_deep
elif check:
if check_braces_deep == braces_deep:
if check_columns:
# Stop constraint parsing.
check = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
if token.value in columns:
check_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
if token.value[1:-1] in columns:
check_columns.append(token.value[1:-1])
unique_constraint = {
'unique': True,
'columns': unique_columns,
'primary_key': False,
'foreign_key': None,
'check': False,
'index': False,
} if unique_columns else None
check_constraint = {
'check': True,
'columns': check_columns,
'primary_key': False,
'unique': False,
'foreign_key': None,
'index': False,
} if check_columns else None
return constraint_name, unique_constraint, check_constraint, token
def _parse_table_constraints(self, sql, columns):
# Check constraint parsing is based of SQLite syntax diagram.
# https://www.sqlite.org/syntaxdiagrams.html#table-constraint
statement = sqlparse.parse(sql)[0]
constraints = {}
unnamed_constrains_index = 0
tokens = (token for token in statement.flatten() if not token.is_whitespace)
# Go to columns and constraint definition
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, '('):
break
# Parse columns and constraint definition
while True:
constraint_name, unique, check, end_token = self._parse_column_or_constraint_definition(tokens, columns)
if unique:
if constraint_name:
constraints[constraint_name] = unique
else:
unnamed_constrains_index += 1
constraints['__unnamed_constraint_%s__' % unnamed_constrains_index] = unique
if check:
if constraint_name:
constraints[constraint_name] = check
else:
unnamed_constrains_index += 1
constraints['__unnamed_constraint_%s__' % unnamed_constrains_index] = check
if end_token.match(sqlparse.tokens.Punctuation, ')'):
break
return constraints
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Find inline check constraints.
try:
table_schema = cursor.execute(
"SELECT sql FROM sqlite_master WHERE type='table' and name=%s" % (
self.connection.ops.quote_name(table_name),
)
).fetchone()[0]
except TypeError:
# table_name is a view.
pass
else:
columns = {info.name for info in self.get_table_description(cursor, table_name)}
constraints.update(self._parse_table_constraints(table_schema, columns))
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# SQLite 3.8.9+ has 5 columns, however older versions only give 3
# columns. Discard last 2 columns if there.
number, index, unique = row[:3]
cursor.execute(
"SELECT sql FROM sqlite_master "
"WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
)
# There's at most one row.
sql, = cursor.fetchone() or (None,)
# Inline constraints are already detected in
# _parse_table_constraints(). The reasons to avoid fetching inline
# constraints from `PRAGMA index_list` are:
# - Inline constraints can have a different name and information
# than what `PRAGMA index_list` gives.
# - Not all inline constraints may appear in `PRAGMA index_list`.
if not sql:
# An inline constraint
continue
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": None,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Add type and column orders for indexes
if constraints[index]['index'] and not constraints[index]['unique']:
# SQLite doesn't support any index type other than b-tree
constraints[index]['type'] = Index.suffix
order_info = sql.split('(')[-1].split(')')[0].split(',')
orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info]
constraints[index]['orders'] = orders
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": None,
"check": False,
"index": False,
}
constraints.update(self._get_foreign_key_constraints(cursor, table_name))
return constraints
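# --- Added illustrative sketch; not part of Django. ---
# Assumes a configured Django project whose 'default' database uses this
# sqlite3 backend; the table name and DDL below are arbitrary examples.
if __name__ == '__main__':
    from django.db import connection
    with connection.cursor() as cursor:
        cursor.execute('CREATE TABLE demo (id INTEGER PRIMARY KEY, '
                       'name VARCHAR(20) UNIQUE)')
        print(connection.introspection.get_table_description(cursor, 'demo'))
        print(connection.introspection.get_constraints(cursor, 'demo'))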
| kaedroho/django | django/db/backends/sqlite3/introspection.py | Python | bsd-3-clause | 18,452 |