sentence1 | sentence2 | label |
---|---|---|
def post_request(self, container, resource=None, params=None, accept=None):
"""Send a POST request."""
url = self.make_url(container, resource)
headers = self._make_headers(accept)
try:
rsp = requests.post(url, data=params, headers=headers,
verify=self._verify, timeout=self._timeout)
except requests.exceptions.ConnectionError as e:
RestHttp._raise_conn_error(e)
if self._dbg_print:
self.__print_req('POST', rsp.url, headers, params)
return self._handle_response(rsp) | Send a POST request. | entailment |
def delete_request(self, container, resource=None, query_items=None,
accept=None):
"""Send a DELETE request."""
url = self.make_url(container, resource)
headers = self._make_headers(accept)
if query_items and isinstance(query_items, (list, tuple, set)):
url += RestHttp._list_query_str(query_items)
query_items = None
try:
rsp = requests.delete(url, params=query_items, headers=headers,
verify=self._verify, timeout=self._timeout)
except requests.exceptions.ConnectionError as e:
RestHttp._raise_conn_error(e)
if self._dbg_print:
self.__print_req('DELETE', rsp.url, headers, None)
return self._handle_response(rsp) | Send a DELETE request. | entailment |
def download_file(self, container, resource, save_path=None, accept=None,
query_items=None):
"""Download a file.
If a timeout is defined, it is not a time limit on the entire download;
rather, an exception is raised if the server has not issued a response
for timeout seconds (more precisely, if no bytes have been received on
the underlying socket for timeout seconds). If no timeout is specified
explicitly, requests do not time out.
"""
url = self.make_url(container, resource)
if not save_path:
save_path = resource.split('/')[-1]
headers = self._make_headers(accept)
if query_items and isinstance(query_items, (list, tuple, set)):
url += RestHttp._list_query_str(query_items)
query_items = None
try:
rsp = requests.get(url, query_items, headers=headers, stream=True,
verify=self._verify, timeout=self._timeout)
except requests.exceptions.ConnectionError as e:
RestHttp._raise_conn_error(e)
if self._dbg_print:
self.__print_req('GET', rsp.url, headers, None)
if rsp.status_code >= 300:
raise RestHttpError(rsp.status_code, rsp.reason, rsp.text)
file_size_dl = 0
try:
with open(save_path, 'wb') as f:
for buff in rsp.iter_content(chunk_size=16384):
f.write(buff)
file_size_dl += len(buff)
except Exception as e:
raise RuntimeError('could not download file: ' + str(e))
finally:
rsp.close()
if self._dbg_print:
print('===> downloaded %d bytes to %s' % (file_size_dl, save_path))
return rsp.status_code, save_path, os.path.getsize(save_path) | Download a file.
If a timeout is defined, it is not a time limit on the entire download;
rather, an exception is raised if the server has not issued a response
for timeout seconds (more precisely, if no bytes have been received on
the underlying socket for timeout seconds). If no timeout is specified
explicitly, requests do not time out. | entailment |
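The timeout note above applies to any streaming `requests` download, not just this client. Below is a minimal standalone sketch of the same pattern with an explicit byte counter; the URL is a placeholder and the chunk size and timeout values are arbitrary.

```python
# Minimal sketch of a streaming download with requests; the URL is a
# placeholder and the timeout/chunk_size values are arbitrary.
import requests

def stream_to_file(url, save_path, timeout=30.0):
    """Save url to save_path, returning (status_code, bytes_written)."""
    bytes_written = 0
    # timeout bounds each socket read, not the whole download
    with requests.get(url, stream=True, timeout=timeout) as rsp:
        rsp.raise_for_status()
        with open(save_path, 'wb') as f:
            for chunk in rsp.iter_content(chunk_size=16384):
                f.write(chunk)
                bytes_written += len(chunk)
    return rsp.status_code, bytes_written
```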
def upload_file(self, container, src_file_path, dst_name=None, put=True,
content_type=None):
"""Upload a single file."""
if not os.path.exists(src_file_path):
raise RuntimeError('file not found: ' + src_file_path)
if not dst_name:
dst_name = os.path.basename(src_file_path)
if not content_type:
content_type = "application/octet-stream"
headers = dict(self._base_headers)
headers["content-type"] = content_type
headers["content-length"] = str(os.path.getsize(src_file_path))
headers['content-disposition'] = 'attachment; filename=' + dst_name
if put:
method = 'PUT'
url = self.make_url(container, dst_name, None)
else:
method = 'POST'
url = self.make_url(container, None, None)
with open(src_file_path, 'rb') as up_file:
try:
rsp = requests.request(method, url, headers=headers,
data=up_file, timeout=self._timeout)
except requests.exceptions.ConnectionError as e:
RestHttp._raise_conn_error(e)
return self._handle_response(rsp) | Upload a single file. | entailment |
def upload_file_mp(self, container, src_file_path, dst_name=None,
content_type=None):
"""Upload a file using multi-part encoding."""
if not os.path.exists(src_file_path):
raise RuntimeError('file not found: ' + src_file_path)
if not dst_name:
dst_name = os.path.basename(src_file_path)
if not content_type:
content_type = "application/octet.stream"
url = self.make_url(container, None, None)
headers = self._base_headers
with open(src_file_path, 'rb') as up_file:
files = {'file': (dst_name, up_file, content_type)}
try:
rsp = requests.post(url, headers=headers, files=files,
timeout=self._timeout)
except requests.exceptions.ConnectionError as e:
RestHttp._raise_conn_error(e)
return self._handle_response(rsp) | Upload a file using multi-part encoding. | entailment |
def upload_files(self, container, src_dst_map, content_type=None):
"""Upload multiple files."""
if not content_type:
content_type = "application/octet.stream"
url = self.make_url(container, None, None)
headers = self._base_headers
multi_files = []
try:
for src_path in src_dst_map:
dst_name = src_dst_map[src_path]
if not dst_name:
dst_name = os.path.basename(src_path)
multi_files.append(
('files', (dst_name, open(src_path, 'rb'), content_type)))
rsp = requests.post(url, headers=headers, files=multi_files,
timeout=self._timeout)
except requests.exceptions.ConnectionError as e:
RestHttp._raise_conn_error(e)
finally:
for n, info in multi_files:
dst, f, ctype = info
f.close()
return self._handle_response(rsp) | Upload multiple files. | entailment |
def new_session(self, server=None, session_name=None, user_name=None,
existing_session=None):
"""Create a new session or attach to existing.
Normally, this function is called automatically, and gets its parameter
values from the environment. It is provided as a public function for
cases when extra control over session creation is required in an
automation script that is adapted to use ReST.
WARNING: This function is not part of the original StcPython.py and if
called directly by an automation script, then that script will not be
able to revert to using the non-ReST API until the call to this
function is removed.
Arguments:
server -- STC server (Lab Server) address. If not set get
value from STC_SERVER_ADDRESS environment variable.
session_name -- Name part of session ID. If not set get value from
STC_SESSION_NAME environment variable.
user_name -- User portion of session ID. If not set get name of
user this script is running as.
existing_session -- Behavior when session already exists. Recognized
values are 'kill' and 'join'. If not set get value
from EXISTING_SESSION environment variable. If not
set to recognized value, raise exception if session
already exists.
See also: stchttp.StcHttp(), stchttp.new_session()
Return:
The internal StcHttp object that is used for this session. This allows
the caller to perform additional interactions with the STC ReST API
beyond what the adapter provides.
"""
if not server:
server = os.environ.get('STC_SERVER_ADDRESS')
if not server:
raise EnvironmentError('STC_SERVER_ADDRESS not set')
self._stc = stchttp.StcHttp(server)
if not session_name:
session_name = os.environ.get('STC_SESSION_NAME')
if not session_name or session_name == '__NEW_TEST_SESSION__':
session_name = None
if not user_name:
try:
# Try to get the name of the current user.
user_name = getpass.getuser()
except Exception:
pass
if not existing_session:
# Try to get existing_session from environ if not passed in.
existing_session = os.environ.get('EXISTING_SESSION')
if existing_session:
existing_session = existing_session.lower()
if existing_session == 'kill':
# Kill any existing session and create a new one.
self._stc.new_session(user_name, session_name, True)
return self._stc
if existing_session == 'join':
# Create a new session, or join if already exists.
try:
self._stc.new_session(user_name, session_name, False)
except RuntimeError as e:
if str(e).find('already exists') >= 0:
sid = ' - '.join((session_name, user_name))
self._stc.join_session(sid)
else:
raise
return self._stc
# Create a new session, raise exception if session already exists.
self._stc.new_session(user_name, session_name, False)
return self._stc | Create a new session or attach to existing.
Normally, this function is called automatically, and gets its parameter
values from the environment. It is provided as a public function for
cases when extra control over session creation is required in an
automation script that is adapted to use ReST.
WARNING: This function is not part of the original StcPython.py and if
called directly by an automation script, then that script will not be
able to revert to using the non-ReST API until the call to this
function is removed.
Arguments:
server -- STC server (Lab Server) address. If not set get
value from STC_SERVER_ADDRESS environment variable.
session_name -- Name part of session ID. If not set get value from
STC_SESSION_NAME environment variable.
user_name -- User portion of session ID. If not set get name of
user this script is running as.
existing_session -- Behavior when session already exists. Recognized
values are 'kill' and 'join'. If not set get value
from EXISTING_SESSION environment variable. If not
set to recognized value, raise exception if session
already exists.
See also: stchttp.StcHttp(), stchttp.new_session()
Return:
The internal StcHttp object that is used for this session. This allows
the caller to perform additional interactions with the STC ReST API
beyond what the adapter provides. | entailment |
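As a rough illustration of the environment-driven flow described in the docstring, a hypothetical driver might look like the sketch below; the server address and session name are placeholders, and `adapter` stands in for whatever object exposes the `new_session()` shown above.

```python
import os

def start_from_environment(adapter):
    """`adapter` is any object exposing the new_session() shown above."""
    os.environ.setdefault('STC_SERVER_ADDRESS', '10.0.0.5')      # placeholder
    os.environ.setdefault('STC_SESSION_NAME', 'regression-run')  # placeholder
    os.environ.setdefault('EXISTING_SESSION', 'join')            # join, don't fail
    stc = adapter.new_session()       # reads the variables set above
    print(stc.bll_version())          # further REST calls go through StcHttp
    return stc
```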
def _end_session(self, kill=None):
"""End the client session."""
if self._stc:
if kill is None:
kill = os.environ.get('STC_SESSION_TERMINATE_ON_DISCONNECT')
kill = _is_true(kill)
self._stc.end_session(kill)
self._stc = None | End the client session. | entailment |
def setup(app):
"""
This isn't used in Production,
but allows this module to be used as a standalone extension.
"""
app.add_directive('readthedocs-embed', EmbedDirective)
app.add_config_value('readthedocs_embed_project', '', 'html')
app.add_config_value('readthedocs_embed_version', '', 'html')
app.add_config_value('readthedocs_embed_doc', '', 'html')
return app | This isn't used in Production,
but allows this module to be used as a standalone extension. | entailment |
def finalize_media(app):
"""Point media files at our media server."""
if (app.builder.name == 'readthedocssinglehtmllocalmedia' or
app.builder.format != 'html' or
not hasattr(app.builder, 'script_files')):
return # Use local media for downloadable files
# Pull project data from conf.py if it exists
context = app.builder.config.html_context
STATIC_URL = context.get('STATIC_URL', DEFAULT_STATIC_URL)
js_file = '{}javascript/readthedocs-doc-embed.js'.format(STATIC_URL)
if sphinx.version_info < (1, 8):
app.builder.script_files.append(js_file)
else:
app.add_js_file(js_file) | Point media files at our media server. | entailment |
def update_body(app, pagename, templatename, context, doctree):
"""
Add Read the Docs content to Sphinx body content.
This is the most reliable way to inject our content into the page.
"""
STATIC_URL = context.get('STATIC_URL', DEFAULT_STATIC_URL)
online_builders = [
'readthedocs', 'readthedocsdirhtml', 'readthedocssinglehtml'
]
if app.builder.name == 'readthedocssinglehtmllocalmedia':
if 'html_theme' in context and context['html_theme'] == 'sphinx_rtd_theme':
theme_css = '_static/css/theme.css'
else:
theme_css = '_static/css/badge_only.css'
elif app.builder.name in online_builders:
if 'html_theme' in context and context['html_theme'] == 'sphinx_rtd_theme':
theme_css = '%scss/sphinx_rtd_theme.css' % STATIC_URL
else:
theme_css = '%scss/badge_only.css' % STATIC_URL
else:
# Only insert on our HTML builds
return
inject_css = True
# Starting at v0.4.0 of the sphinx theme, the theme CSS should not be injected
# This decouples the theme CSS (which is versioned independently) from readthedocs.org
if theme_css.endswith('sphinx_rtd_theme.css'):
try:
import sphinx_rtd_theme
inject_css = LooseVersion(sphinx_rtd_theme.__version__) < LooseVersion('0.4.0')
except ImportError:
pass
if inject_css and theme_css not in app.builder.css_files:
if sphinx.version_info < (1, 8):
app.builder.css_files.insert(0, theme_css)
else:
app.add_css_file(theme_css)
# This is monkey patched on the signal because we can't know what the user
# has done with their `app.builder.templates` before now.
if not hasattr(app.builder.templates.render, '_patched'):
# Janky monkey patch of template rendering to add our content
old_render = app.builder.templates.render
def rtd_render(self, template, render_context):
"""
A replacement render function that renders the content with the user's
template renderer, then inserts the Read the Docs HTML before ``</head>``.
"""
# Render Read the Docs content
template_context = render_context.copy()
template_context['rtd_css_url'] = '{}css/readthedocs-doc-embed.css'.format(STATIC_URL)
template_context['rtd_analytics_url'] = '{}javascript/readthedocs-analytics.js'.format(
STATIC_URL,
)
source = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'_templates',
'readthedocs-insert.html.tmpl'
)
templ = open(source).read()
rtd_content = app.builder.templates.render_string(templ, template_context)
# Handle original render function
content = old_render(template, render_context)
end_body = content.lower().find('</head>')
# Insert our content just before the closing </head> tag.
if end_body != -1:
content = content[:end_body] + rtd_content + "\n" + content[end_body:]
else:
log.debug("File doesn't look like HTML. Skipping RTD content addition")
return content
rtd_render._patched = True
app.builder.templates.render = types.MethodType(rtd_render,
app.builder.templates) | Add Read the Docs content to Sphinx body content.
This is the most reliable way to inject our content into the page. | entailment |
def generate_json_artifacts(app, pagename, templatename, context, doctree):
"""
Generate JSON artifacts for each page.
This way we can skip generating this in other build step.
"""
try:
# We need to get the output directory where the docs are built
# _build/json.
build_json = os.path.abspath(
os.path.join(app.outdir, '..', 'json')
)
outjson = os.path.join(build_json, pagename + '.fjson')
outdir = os.path.dirname(outjson)
if not os.path.exists(outdir):
os.makedirs(outdir)
with open(outjson, 'w+') as json_file:
to_context = {
key: context.get(key, '')
for key in KEYS
}
json.dump(to_context, json_file, indent=4)
except TypeError:
log.exception(
'Failed to encode JSON for page {page}'.format(page=outjson)
)
except IOError:
log.exception(
'Failed to save JSON output for page {page}'.format(page=outjson)
)
except Exception as e:
log.exception(
'Failure in JSON search dump for page {page}'.format(page=outjson)
) | Generate JSON artifacts for each page.
This way we can skip generating this in other build step. | entailment |
def _copy_searchtools(self, renderer=None):
"""Copy and patch searchtools
This uses the included Sphinx version's searchtools, but patches it to
remove automatic initialization. This is a fork of
``sphinx.util.fileutil.copy_asset``
"""
log.info(bold('copying searchtools... '), nonl=True)
if sphinx.version_info < (1, 8):
search_js_file = 'searchtools.js_t'
else:
search_js_file = 'searchtools.js'
path_src = os.path.join(
package_dir, 'themes', 'basic', 'static', search_js_file
)
if os.path.exists(path_src):
path_dest = os.path.join(self.outdir, '_static', 'searchtools.js')
if renderer is None:
# Sphinx 1.4 used the renderer from the existing builder, but
# the pattern for Sphinx 1.5 is to pass in a renderer separate
# from the builder. This supports both patterns for future
# compatibility
if sphinx.version_info < (1, 5):
renderer = self.templates
else:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
with codecs.open(path_src, 'r', encoding='utf-8') as h_src:
with codecs.open(path_dest, 'w', encoding='utf-8') as h_dest:
data = h_src.read()
data = self.REPLACEMENT_PATTERN.sub(self.REPLACEMENT_TEXT, data)
h_dest.write(renderer.render_string(
data,
self.get_static_readthedocs_context()
))
else:
log.warning('Missing {}'.format(search_js_file))
log.info('done') | Copy and patch searchtools
This uses the included Sphinx version's searchtools, but patches it to
remove automatic initialization. This is a fork of
``sphinx.util.fileutil.copy_asset`` | entailment |
def stc_system_info(stc_addr):
"""Return dictionary of STC and API information.
If a session already exists, then use it to get STC information and avoid
taking the time to start a new session. A session is necessary to get
STC information.
"""
stc = stchttp.StcHttp(stc_addr)
sessions = stc.sessions()
if sessions:
# If a session already exists, use it to get STC information.
stc.join_session(sessions[0])
sys_info = stc.system_info()
else:
# Create a new session to get STC information.
stc.new_session('anonymous')
try:
sys_info = stc.system_info()
finally:
# Make sure the temporary session is terminated.
stc.end_session()
return sys_info | Return dictionary of STC and API information.
If a session already exists, then use it to get STC information and avoid
taking the time to start a new session. A session is necessary to get
STC information. | entailment |
def new_session(self, user_name=None, session_name=None,
kill_existing=False, analytics=None):
"""Create a new test session.
The test session is identified by the specified user_name and optional
session_name parameters. If a session name is not specified, then the
server will create one.
Arguments:
user_name -- User name part of session ID.
session_name -- Session name part of session ID.
kill_existing -- If there is an existing session, with the same session
name and user name, then terminate it before creating
a new session
analytics -- Optional boolean value to disable or enable analytics
for new session. None will use setting configured on
server.
Return:
True if session started, False if session was already started.
"""
if self.started():
return False
if not session_name or not session_name.strip():
session_name = ''
if not user_name or not user_name.strip():
user_name = ''
params = {'userid': user_name, 'sessionname': session_name}
if analytics not in (None, ''):
params['analytics'] = str(analytics).lower()
try:
status, data = self._rest.post_request('sessions', None, params)
except resthttp.RestHttpError as e:
if kill_existing and str(e).find('already exists') >= 0:
self.end_session('kill', ' - '.join((session_name, user_name)))
else:
raise RuntimeError('failed to create session: ' + str(e))
# Starting session
if self._dbg_print:
print('===> starting session')
status, data = self._rest.post_request('sessions', None, params)
if self._dbg_print:
print('===> OK, started')
sid = data['session_id']
if self._dbg_print:
print('===> session ID:', sid)
print('===> URL:', self._rest.make_url('sessions', sid))
self._rest.add_header('X-STC-API-Session', sid)
self._sid = sid
return sid | Create a new test session.
The test session is identified by the specified user_name and optional
session_name parameters. If a session name is not specified, then the
server will create one.
Arguments:
user_name -- User name part of session ID.
session_name -- Session name part of session ID.
kill_existing -- If there is an existing session, with the same session
name and user name, then terminate it before creating
a new session
analytics -- Optional boolean value to disable or enable analytics
for new session. None will use setting configured on
server.
Return:
True if session started, False if session was already started. | entailment |
def join_session(self, sid):
"""Attach to an existing session."""
self._rest.add_header('X-STC-API-Session', sid)
self._sid = sid
try:
status, data = self._rest.get_request('objects', 'system1',
['version', 'name'])
except resthttp.RestHttpError as e:
self._rest.del_header('X-STC-API-Session')
self._sid = None
raise RuntimeError('failed to join session "%s": %s' % (sid, e))
return data['version'] | Attach to an existing session. | entailment |
def end_session(self, end_tcsession=True, sid=None):
"""End this test session.
A session can be ended in four ways, depending on the value of the
end_tcsession parameter:
- end_tcsession=None:
Stop using session locally, do not contact server.
- end_tcsession=False:
End client controller, but leave test session on server.
- end_tcsession=True:
End client controller and terminate test session (default).
- end_tcsession='kill':
Forcefully terminate test session.
Specifying end_tcsession=False is useful to do before attaching an STC
GUI or legacy automation script, so that there are not multiple
controllers to interfere with each other.
When the session is ended, it is no longer available. Clients should
export any result or log files, that they want to preserve, before the
session is ended.
Arguments
end_tcsession -- How to end the session (see above)
sid -- ID of session to end. None to use current session.
Return:
True if session ended, false if session was not started.
"""
if not sid or sid == self._sid:
if not self.started():
return False
sid = self._sid
self._sid = None
self._rest.del_header('X-STC-API-Session')
if end_tcsession is None:
if self._dbg_print:
print('===> detached from session')
return True
try:
if end_tcsession:
if self._dbg_print:
print('===> deleting session:', sid)
if end_tcsession == 'kill':
status, data = self._rest.delete_request(
'sessions', sid, 'kill')
else:
status, data = self._rest.delete_request('sessions', sid)
count = 0
while 1:
time.sleep(5)
if self._dbg_print:
print('===> checking if session ended')
ses_list = self.sessions()
if not ses_list or sid not in ses_list:
break
count += 1
if count == 3:
raise RuntimeError("test session has not stopped")
if self._dbg_print:
print('===> ok - deleted test session')
else:
# Ending client session is supported on version >= 2.1.5
if self._get_api_version() < (2, 1, 5):
raise RuntimeError('option not available on server')
status, data = self._rest.delete_request(
'sessions', sid, 'false')
if self._dbg_print:
print('===> OK - detached REST API from test session')
except resthttp.RestHttpError as e:
raise RuntimeError('failed to end session: ' + str(e))
return True | End this test session.
A session can be ended in four ways, depending on the value of the
end_tcsession parameter:
- end_tcsession=None:
Stop using session locally, do not contact server.
- end_tcsession=False:
End client controller, but leave test session on server.
- end_tcsession=True:
End client controller and terminate test session (default).
- end_tcsession='kill':
Forcefully terminate test session.
Specifying end_tcsession=False is useful to do before attaching an STC
GUI or legacy automation script, so that there are not multiple
controllers to interfere with each other.
When the session is ended, it is no longer available. Clients should
export any result or log files, that they want to preserve, before the
session is ended.
Arguments
end_tcsession -- How to end the session (see above)
sid -- ID of session to end. None to use current session.
Return:
True if session ended, false if session was not started. | entailment |
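A small wrapper can make the four modes explicit. This is only a sketch: `stc` is assumed to be a started `StcHttp` session, and the mode names are invented here, not part of the API above.

```python
def finish(stc, mode='terminate'):
    """Illustrative wrapper around the end_session() modes listed above."""
    if mode == 'detach':
        return stc.end_session(None)     # stop using the session locally only
    if mode == 'release':
        return stc.end_session(False)    # leave the test session on the server
    if mode == 'kill':
        return stc.end_session('kill')   # forcefully terminate the session
    return stc.end_session(True)         # default: end controller and session
```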
def session_info(self, session_id=None):
"""Get information on session.
If session_id is None, the default, then return information about this
session. If a session ID is given, then get information about that
session.
Arguments:
session_id -- Id of session to get info for, if not this session.
Return:
Dictionary of session information.
"""
if not session_id:
if not self.started():
return []
session_id = self._sid
status, data = self._rest.get_request('sessions', session_id)
return data | Get information on session.
If session_id is None, the default, then return information about this
session. If a session ID is given, then get information about that
session.
Arguments:
session_id -- Id of session to get info for, if not this session.
Return:
Dictionary of session information. | entailment |
def files(self):
"""Get list of files, for this session, on server."""
self._check_session()
status, data = self._rest.get_request('files')
return data | Get list of files, for this session, on server. | entailment |
def bll_version(self):
"""Get the BLL version this session is connected to.
Return:
Version string if session started. None if session not started.
"""
if not self.started():
return None
status, data = self._rest.get_request('objects', 'system1',
['version', 'name'])
return data['version'] | Get the BLL version this session is connected to.
Return:
Version string if session started. None if session not started. | entailment |
def get(self, handle, *args):
"""Returns the value(s) of one or more object attributes.
If multiple arguments, this method returns a dictionary of argument
names mapped to the value returned by each argument.
If a single argument is given, then the response is a list of values
for that argument.
Arguments:
handle -- Handle that identifies object to get info for.
*args -- Zero or more attributes or relationships.
Return:
If multiple input arguments are given:
{attrib_name:attrib_val, attrib_name:attrib_val, ..}
If single input argument is given, then a single string value is
returned. NOTE: If the string contains multiple substrings, then the
client will need to parse these.
"""
self._check_session()
status, data = self._rest.get_request('objects', str(handle), args)
return data | Returns the value(s) of one or more object attributes.
If multiple arguments, this method returns a dictionary of argument
names mapped to the value returned by each argument.
If a single argument is given, then the response is a list of values
for that argument.
Arguments:
handle -- Handle that identifies object to get info for.
*args -- Zero or more attributes or relationships.
Return:
If multiple input arguments are given:
{attrib_name:attrib_val, attrib_name:attrib_val, ..}
If single input argument is given, then a single string value is
returned. NOTE: If the string contains multiple substrings, then the
client will need to parse these. | entailment |
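For example (assuming `stc` is a started session from above; the handle and attribute names are purely illustrative), a single attribute comes back as one value while several come back as a dictionary:

```python
# Illustrative only: 'port1', 'name' and 'location' are hypothetical.
location = stc.get('port1', 'location')         # single attribute -> string
attrs = stc.get('port1', 'name', 'location')    # several -> dict of values
print(attrs['name'], attrs['location'])
```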
def create(self, object_type, under=None, attributes=None, **kwattrs):
"""Create a new automation object.
Arguments:
object_type -- Type of object to create.
under -- Handle of the parent of the new object.
attributes -- Dictionary of attributes (name-value pairs).
kwattrs -- Optional keyword attributes (name=value pairs).
Return:
Handle of newly created object.
"""
data = self.createx(object_type, under, attributes, **kwattrs)
return data['handle'] | Create a new automation object.
Arguments:
object_type -- Type of object to create.
under -- Handle of the parent of the new object.
attributes -- Dictionary of attributes (name-value pairs).
kwattrs -- Optional keyword attributes (name=value pairs).
Return:
Handle of newly created object. | entailment |
def createx(self, object_type, under=None, attributes=None, **kwattrs):
"""Create a new automation object.
Arguments:
object_type -- Type of object to create.
under -- Handle of the parent of the new object.
attributes -- Dictionary of attributes (name-value pairs).
kwattrs -- Optional keyword attributes (name=value pairs).
Return:
Dictionary containing handle of newly created object.
"""
self._check_session()
params = {'object_type': object_type}
if under:
params['under'] = under
if attributes:
params.update(attributes)
if kwattrs:
params.update(kwattrs)
status, data = self._rest.post_request('objects', None, params)
return data | Create a new automation object.
Arguments:
object_type -- Type of object to create.
under -- Handle of the parent of the new object.
attributes -- Dictionary of attributes (name-value pairs).
kwattrs -- Optional keyword attributes (name=value pairs).
Return:
Dictionary containing handle of newly created object. | entailment |
def delete(self, handle):
"""Delete the specified object.
Arguments:
handle -- Handle of object to delete.
"""
self._check_session()
self._rest.delete_request('objects', str(handle)) | Delete the specified object.
Arguments:
handle -- Handle of object to delete. | entailment |
def perform(self, command, params=None, **kwargs):
"""Execute a command.
Arguments can be supplied either as a dictionary or as keyword
arguments. Examples:
stc.perform('LoadFromXml', {'filename':'config.xml'})
stc.perform('LoadFromXml', filename='config.xml')
Arguments:
command -- Command to execute.
params -- Optional. Dictionary of parameters (name-value pairs).
kwargs -- Optional keyword arguments (name=value pairs).
Return:
Data from command.
"""
self._check_session()
if not params:
params = {}
if kwargs:
params.update(kwargs)
params['command'] = command
status, data = self._rest.post_request('perform', None, params)
return data | Execute a command.
Arguments can be supplied either as a dictionary or as keyword
arguments. Examples:
stc.perform('LoadFromXml', {'filename':'config.xml'})
stc.perform('LoadFromXml', filename='config.xml')
Arguments:
command -- Command to execute.
params -- Optional. Dictionary of parameters (name-value pairs).
kwargs -- Optional keyword arguments (name=value pairs).
Return:
Data from command. | entailment |
def config(self, handle, attributes=None, **kwattrs):
"""Sets or modifies one or more object attributes or relations.
Arguments can be supplied either as a dictionary or as keyword
arguments. Examples:
stc.config('port1', location='//10.1.2.3/1/1')
stc.config('port2', {'location': '//10.1.2.3/1/2'})
Arguments:
handle -- Handle of object to modify.
attributes -- Dictionary of attributes (name-value pairs).
kwattrs -- Optional keyword attributes (name=value pairs).
"""
self._check_session()
if kwattrs:
if attributes:
attributes.update(kwattrs)
else:
attributes = kwattrs
self._rest.put_request('objects', str(handle), attributes) | Sets or modifies one or more object attributes or relations.
Arguments can be supplied either as a dictionary or as keyword
arguments. Examples:
stc.config('port1', location='//10.1.2.3/1/1')
stc.config('port2', {'location': '//10.1.2.3/1/2'})
Arguments:
handle -- Handle of object to modify.
attributes -- Dictionary of attributes (name-value pairs).
kwattrs -- Optional keyword attributes (name=value pairs). | entailment |
def chassis(self):
"""Get list of chassis known to test session."""
self._check_session()
status, data = self._rest.get_request('chassis')
return data | Get list of chassis known to test session. | entailment |
def chassis_info(self, chassis):
"""Get information about the specified chassis."""
if not chassis or not isinstance(chassis, str):
raise RuntimeError('missing chassis address')
self._check_session()
status, data = self._rest.get_request('chassis', chassis)
return data | Get information about the specified chassis. | entailment |
def connections(self):
"""Get list of connections."""
self._check_session()
status, data = self._rest.get_request('connections')
return data | Get list of connections. | entailment |
def is_connected(self, chassis):
"""Get Boolean connected status of the specified chassis."""
self._check_session()
try:
status, data = self._rest.get_request('connections', chassis)
except resthttp.RestHttpError as e:
if int(e) == 404:
# 404 NOT FOUND means the chassis is unknown, so return False.
return False
return bool(data and data.get('IsConnected')) | Get Boolean connected status of the specified chassis. | entailment |
def connect(self, chassis_list):
"""Establish connection to one or more chassis.
Arguments:
chassis_list -- List of chassis (IP addresses or DNS names)
Return:
List of chassis addresses.
"""
self._check_session()
if not isinstance(chassis_list, (list, tuple, set, dict, frozenset)):
chassis_list = (chassis_list,)
if len(chassis_list) == 1:
status, data = self._rest.put_request(
'connections', chassis_list[0])
data = [data]
else:
params = {chassis: True for chassis in chassis_list}
params['action'] = 'connect'
status, data = self._rest.post_request('connections', None, params)
return data | Establish connection to one or more chassis.
Arguments:
chassis_list -- List of chassis (IP addresses or DNS names)
Return:
List of chassis addresses. | entailment |
def disconnect(self, chassis_list):
"""Remove connection with one or more chassis.
Arguments:
chassis_list -- List of chassis (IP addresses or DNS names)
"""
self._check_session()
if not isinstance(chassis_list, (list, tuple, set, dict, frozenset)):
chassis_list = (chassis_list,)
if len(chassis_list) == 1:
self._rest.delete_request('connections', chassis_list[0])
else:
params = {chassis: True for chassis in chassis_list}
params['action'] = 'disconnect'
self._rest.post_request('connections', None, params) | Remove connection with one or more chassis.
Arguments:
chassis_list -- List of chassis (IP addresses or DNS names) | entailment |
def help(self, subject=None, args=None):
"""Get help information about Automation API.
The following values can be specified for the subject:
None -- gets an overview of help.
'commands' -- gets a list of API functions
command name -- get info about the specified command.
object type -- get info about the specified object type
handle value -- get info about the object type referred to
Arguments:
subject -- Optional. Subject to get help on.
args -- Optional. Additional arguments for searching help. These
are used when the subject is 'list'.
Return:
String of help information.
"""
if subject:
if subject not in (
'commands', 'create', 'config', 'get', 'delete', 'perform',
'connect', 'connectall', 'disconnect', 'disconnectall',
'apply', 'log', 'help'):
self._check_session()
status, data = self._rest.get_request('help', subject, args)
else:
status, data = self._rest.get_request('help')
if isinstance(data, (list, tuple, set)):
return ' '.join((str(i) for i in data))
return data['message'] | Get help information about Automation API.
The following values can be specified for the subject:
None -- gets an overview of help.
'commands' -- gets a list of API functions
command name -- get info about the specified command.
object type -- get info about the specified object type
handle value -- get info about the object type referred to
Arguments:
subject -- Optional. Subject to get help on.
args -- Optional. Additional arguments for searching help. These
are used when the subject is 'list'.
Return:
String of help information. | entailment |
def log(self, level, msg):
"""Write a diagnostic message to a log file or to standard output.
Arguments:
level -- Severity level of entry. One of: INFO, WARN, ERROR, FATAL.
msg -- Message to write to log.
"""
self._check_session()
level = level.upper()
allowed_levels = ('INFO', 'WARN', 'ERROR', 'FATAL')
if level not in allowed_levels:
raise ValueError('level must be one of: ' +
', '.join(allowed_levels))
self._rest.post_request(
'log', None, {'log_level': level.upper(), 'message': msg}) | Write a diagnostic message to a log file or to standard output.
Arguments:
level -- Severity level of entry. One of: INFO, WARN, ERROR, FATAL.
msg -- Message to write to log. | entailment |
def download(self, file_name, save_as=None):
"""Download the specified file from the server.
Arguments:
file_name -- Name of file resource to save.
save_as -- Optional path name to write file to. If not specified,
then the file named by the last part of the resource path is
downloaded to the current directory.
Return: (save_path, bytes)
save_path -- Path where downloaded file was saved.
bytes -- Bytes downloaded.
"""
self._check_session()
try:
if save_as:
save_as = os.path.normpath(save_as)
save_dir = os.path.dirname(save_as)
if save_dir:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
elif not os.path.isdir(save_dir):
raise RuntimeError(save_dir + " is not a directory")
status, save_path, bytes = self._rest.download_file(
'files', file_name, save_as, 'application/octet-stream')
except resthttp.RestHttpError as e:
raise RuntimeError('failed to download "%s": %s' % (file_name, e))
return save_path, bytes | Download the specified file from the server.
Arguments:
file_name -- Name of file resource to save.
save_as -- Optional path name to write file to. If not specified,
then the file named by the last part of the resource path is
downloaded to the current directory.
Return: (save_path, bytes)
save_path -- Path where downloaded file was saved.
bytes -- Bytes downloaded. | entailment |
def download_all(self, dst_dir=None):
"""Download all available files.
Arguments:
dst_dir -- Optional destination directory to write files to. If not
specified, then files are downloaded to the current directory.
Return:
Dictionary of {file_name: file_size, ..}
"""
saved = {}
save_as = None
for f in self.files():
if dst_dir:
save_as = os.path.join(dst_dir, f.split('/')[-1])
name, bytes = self.download(f, save_as)
saved[name] = bytes
return saved | Download all available files.
Arguments:
dst_dir -- Optional destination directory to write files to. If not
specified, then files are downloaded to the current directory.
Return:
Dictionary of {file_name: file_size, ..} | entailment |
def upload(self, src_file_path, dst_file_name=None):
"""Upload the specified file to the server."""
self._check_session()
status, data = self._rest.upload_file(
'files', src_file_path, dst_file_name)
return data | Upload the specified file to the server. | entailment |
def wait_until_complete(self, timeout=None):
"""Wait until sequencer is finished.
This method blocks your application until the sequencer has completed
its operation. It returns once the sequencer has finished.
Arguments:
timeout -- Optional. Seconds to wait for sequencer to finish. If this
time is exceeded, then an exception is raised.
Return:
Sequencer testState value.
"""
timeout_at = None
if timeout:
timeout_at = time.time() + int(timeout)
sequencer = self.get('system1', 'children-sequencer')
while True:
cur_test_state = self.get(sequencer, 'state')
if 'PAUSE' in cur_test_state or 'IDLE' in cur_test_state:
break
time.sleep(2)
if timeout_at and time.time() >= timeout_at:
raise RuntimeError('wait_until_complete timed out after %s sec'
% timeout)
return self.get(sequencer, 'testState') | Wait until sequencer is finished.
This method blocks your application until the sequencer has completed
its operation. It returns once the sequencer has finished.
Arguments:
timeout -- Optional. Seconds to wait for sequencer to finish. If this
time is exceeded, then an exception is raised.
Return:
Sequencer testState value. | entailment |
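Putting the sequencer helpers together, a typical run might look like the sketch below, assuming `stc` is a started session; 'SequencerStart' and the 600-second timeout are illustrative values, not prescribed by this module.

```python
# Sketch of a sequencer run; the command name and timeout are illustrative.
stc.perform('SequencerStart')
final_state = stc.wait_until_complete(timeout=600)
print('sequencer finished with testState =', final_state)
```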
def ManagerMock(manager, *return_value):
"""
Set the results to two items:
>>> objects = ManagerMock(Post.objects, 'queryset', 'result')
>>> assert objects.filter() == objects.all()
Force an exception:
>>> objects = ManagerMock(Post.objects, Exception())
See QuerySetMock for more about how this works.
"""
def make_get_query_set(self, model):
def _get(*a, **k):
return QuerySetMock(model, *return_value)
return _get
actual_model = getattr(manager, 'model', None)
if actual_model:
model = mock.MagicMock(spec=actual_model())
else:
model = mock.MagicMock()
m = SharedMock()
m.model = model
m.get_query_set = make_get_query_set(m, actual_model)
m.get = m.get_query_set().get
m.count = m.get_query_set().count
m.exists = m.get_query_set().exists
m.__iter__ = m.get_query_set().__iter__
m.__getitem__ = m.get_query_set().__getitem__
return m | Set the results to two items:
>>> objects = ManagerMock(Post.objects, 'queryset', 'result')
>>> assert objects.filter() == objects.all()
Force an exception:
>>> objects = ManagerMock(Post.objects, Exception())
See QuerySetMock for more about how this works. | entailment |
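A hedged sketch of how the manager mock might be dropped into a test with `mock.patch.object`; `Post` and the `published` field are hypothetical and not part of the module above.

```python
import mock

def test_filter_returns_stubbed_posts():
    # Post is a hypothetical Django model with a default manager.
    objects = ManagerMock(Post.objects, 'first', 'second')
    with mock.patch.object(Post, 'objects', objects):
        assert list(Post.objects.filter(published=True)) == ['first', 'second']
        objects.assert_chain_calls(mock.call.filter(published=True))
```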
def assert_chain_calls(self, *calls):
"""
Asserts that a chained method was called (parents in the chain do not
matter, nor are they tracked). Use with `mock.call`.
>>> obj.filter(foo='bar').select_related('baz')
>>> obj.assert_chain_calls(mock.call.filter(foo='bar'))
>>> obj.assert_chain_calls(mock.call.select_related('baz'))
>>> obj.assert_chain_calls(mock.call.reverse())
*** AssertionError: [call.reverse()] not all found in call list, ...
"""
all_calls = self.__parent.mock_calls[:]
not_found = []
for kall in calls:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
if self.__parent.mock_calls:
message = '%r not all found in call list, %d other(s) were:\n%r' % (not_found, len(self.__parent.mock_calls), self.__parent.mock_calls)
else:
message = 'no calls were found'
raise AssertionError(message) | Asserts that a chained method was called (parents in the chain do not
matter, nor are they tracked). Use with `mock.call`.
>>> obj.filter(foo='bar').select_related('baz')
>>> obj.assert_chain_calls(mock.call.filter(foo='bar'))
>>> obj.assert_chain_calls(mock.call.select_related('baz'))
>>> obj.assert_chain_calls(mock.call.reverse())
*** AssertionError: [call.reverse()] not all found in call list, ... | entailment |
@contextmanager  # needed so this generator can be used as a context manager
def mock_signal_receiver(signal, wraps=None, **kwargs):
"""
Temporarily attaches a receiver to the provided ``signal`` within the scope
of the context manager.
The mocked receiver is returned as the ``as`` target of the ``with``
statement.
To have the mocked receiver wrap a callable, pass the callable as the
``wraps`` keyword argument. All other keyword arguments provided are passed
through to the signal's ``connect`` method.
>>> with mock_signal_receiver(post_save, sender=Model) as receiver:
>>> Model.objects.create()
>>> assert receiver.call_count == 1
"""
if wraps is None:
def wraps(*args, **kwargs):
return None
receiver = mock.Mock(wraps=wraps)
signal.connect(receiver, **kwargs)
yield receiver
signal.disconnect(receiver) | Temporarily attaches a receiver to the provided ``signal`` within the scope
of the context manager.
The mocked receiver is returned as the ``as`` target of the ``with``
statement.
To have the mocked receiver wrap a callable, pass the callable as the
``wraps`` keyword argument. All other keyword arguments provided are passed
through to the signal's ``connect`` method.
>>> with mock_signal_receiver(post_save, sender=Model) as receiver:
>>> Model.objects.create()
>>> assert receiver.call_count == 1 | entailment |
def QuerySetMock(model, *return_value):
"""
Get a SharedMock that returns self for most attributes and a new copy of
itself for any method that ordinarily generates QuerySets.
Set the results to two items:
>>> class Post(object): pass
>>> objects = QuerySetMock(Post, 'return', 'values')
>>> assert list(objects.filter()) == list(objects.all())
Force an exception:
>>> objects = QuerySetMock(Post, Exception())
Chain calls:
>>> objects.all().filter(filter_arg='dummy')
"""
def make_get(self, model):
def _get(*a, **k):
results = list(self)
if len(results) > 1:
raise model.MultipleObjectsReturned
try:
return results[0]
except IndexError:
raise model.DoesNotExist
return _get
def make_qs_returning_method(self):
def _qs_returning_method(*a, **k):
return copy.deepcopy(self)
return _qs_returning_method
def make_getitem(self):
def _getitem(k):
if isinstance(k, slice):
self.__start = k.start
self.__stop = k.stop
else:
return list(self)[k]
return self
return _getitem
def make_iterator(self):
def _iterator(*a, **k):
if len(return_value) == 1 and isinstance(return_value[0], Exception):
raise return_value[0]
start = getattr(self, '__start', None)
stop = getattr(self, '__stop', None)
for x in return_value[start:stop]:
yield x
return _iterator
actual_model = model
if actual_model:
model = mock.MagicMock(spec=actual_model())
else:
model = mock.MagicMock()
m = SharedMock(reserved=['count', 'exists'] + QUERYSET_RETURNING_METHODS)
m.__start = None
m.__stop = None
m.__iter__.side_effect = lambda: iter(m.iterator())
m.__getitem__.side_effect = make_getitem(m)
if hasattr(m, "__nonzero__"):
# Python 2
m.__nonzero__.side_effect = lambda: bool(return_value)
m.exists.side_effect = m.__nonzero__
else:
# Python 3
m.__bool__.side_effect = lambda: bool(return_value)
m.exists.side_effect = m.__bool__
m.__len__.side_effect = lambda: len(return_value)
m.count.side_effect = m.__len__
m.model = model
m.get = make_get(m, actual_model)
for method_name in QUERYSET_RETURNING_METHODS:
setattr(m, method_name, make_qs_returning_method(m))
# Note since this is a SharedMock, *all* auto-generated child
# attributes will have the same side_effect ... might not make
# sense for some like count().
m.iterator.side_effect = make_iterator(m)
return m | Get a SharedMock that returns self for most attributes and a new copy of
itself for any method that ordinarily generates QuerySets.
Set the results to two items:
>>> class Post(object): pass
>>> objects = QuerySetMock(Post, 'return', 'values')
>>> assert list(objects.filter()) == list(objects.all())
Force an exception:
>>> objects = QuerySetMock(Post, Exception())
Chain calls:
>>> objects.all().filter(filter_arg='dummy') | entailment |
def read(self, *args, **kwargs):
'''Overridden read() method to call parse_flask_section() at the end'''
ret = configparser.SafeConfigParser.read(self, *args, **kwargs)
self.parse_flask_section()
return ret | Overridden read() method to call parse_flask_section() at the end | entailment |
def readfp(self, *args, **kwargs):
'''Overridden readfp() method to call parse_flask_section() at the
end'''
ret = configparser.SafeConfigParser.readfp(self, *args, **kwargs)
self.parse_flask_section()
return ret | Overridden readfp() method to call parse_flask_section() at the
end | entailment |
def parse_flask_section(self):
'''Parse the [flask] section of your config and hand off the config
to the app in context.
Config vars should have the same name as their flask equivalent except
in all lower-case.'''
if self.has_section('flask'):
for item in self.items('flask'):
self._load_item(item[0])
else:
warnings.warn("No [flask] section found in config") | Parse the [flask] section of your config and hand off the config
to the app in context.
Config vars should have the same name as their flask equivalent except
in all lower-case. | entailment |
def _load_item(self, key):
'''Load the specified item from the [flask] section. Type is
determined by the type of the equivalent value in app.default_config
or string if unknown.'''
key_u = key.upper()
default = current_app.default_config.get(key_u)
# One of the default config vars is a timedelta - interpret it
# as an int and construct using it
if isinstance(default, datetime.timedelta):
current_app.config[key_u] = datetime.timedelta(self.getint('flask', key))
elif isinstance(default, bool):
current_app.config[key_u] = self.getboolean('flask', key)
elif isinstance(default, float):
current_app.config[key_u] = self.getfloat('flask', key)
elif isinstance(default, int):
current_app.config[key_u] = self.getint('flask', key)
else:
# All the string keys need to be coerced into str()
# because Flask expects some of them not to be unicode
current_app.config[key_u] = str(self.get('flask', key)) | Load the specified item from the [flask] section. Type is
determined by the type of the equivalent value in app.default_config
or string if unknown. | entailment |
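A minimal sketch of using this parser, assuming the class above is exposed as `FlaskIni` (the class name and the file name are assumptions); keys in the `[flask]` section mirror Flask's upper-case config names in lower-case.

```python
from flask import Flask

# settings.ini might contain, for example:
#   [flask]
#   debug = true
#   secret_key = change-me
app = Flask(__name__)
with app.app_context():                 # the parser relies on current_app
    cfg = FlaskIni()                    # assumed name of the class above
    cfg.read('settings.ini')            # read() triggers parse_flask_section()
    assert isinstance(app.config['DEBUG'], bool)
```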
def _execute_migrations(self, current_version, destination_version):
"""
passed a version:
this version doesn't exist in the database and is newer than the last version -> do migrations up until this version
this version doesn't exist in the database and is older than the last version -> do nothing, as this is unpredictable behavior
this version exists in the database and is older than the last version -> do migrations down until this version
didn't pass a version -> do migrations up until the last available version
"""
is_migration_up = True
# check if a version was passed to the program
if self.config.get("schema_version"):
# if was passed and this version is present in the database, check if is older than the current version
destination_version_id = self.sgdb.get_version_id_from_version_number(destination_version)
if destination_version_id:
current_version_id = self.sgdb.get_version_id_from_version_number(current_version)
# if this version is previous to the current version in database, then will be done a migration down to this version
if current_version_id > destination_version_id:
is_migration_up = False
# if was passed and this version is not present in the database and is older than the current version, raise an exception
# cause is trying to go down to something that never was done
elif current_version > destination_version:
raise Exception("Trying to migrate to a lower version wich is not found on database (%s)" % destination_version)
# getting only the migration sql files to be executed
migrations_to_be_executed = self._get_migration_files_to_be_executed(current_version, destination_version, is_migration_up)
self._execution_log("- Current version is: %s" % current_version, "GREEN", log_level_limit=1)
if migrations_to_be_executed is None or len(migrations_to_be_executed) == 0:
self._execution_log("- Destination version is: %s" % current_version, "GREEN", log_level_limit=1)
self._execution_log("\nNothing to do.\n", "PINK", log_level_limit=1)
return
self._execution_log("- Destination version is: %s" % (is_migration_up and migrations_to_be_executed[-1].version or destination_version), "GREEN", log_level_limit=1)
up_down_label = is_migration_up and "up" or "down"
if self.config.get("show_sql_only", False):
self._execution_log("\nWARNING: database migrations are not being executed ('--showsqlonly' activated)", "YELLOW", log_level_limit=1)
else:
self._execution_log("\nStarting migration %s!" % up_down_label, log_level_limit=1)
self._execution_log("*** versions: %s\n" % ([ migration.version for migration in migrations_to_be_executed]), "CYAN", log_level_limit=1)
sql_statements_executed = []
for migration in migrations_to_be_executed:
sql = is_migration_up and migration.sql_up or migration.sql_down
if not self.config.get("show_sql_only", False):
self._execution_log("===== executing %s (%s) =====" % (migration.file_name, up_down_label), log_level_limit=1)
label = None
if is_migration_up:
label = self.config.get("label_version", None)
try:
self.sgdb.change(sql, migration.version, migration.file_name, migration.sql_up, migration.sql_down, is_migration_up, self._execution_log, label)
except Exception as e:
self._execution_log("===== ERROR executing %s (%s) =====" % (migration.abspath, up_down_label), log_level_limit=1)
raise e
# paused mode
if self.config.get("paused_mode", False):
if (sys.version_info > (3, 0)):
input("* press <enter> to continue... ")
else:
raw_input("* press <enter> to continue... ")
# recording the last statement executed
sql_statements_executed.append(sql)
if self.config.get("show_sql", False) or self.config.get("show_sql_only", False):
self._execution_log("__________ SQL statements executed __________", "YELLOW", log_level_limit=1)
for sql in sql_statements_executed:
self._execution_log(sql, "YELLOW", log_level_limit=1)
self._execution_log("_____________________________________________", "YELLOW", log_level_limit=1) | passed a version:
this version doesn't exist in the database and is newer than the last version -> do migrations up until this version
this version doesn't exist in the database and is older than the last version -> do nothing, as this is unpredictable behavior
this version exists in the database and is older than the last version -> do migrations down until this version
didn't pass a version -> do migrations up until the last available version | entailment |
def csrf_token():
"""
Generate a token string from bytes arrays. The token in the session is user
specific.
"""
if "_csrf_token" not in session:
session["_csrf_token"] = os.urandom(128)
return hmac.new(app.secret_key, session["_csrf_token"],
digestmod=sha1).hexdigest() | Generate a token string from bytes arrays. The token in the session is user
specific. | entailment |
def check_csrf_token():
"""Checks that token is correct, aborting if not"""
if request.method in ("GET",): # not exhaustive list
return
token = request.form.get("csrf_token")
if token is None:
app.logger.warning("Expected CSRF Token: not present")
abort(400)
if not safe_str_cmp(token, csrf_token()):
app.logger.warning("CSRF Token incorrect")
abort(400) | Checks that token is correct, aborting if not | entailment |
def get_request(cls):
"""
Get the HTTPRequest object from thread storage or from a callee by searching
each frame in the call stack.
"""
request = cls.get_global('request')
if request:
return request
try:
stack = inspect.stack()
except IndexError:
# in some cases this may return an index error
# (pyc files dont match py files for example)
return
for frame, _, _, _, _, _ in stack:
if 'request' in frame.f_locals:
if isinstance(frame.f_locals['request'], HttpRequest):
request = frame.f_locals['request']
cls.set_global('request', request)
return request | Get the HTTPRequest object from thread storage or from a callee by searching
each frame in the call stack. | entailment |
def route(app_or_blueprint, rule, **options):
"""An alternative to :meth:`flask.Flask.route` or :meth:`flask.Blueprint.route` that
always adds the ``POST`` method to the allowed endpoint request methods.
You should use this for all your view functions that would need to use Sijax.
We're doing this because Sijax uses ``POST`` for data passing,
which means that every endpoint that wants Sijax support
would have to accept ``POST`` requests.
Registering functions that would use Sijax should happen like this::
@flask_sijax.route(app, '/')
def index():
pass
If you remember to make your view functions accessible via POST
like this, you can avoid using this decorator::
@app.route('/', methods=['GET', 'POST'])
def index():
pass
"""
def decorator(f):
methods = options.pop('methods', ('GET', 'POST'))
if 'POST' not in methods:
methods = tuple(methods) + ('POST',)
options['methods'] = methods
app_or_blueprint.add_url_rule(rule, None, f, **options)
return f
return decorator | An alternative to :meth:`flask.Flask.route` or :meth:`flask.Blueprint.route` that
always adds the ``POST`` method to the allowed endpoint request methods.
You should use this for all your view functions that would need to use Sijax.
We're doing this because Sijax uses ``POST`` for data passing,
which means that every endpoint that wants Sijax support
would have to accept ``POST`` requests.
Registering functions that would use Sijax should happen like this::
@flask_sijax.route(app, '/')
def index():
pass
If you remember to make your view functions accessible via POST
like this, you can avoid using this decorator::
@app.route('/', methods=['GET', 'POST'])
def index():
pass | entailment |
def _make_response(sijax_response):
"""Takes a Sijax response object and returns a
valid Flask response object."""
from types import GeneratorType
if isinstance(sijax_response, GeneratorType):
# Streaming response using a generator (non-JSON response).
# Upon returning a response, Flask would automatically destroy
# the request data and uploaded files - done by `flask.ctx.RequestContext.auto_pop()`
# We can't allow that, since the user-provided callback we're executing
# from within the generator may want to access request data/files.
# That's why we'll tell Flask to preserve the context and we'll clean up ourselves.
request.environ['flask._preserve_context'] = True
# Clean-up code taken from `flask.testing.TestingClient`
def clean_up_context():
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop()
# As per the WSGI specification, `close()` would be called on iterator responses.
# Let's wrap the iterator in another one, which will forward that `close()` call to our clean-up callback.
response = Response(ClosingIterator(sijax_response, clean_up_context), direct_passthrough=True)
else:
# Non-streaming response - a single JSON string
response = Response(sijax_response)
return response | Takes a Sijax response object and returns a
valid Flask response object. | entailment |
def register_comet_callback(self, *args, **kwargs):
"""Registers a single Comet callback function
(see :ref:`comet-plugin`).
Refer to :func:`sijax.plugin.comet.register_comet_callback`
for more details - its signature differs slightly.
This method's signature is the same, except that the first
argument that :func:`sijax.plugin.comet.register_comet_callback`
expects is the Sijax instance, and this method
does that automatically, so you don't have to do it.
"""
sijax.plugin.comet.register_comet_callback(self._sijax, *args, **kwargs) | Registers a single Comet callback function
(see :ref:`comet-plugin`).
Refer to :func:`sijax.plugin.comet.register_comet_callback`
for more details - its signature differs slightly.
This method's signature is the same, except that the first
argument that :func:`sijax.plugin.comet.register_comet_callback`
expects is the Sijax instance, and this method
does that automatically, so you don't have to do it. | entailment |
def register_comet_object(self, *args, **kwargs):
"""Registers all functions from the object as Comet functions
(see :ref:`comet-plugin`).
This makes mass registration of functions a lot easier.
Refer to :func:`sijax.plugin.comet.register_comet_object`
for more details - its signature differs slightly.
This method's signature is the same, except that the first
argument that :func:`sijax.plugin.comet.register_comet_object`
expects is the Sijax instance, and this method
does that automatically, so you don't have to do it.
"""
sijax.plugin.comet.register_comet_object(self._sijax, *args, **kwargs) | Registers all functions from the object as Comet functions
(see :ref:`comet-plugin`).
This makes mass registration of functions a lot easier.
Refer to :func:`sijax.plugin.comet.register_comet_object`
for more details - its signature differs slightly.
This method's signature is the same, except that the first
argument that :func:`sijax.plugin.comet.register_comet_object`
expects is the Sijax instance, and this method
does that automatically, so you don't have to do it. | entailment |
def register_upload_callback(self, *args, **kwargs):
"""Registers an Upload function (see :ref:`upload-plugin`)
to handle a certain form.
Refer to :func:`sijax.plugin.upload.register_upload_callback`
for more details.
This method passes one additional argument to your handler
functions - the ``flask.request.files`` object.
Your upload handler function's signature should look like this::
def func(obj_response, files, form_values)
:return: string - javascript code that initializes the form
"""
if 'args_extra' not in kwargs:
kwargs['args_extra'] = [request.files]
return sijax.plugin.upload.register_upload_callback(self._sijax, *args, **kwargs) | Registers an Upload function (see :ref:`upload-plugin`)
to handle a certain form.
Refer to :func:`sijax.plugin.upload.register_upload_callback`
for more details.
This method passes one additional argument to your handler
functions - the ``flask.request.files`` object.
Your upload handler function's signature should look like this::
def func(obj_response, files, form_values)
:return: string - javascript code that initializes the form | entailment |
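A minimal handler sketch for the documented ``(obj_response, files, form_values)`` signature; the field name ``photo``, the save path and the ``obj_response.alert`` call are illustrative assumptions::

    def photo_upload_handler(obj_response, files, form_values):
        # `files` is the flask.request.files object passed in via args_extra above
        uploaded = files.get('photo')
        if uploaded is not None and uploaded.filename:
            uploaded.save('/tmp/' + uploaded.filename)
        # obj_response.alert is assumed from Sijax's response API
        obj_response.alert('Received %d form field(s)' % len(form_values))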
def execute_callback(self, *args, **kwargs):
"""Executes a callback and returns the proper response.
Refer to :meth:`sijax.Sijax.execute_callback` for more details.
"""
response = self._sijax.execute_callback(*args, **kwargs)
return _make_response(response) | Executes a callback and returns the proper response.
Refer to :meth:`sijax.Sijax.execute_callback` for more details. | entailment |
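A hedged sketch of a view built around this method; ``g.sijax`` and its ``is_sijax_request``/``register_callback``/``process_request`` members follow typical Flask-Sijax usage and are assumptions not shown in this excerpt (``process_request`` ultimately drives ``execute_callback``)::

    import flask
    import flask_sijax
    from flask import g, render_template

    app = flask.Flask(__name__)
    flask_sijax.Sijax(app)  # assumed extension setup

    @flask_sijax.route(app, '/calc')
    def calc():
        def add_handler(obj_response, a, b):
            obj_response.alert('sum = %s' % (int(a) + int(b)))

        if g.sijax.is_sijax_request:
            g.sijax.register_callback('add', add_handler)
            return g.sijax.process_request()
        return render_template('calc.html')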
def has_permission(self, request, extra_permission=None):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
permission = request.user.is_active and request.user.is_staff
if extra_permission:
permission = permission and request.user.has_perm(extra_permission)
return permission | Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site. | entailment |
def as_view(self, view, cacheable=False, extra_permission=None):
"""
Wraps a view in authentication/caching logic
extra_permission can be used to require an extra permission for this view, such as a module permission
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request, extra_permission):
# show login pane
return self.login(request)
return view(request, *args, **kwargs)
# Mark it as never_cache
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
inner = ensure_csrf_cookie(inner)
return update_wrapper(inner, view) | Wraps a view in authentication/caching logic
extra_permission can be used to require an extra permission for this view, such as a module permission | entailment |
def media(self, request, module, path):
"""
Serve static files below a given point in the directory structure.
"""
if module == 'nexus':
document_root = os.path.join(NEXUS_ROOT, 'media')
else:
document_root = self.get_module(module).media_root
path = posixpath.normpath(urllib.unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
raise Http404("Directory indexes are not allowed here.")
if not os.path.exists(fullpath):
raise Http404('"%s" does not exist' % fullpath)
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
mimetype = mimetypes.guess_type(fullpath)[0] or 'application/octet-stream'
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):
return HttpResponseNotModified(content_type=mimetype)
with open(fullpath, 'rb') as fp:
    contents = fp.read()
response = HttpResponse(contents, content_type=mimetype)
response["Last-Modified"] = http_date(statobj[stat.ST_MTIME])
response["Content-Length"] = len(contents)
return response | Serve static files below a given point in the directory structure. | entailment |
def login(self, request, form_class=None):
"Login form"
from django.contrib.auth import login as login_
from django.contrib.auth.forms import AuthenticationForm
if form_class is None:
form_class = AuthenticationForm
if request.POST:
form = form_class(request, request.POST)
if form.is_valid():
login_(request, form.get_user())
request.session.save()
return HttpResponseRedirect(request.POST.get('next') or reverse('nexus:index', current_app=self.name))
else:
request.session.set_test_cookie()
else:
form = form_class(request)
request.session.set_test_cookie()
return self.render_to_response('nexus/login.html', {
'form': form,
}, request) | Login form | entailment |
def logout(self, request):
"Logs out user and redirects them to Nexus home"
from django.contrib.auth import logout
logout(request)
return HttpResponseRedirect(reverse('nexus:index', current_app=self.name)) | Logs out user and redirects them to Nexus home | entailment |
def dashboard(self, request):
"Basic dashboard panel"
# TODO: these should be ajax
module_set = []
for namespace, module in self.get_modules():
home_url = module.get_home_url(request)
if hasattr(module, 'render_on_dashboard'):
# Show by default, unless a permission is required
if not module.permission or request.user.has_perm(module.permission):
module_set.append((module.get_dashboard_title(), module.render_on_dashboard(request), home_url))
return self.render_to_response('nexus/dashboard.html', {
'module_set': module_set,
}, request) | Basic dashboard panel | entailment |
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
return {
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context['show_delete'])),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True,
} | Displays the row of buttons for delete and save. | entailment |
def autodiscover(site=None):
"""
Auto-discover INSTALLED_APPS nexus.py modules and fail silently when
not present. This forces an import on them to register any api bits they
may want.
Specifying ``site`` will register all auto discovered modules with the new site.
"""
# Bail out if autodiscover didn't finish loading from a previous call so
# that we avoid running autodiscover again when the URLconf is loaded by
# the exception handler to resolve the handler500 view. This prevents an
# admin.py module with errors from re-registering models and raising a
# spurious AlreadyRegistered exception (see #8245).
global LOADING
if LOADING:
return
LOADING = True
if site:
orig_site = globals()['site']
globals()['site'] = locals()['site']
import imp
from django.utils.importlib import import_module
from django.conf import settings
for app in settings.INSTALLED_APPS:
# For each app, we need to look for an api.py inside that app's
# package. We can't use os.path here -- recall that modules may be
# imported different ways (think zip files) -- so we need to get
# the app's __path__ and look for admin.py on that path.
# Step 1: find out the app's __path__. Import errors here will (and
# should) bubble up, but a missing __path__ (which is legal, but weird)
# fails silently -- apps that do weird things with __path__ might
# need to roll their own admin registration.
try:
app_path = import_module(app).__path__
except (AttributeError, ImportError):
continue
# Step 2: use imp.find_module to find the app's admin.py. For some
# reason imp.find_module raises ImportError if the app can't be found
# but doesn't actually try to import the module. So skip this app if
# its admin.py doesn't exist
try:
imp.find_module('nexus_modules', app_path)
except ImportError:
continue
# Step 3: import the app's admin file. If this has errors we want them
# to bubble up.
import_module("%s.nexus_modules" % app)
# # load builtins
# from gargoyle.builtins import *
if site:
globals()['site'] = orig_site
# autodiscover was successful, reset loading flag.
LOADING = False | Auto-discover INSTALLED_APPS nexus.py modules and fail silently when
not present. This forces an import on them to register any api bits they
may want.
Specifying ``site`` will register all auto discovered modules with the new site. | entailment |
def handle_starttag(self, tagName, attributeList, isSelfClosing=False):
'''
Internal for parsing
'''
tagName = tagName.lower()
inTag = self._inTag
if isSelfClosing is False and tagName in IMPLICIT_SELF_CLOSING_TAGS:
isSelfClosing = True
newTag = AdvancedTag(tagName, attributeList, isSelfClosing, ownerDocument=self)
if self.root is None:
self.root = newTag
elif len(inTag) > 0:
inTag[-1].appendChild(newTag)
else:
raise MultipleRootNodeException()
if isSelfClosing is False:
inTag.append(newTag)
return newTag | Internal for parsing | entailment |
def handle_endtag(self, tagName):
'''
Internal for parsing
'''
try:
foundIt = False
inTag = self._inTag
for i in range(len(inTag)):
if inTag[i].tagName == tagName:
foundIt = True
break
if not foundIt:
return
# Handle closing tags which should have been closed but weren't
while inTag[-1].tagName != tagName:
inTag.pop()
inTag.pop()
except:
pass | Internal for parsing | entailment |
def handle_data(self, data):
'''
Internal for parsing
'''
if data:
inTag = self._inTag
if len(inTag) > 0:
inTag[-1].appendText(data)
elif data.strip(): #and not self.getRoot():
# Must be text prior to or after root node
raise MultipleRootNodeException() | Internal for parsing | entailment |
def handle_entityref(self, entity):
'''
Internal for parsing
'''
inTag = self._inTag
if len(inTag) > 0:
inTag[-1].appendText('&%s;' %(entity,))
else:
raise MultipleRootNodeException() | Internal for parsing | entailment |
def handle_charref(self, charRef):
'''
Internal for parsing
'''
inTag = self._inTag
if len(inTag) > 0:
inTag[-1].appendText('&#%s;' %(charRef,))
else:
raise MultipleRootNodeException() | Internal for parsing | entailment |
def handle_comment(self, comment):
'''
Internal for parsing
'''
inTag = self._inTag
if len(inTag) > 0:
inTag[-1].appendText('<!-- %s -->' %(comment,))
else:
raise MultipleRootNodeException() | Internal for parsing | entailment |
def getRootNodes(self):
'''
getRootNodes - Gets all objects at the "root" (first level; no parent). Use this if you may have multiple roots (not children of <html>)
Use this method to get objects, for example, in an AJAX request where <html> may not be your root.
Note: If there are multiple root nodes (i.e. no <html> at the top), getRoot will return a special tag. This function automatically
handles that, and returns all root nodes.
@return list<AdvancedTag> - A list of AdvancedTags which are at the root level of the tree.
'''
root = self.root
if not root:
return []
if root.tagName == INVISIBLE_ROOT_TAG:
return list(root.children)
return [root] | getRootNodes - Gets all objects at the "root" (first level; no parent). Use this if you may have multiple roots (not children of <html>)
Use this method to get objects, for example, in an AJAX request where <html> may not be your root.
Note: If there are multiple root nodes (i.e. no <html> at the top), getRoot will return a special tag. This function automatically
handles that, and returns all root nodes.
@return list<AdvancedTag> - A list of AdvancedTags which are at the root level of the tree. | entailment |
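An illustrative run of the multiple-root case described above; the fragment is made up for the example::

    import AdvancedHTMLParser

    parser = AdvancedHTMLParser.AdvancedHTMLParser()
    # An AJAX-style fragment: two top-level elements, no <html> wrapper
    parser.parseStr('<div id="a">one</div><div id="b">two</div>')

    roots = parser.getRootNodes()
    print([node.id for node in roots])  # expected: ['a', 'b']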
def getAllNodes(self):
'''
getAllNodes - Get every element
@return TagCollection<AdvancedTag>
'''
ret = TagCollection()
for rootNode in self.getRootNodes():
ret.append(rootNode)
ret += rootNode.getAllChildNodes()
return ret | getAllNodes - Get every element
@return TagCollection<AdvancedTag> | entailment |
def getElementsByTagName(self, tagName, root='root'):
'''
getElementsByTagName - Searches and returns all elements with a specific tag name.
@param tagName <lowercase str> - A lowercase string of the tag name.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
'''
(root, isFromRoot) = self._handleRootArg(root)
elements = []
if isFromRoot is True and root.tagName == tagName:
elements.append(root)
getElementsByTagName = self.getElementsByTagName
for child in root.children:
if child.tagName == tagName:
elements.append(child)
elements += getElementsByTagName(tagName, child)
return TagCollection(elements) | getElementsByTagName - Searches and returns all elements with a specific tag name.
@param tagName <lowercase str> - A lowercase string of the tag name.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used. | entailment |
def getElementsByName(self, name, root='root'):
'''
getElementsByName - Searches and returns all elements with a specific name.
@param name <str> - A string of the name attribute
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root' [default], the root of the parsed tree will be used.
'''
(root, isFromRoot) = self._handleRootArg(root)
elements = []
if isFromRoot is True and root.name == name:
elements.append(root)
getElementsByName = self.getElementsByName
for child in root.children:
if child.getAttribute('name') == name:
elements.append(child)
elements += getElementsByName(name, child)
return TagCollection(elements) | getElementsByName - Searches and returns all elements with a specific name.
@param name <str> - A string of the name attribute
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root' [default], the root of the parsed tree will be used. | entailment |
def getElementById(self, _id, root='root'):
'''
getElementById - Searches and returns the first (should only be one) element with the given ID.
@param id <str> - A string of the id attribute.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root' [default], the root of the parsed tree will be used.
'''
(root, isFromRoot) = self._handleRootArg(root)
if isFromRoot is True and root.id == _id:
return root
getElementById = self.getElementById
for child in root.children:
if child.getAttribute('id') == _id:
return child
potential = getElementById(_id, child)
if potential is not None:
return potential
return None | getElementById - Searches and returns the first (should only be one) element with the given ID.
@param id <str> - A string of the id attribute.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root' [default], the root of the parsed tree will be used. | entailment |
def getElementsByClassName(self, className, root='root'):
'''
getElementsByClassName - Searches and returns all elements containing a given class name.
@param className <str> - A one-word class name
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root' [default], the root of the parsed tree will be used.
'''
(root, isFromRoot) = self._handleRootArg(root)
elements = []
if isFromRoot is True and className in root.classNames:
elements.append(root)
getElementsByClassName = self.getElementsByClassName
for child in root.children:
if className in child.classNames:
elements.append(child)
elements += getElementsByClassName(className, child)
return TagCollection(elements) | getElementsByClassName - Searches and returns all elements containing a given class name.
@param className <str> - A one-word class name
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root' [default], the root of the parsed tree will be used. | entailment |
def getElementsByAttr(self, attrName, attrValue, root='root'):
'''
getElementsByAttr - Searches the full tree for elements with a given attribute name and value combination. This is always a full scan.
@param attrName <lowercase str> - A lowercase attribute name
@param attrValue <str> - Expected value of attribute
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
'''
(root, isFromRoot) = self._handleRootArg(root)
elements = []
if isFromRoot is True and root.getAttribute(attrName) == attrValue:
elements.append(root)
getElementsByAttr = self.getElementsByAttr
for child in root.children:
if child.getAttribute(attrName) == attrValue:
elements.append(child)
elements += getElementsByAttr(attrName, attrValue, child)
return TagCollection(elements) | getElementsByAttr - Searches the full tree for elements with a given attribute name and value combination. This is always a full scan.
@param attrName <lowercase str> - A lowercase attribute name
@param attrValue <str> - Expected value of attribute
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used. | entailment |
def getElementsWithAttrValues(self, attrName, attrValues, root='root'):
'''
getElementsWithAttrValues - Returns all elements whose attribute named #attrName has a value matching one of those in #attrValues
@param attrName <lowercase str> - A lowercase attribute name
@param attrValues set<str> - A set of all valid values.
@return - TagCollection of all matching elements
'''
(root, isFromRoot) = self._handleRootArg(root)
if type(attrValues) != set:
attrValues = set(attrValues)
return root.getElementsWithAttrValues(attrName, attrValues) | getElementsWithAttrValues - Returns all elements whose attribute named #attrName has a value matching one of those in #attrValues
@param attrName <lowercase str> - A lowercase attribute name
@param attrValues set<str> - A set of all valid values.
@return - TagCollection of all matching elements | entailment |
def getElementsCustomFilter(self, filterFunc, root='root'):
'''
getElementsCustomFilter - Scan elements using a provided function
@param filterFunc <function>(node) - A function that takes an AdvancedTag as an argument, and returns True if some arbitrary criteria is met
@return - TagCollection of all matching elements
'''
(root, isFromRoot) = self._handleRootArg(root)
elements = []
if isFromRoot is True and filterFunc(root) is True:
elements.append(root)
getElementsCustomFilter = self.getElementsCustomFilter
for child in root.children:
if filterFunc(child) is True:
elements.append(child)
elements += getElementsCustomFilter(filterFunc, child)
return TagCollection(elements) | getElementsCustomFilter - Scan elements using a provided function
@param filterFunc <function>(node) - A function that takes an AdvancedTag as an argument, and returns True if some arbitrary criteria is met
@return - TagCollection of all matching elements | entailment |
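A short custom-filter sketch; the sample markup is illustrative and ``getAttribute`` is used in its two-argument form as elsewhere in this excerpt::

    import AdvancedHTMLParser

    parser = AdvancedHTMLParser.AdvancedHTMLParser()
    parser.parseStr('<div><a href="/docs">docs</a><a name="anchor">here</a></div>')

    # Keep only <a> tags that actually carry an href attribute
    def has_href(node):
        return node.tagName == 'a' and node.getAttribute('href', '') != ''

    links = parser.getElementsCustomFilter(has_href)
    print(len(links))  # expected: 1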
def getFirstElementCustomFilter(self, filterFunc, root='root'):
'''
getFirstElementCustomFilter - Scan elements using a provided function, stop and return the first match.
@see getElementsCustomFilter to match multiple elements
@param filterFunc <function>(node) - A function that takes an AdvancedTag as an argument, and returns True if some arbitrary criteria is met
@return - An AdvancedTag of the node that matched, or None if no match.
'''
(root, isFromRoot) = self._handleRootArg(root)
elements = []
if isFromRoot is True and filterFunc(root) is True:
return root
getFirstElementCustomFilter = self.getFirstElementCustomFilter
for child in root.children:
if filterFunc(child) is True:
return child
subRet = getFirstElementCustomFilter(filterFunc, child)
if subRet:
return subRet
return None | getFirstElementCustomFilter - Scan elements using a provided function, stop and return the first match.
@see getElementsCustomFilter to match multiple elements
@param filterFunc <function>(node) - A function that takes an AdvancedTag as an argument, and returns True if some arbitrary criteria is met
@return - An AdvancedTag of the node that matched, or None if no match. | entailment |
def contains(self, em):
'''
Checks if #em is found anywhere within this element tree
@param em <AdvancedTag> - Tag of interest
@return <bool> - If element #em is within this tree
'''
for rootNode in self.getRootNodes():
if rootNode.contains(em):
return True
return False | Checks if #em is found anywhere within this element tree
@param em <AdvancedTag> - Tag of interest
@return <bool> - If element #em is within this tree | entailment |
def containsUid(self, uid):
'''
Check if #uid is found anywhere within this element tree
@param uid <uuid.UUID> - Uid
@return <bool> - If #uid is found within this tree
'''
for rootNode in self.getRootNodes():
if rootNode.containsUid(uid):
return True
return False | Check if #uid is found anywhere within this element tree
@param uid <uuid.UUID> - Uid
@return <bool> - If #uid is found within this tree | entailment |
def find(self, **kwargs):
'''
find - Perform a search of elements using attributes as keys and potential values as values
(i.e. parser.find(name='blah', tagname='span') will return all elements in this document
with the name "blah" of the tag type "span" )
Arguments are key = value, or key can equal a tuple/list of values to match ANY of those values.
Append a key with __contains to test if some strs (or several possible strs) are within an element
Append a key with __icontains to perform the same __contains op, but ignoring case
Special keys:
tagname - The tag name of the element
text - The text within an element
NOTE: Empty string means both "not set" and "no value" in this implementation.
NOTE: If you installed the QueryableList module (i.e. ran setup.py without --no-deps) it is
better to use the "filter"/"filterAnd" or "filterOr" methods, which are also available
on all tags and tag collections (tag collections also have filterAllAnd and filterAllOr)
@return TagCollection<AdvancedTag> - A list of tags that matched the filter criteria
'''
if not kwargs:
return TagCollection()
# Because of how closures work in python, need a function to generate these lambdas
# because the closure basically references "current key in iteration" and not
# "actual instance" of variable. Seems to me to be a bug... but whatever
def _makeTagnameLambda(tagName):
return lambda em : em.tagName == tagName
def _makeAttributeLambda(_key, _value):
return lambda em : em.getAttribute(_key, '') == _value
def _makeTagnameInLambda(tagNames):
return lambda em : em.tagName in tagNames
def _makeAttributeInLambda(_key, _values):
return lambda em : em.getAttribute(_key, '') in _values
def _makeTextLambda(_value):
return lambda em : em.text == _value
def _makeTextInLambda(_values):
return lambda em : em.text in _values
def _makeAttributeContainsLambda(_key, _value, icontains=False):
if icontains is False:
return lambda em : _value in em.getAttribute(_key, '')
else:
_value = _value.lower()
return lambda em : _value in em.getAttribute(_key, '').lower()
def _makeTextContainsLambda(_value, icontains=False):
if icontains is False:
return lambda em : _value in em.text
else:
_value = _value.lower()
return lambda em : _value in em.text.lower()
def _makeAttributeContainsInLambda(_key, _values, icontains=False):
if icontains:
_values = tuple([x.lower() for x in _values])
def _testFunc(em):
attrValue = em.getAttribute(_key, '')
if icontains:
attrValue = attrValue.lower()
for value in _values:
if value in attrValue:
return True
return False
return _testFunc
def _makeTextContainsInLambda(_values, icontains=False):
if icontains:
_values = tuple([x.lower() for x in _values])
def _testFunc(em):
text = em.text
if icontains:
text = text.lower()
for value in _values:
if value in text:
return True
return False
return _testFunc
# This will hold all the functions we will chain for matching
matchFunctions = []
# Iterate over all the filter portions, and build a filter.
for key, value in kwargs.items():
key = key.lower()
endsIContains = key.endswith('__icontains')
endsContains = key.endswith('__contains')
isValueList = isinstance(value, (list, tuple))
thisFunc = None
if endsIContains or endsContains:
key = re.sub('__[i]{0,1}contains$', '', key)
if key == 'tagname':
raise ValueError('tagname is not supported for contains')
if isValueList:
if key == 'text':
thisFunc = _makeTextContainsInLambda(value, icontains=endsIContains)
else:
thisFunc = _makeAttributeContainsInLambda(key, value, icontains=endsIContains)
else:
if key == 'text':
thisFunc = _makeTextContainsLambda(value, icontains=endsIContains)
else:
thisFunc = _makeAttributeContainsLambda(key, value, icontains=endsIContains)
else:
# Not contains, straight up
if isValueList:
if key == 'tagname':
thisFunc = _makeTagnameInLambda(value)
elif key == 'text':
thisFunc = _makeTextInLambda(value)
else:
thisFunc = _makeAttributeInLambda(key, value)
else:
if key == 'tagname':
thisFunc = _makeTagnameLambda(value)
elif key == 'text':
thisFunc = _makeTextLambda(value)
else:
thisFunc = _makeAttributeLambda(key, value)
matchFunctions.append( thisFunc )
# The actual matching function - This will run through the assembled
# #matchFunctions list, testing the element against each match
# and returning all elements in a TagCollection that match this list.
def doMatchFunc(em):
for matchFunction in matchFunctions:
if matchFunction(em) is False:
return False
return True
return self.getElementsCustomFilter(doMatchFunc) | find - Perform a search of elements using attributes as keys and potential values as values
(i.e. parser.find(name='blah', tagname='span') will return all elements in this document
with the name "blah" of the tag type "span" )
Arguments are key = value, or key can equal a tuple/list of values to match ANY of those values.
Append a key with __contains to test if some strs (or several possible strs) are within an element
Append a key with __icontains to perform the same __contains op, but ignoring case
Special keys:
tagname - The tag name of the element
text - The text within an element
NOTE: Empty string means both "not set" and "no value" in this implementation.
NOTE: If you installed the QueryableList module (i.e. ran setup.py without --no-deps) it is
better to use the "filter"/"filterAnd" or "filterOr" methods, which are also available
on all tags and tag collections (tag collections also have filterAllAnd and filterAllOr)
@return TagCollection<AdvancedTag> - A list of tags that matched the filter criteria | entailment |
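Illustrative calls against a small, made-up document; the attribute names are examples only::

    import AdvancedHTMLParser

    parser = AdvancedHTMLParser.AdvancedHTMLParser()
    parser.parseStr('<form><input name="user" value="bob" />'
                    '<input name="mail" value="[email protected]" /></form>')

    # Exact attribute match
    users = parser.find(name='user')

    # Any of several tag names, plus a case-insensitive substring of an attribute value
    matches = parser.find(tagname=('input', 'select'), value__icontains='EXAMPLE.COM')

    print(len(users), len(matches))  # expected: 1 1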
def getHTML(self):
'''
getHTML - Get the full HTML as contained within this tree.
If parsed from a document, this will contain the original whitespacing.
@returns - <str> of html
@see getFormattedHTML
@see getMiniHTML
'''
root = self.getRoot()
if root is None:
raise ValueError('Did not parse anything. Use parseFile or parseStr')
if self.doctype:
doctypeStr = '<!%s>\n' %(self.doctype)
else:
doctypeStr = ''
# 6.6.0: If we have a real root tag, print the outerHTML. If we have a fake root tag (for multiple root condition),
# then print the innerHTML (skipping the outer root tag). Otherwise, we will miss
# untagged text (between the multiple root nodes).
if root.tagName == INVISIBLE_ROOT_TAG:
    return doctypeStr + root.innerHTML
else:
    return doctypeStr + root.outerHTML
If parsed from a document, this will contain the original whitespacing.
@returns - <str> of html
@see getFormattedHTML
@see getMiniHTML | entailment |
def getFormattedHTML(self, indent=' '):
'''
getFormattedHTML - Get formatted and xhtml of this document, replacing the original whitespace
with a pretty-printed version
@param indent - space/tab/newline of each level of indent, or integer for how many spaces per level
@return - <str> Formatted html
@see getHTML - Get HTML with original whitespace
@see getMiniHTML - Get HTML with only functional whitespace remaining
'''
from .Formatter import AdvancedHTMLFormatter
html = self.getHTML()
formatter = AdvancedHTMLFormatter(indent, None) # Do not double-encode
formatter.feed(html)
return formatter.getHTML() | getFormattedHTML - Get formatted and xhtml of this document, replacing the original whitespace
with a pretty-printed version
@param indent - space/tab/newline of each level of indent, or integer for how many spaces per level
@return - <str> Formatted html
@see getHTML - Get HTML with original whitespace
@see getMiniHTML - Get HTML with only functional whitespace remaining | entailment |
def getMiniHTML(self):
'''
getMiniHTML - Gets the HTML representation of this document without any pretty formatting
and disregarding original whitespace beyond the functional.
@return <str> - HTML with only functional whitespace present
'''
from .Formatter import AdvancedHTMLMiniFormatter
html = self.getHTML()
formatter = AdvancedHTMLMiniFormatter(None) # Do not double-encode
formatter.feed(html)
return formatter.getHTML() | getMiniHTML - Gets the HTML representation of this document without any pretty formatting
and disregarding original whitespace beyond the functional.
@return <str> - HTML with only functional whitespace present | entailment |
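A small comparison of the three serializers documented above; the exact output strings are indicative only::

    import AdvancedHTMLParser

    parser = AdvancedHTMLParser.AdvancedHTMLParser()
    parser.parseStr('<div>  <span>hi</span>  </div>')

    original = parser.getHTML()             # original whitespace kept
    pretty = parser.getFormattedHTML('  ')  # re-indented, two spaces per level
    mini = parser.getMiniHTML()             # only functional whitespace kept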
def _reset(self):
'''
_reset - reset this object. Assigned to .reset after __init__ call.
'''
HTMLParser.reset(self)
self.root = None
self.doctype = None
self._inTag = [] | _reset - reset this object. Assigned to .reset after __init__ call. | entailment |
def feed(self, contents):
'''
feed - Feed contents. Use parseStr or parseFile instead.
@param contents - Contents
'''
contents = stripIEConditionals(contents)
try:
HTMLParser.feed(self, contents)
except MultipleRootNodeException:
self.reset()
HTMLParser.feed(self, "%s%s" %(addStartTag(contents, INVISIBLE_ROOT_TAG_START), INVISIBLE_ROOT_TAG_END)) | feed - Feed contents. Use parseStr or parseFile instead.
@param contents - Contents | entailment |
def parseFile(self, filename):
'''
parseFile - Parses a file and creates the DOM tree and indexes
@param filename <str/file> - A string to a filename or a file object. If file object, it will not be closed, you must close.
'''
self.reset()
if hasattr(filename, 'read'):  # file-like object (avoids the py2-only `file` builtin)
contents = filename.read()
else:
with codecs.open(filename, 'r', encoding=self.encoding) as f:
contents = f.read()
self.feed(contents) | parseFile - Parses a file and creates the DOM tree and indexes
@param filename <str/file> - A string to a filename or a file object. If file object, it will not be closed, you must close. | entailment |
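A minimal sketch of the path form; the file name is an assumption::

    import AdvancedHTMLParser

    parser = AdvancedHTMLParser.AdvancedHTMLParser()
    parser.parseFile('page.html')         # opened with the parser's encoding
    menu = parser.getElementById('menu')  # query the freshly built tree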
def parseStr(self, html):
'''
parseStr - Parses a string and creates the DOM tree and indexes.
@param html <str> - valid HTML
'''
self.reset()
if isinstance(html, bytes):
self.feed(html.decode(self.encoding))
else:
self.feed(html) | parseStr - Parses a string and creates the DOM tree and indexes.
@param html <str> - valid HTML | entailment |
def createElementFromHTML(cls, html, encoding='utf-8'):
'''
createElementFromHTML - Creates an element from a string of HTML.
If this could create multiple root-level elements (children are okay),
you must use #createElementsFromHTML which returns a list of elements created.
@param html <str> - Some html data
@param encoding <str> - Encoding to use for document
@raises MultipleRootNodeException - If given html would produce multiple root-level elements (use #createElementsFromHTML instead)
@return AdvancedTag - A single AdvancedTag
NOTE: If there is text outside the tag, it will be lost.
Use createBlocksFromHTML instead if you need to retain both text and tags.
Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML
'''
parser = cls(encoding=encoding)
html = stripIEConditionals(html)
try:
HTMLParser.feed(parser, html)
except MultipleRootNodeException:
raise MultipleRootNodeException('Multiple nodes passed to createElementFromHTML method. Use #createElementsFromHTML instead to get a list of AdvancedTag elements.')
rootNode = parser.getRoot()
rootNode.remove()
return rootNode | createElementFromHTML - Creates an element from a string of HTML.
If this could create multiple root-level elements (children are okay),
you must use #createElementsFromHTML which returns a list of elements created.
@param html <str> - Some html data
@param encoding <str> - Encoding to use for document
@raises MultipleRootNodeException - If given html would produce multiple root-level elements (use #createElementsFromHTML instead)
@return AdvancedTag - A single AdvancedTag
NOTE: If there is text outside the tag, it will be lost.
Use createBlocksFromHTML instead if you need to retain both text and tags.
Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML | entailment |
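An illustrative call; the commented line shows the multiple-root failure mode described above::

    import AdvancedHTMLParser

    tag = AdvancedHTMLParser.AdvancedHTMLParser.createElementFromHTML(
        '<div class="note"><b>hi</b></div>')
    print(tag.tagName, len(tag.children))  # expected: div 1

    # Two root-level elements would raise MultipleRootNodeException:
    # AdvancedHTMLParser.AdvancedHTMLParser.createElementFromHTML('<p>a</p><p>b</p>')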
def createElementsFromHTML(cls, html, encoding='utf-8'):
'''
createElementsFromHTML - Creates elements from provided html, and returns a list of the root-level elements
children of these root-level nodes are accessible via the usual means.
@param html <str> - Some html data
@param encoding <str> - Encoding to use for document
@return list<AdvancedTag> - The root (top-level) tags from parsed html.
NOTE: If there is text outside the tags, it will be lost.
Use createBlocksFromHTML instead if you need to retain both text and tags.
Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML
'''
# TODO: If text is present outside a tag, it will be lost.
parser = cls(encoding=encoding)
parser.parseStr(html)
rootNode = parser.getRoot()
rootNode.remove() # Detach from temp document
if isInvisibleRootTag(rootNode):
return rootNode.children
return [rootNode] | createElementsFromHTML - Creates elements from provided html, and returns a list of the root-level elements
children of these root-level nodes are accessible via the usual means.
@param html <str> - Some html data
@param encoding <str> - Encoding to use for document
@return list<AdvancedTag> - The root (top-level) tags from parsed html.
NOTE: If there is text outside the tags, it will be lost.
Use createBlocksFromHTML instead if you need to retain both text and tags.
Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML | entailment |
def createBlocksFromHTML(cls, html, encoding='utf-8'):
'''
createBlocksFromHTML - Returns the root level node (unless multiple nodes), and
a list of "blocks" added (text and nodes).
@return list< str/AdvancedTag > - List of blocks created. May be strings (text nodes) or AdvancedTag (tags)
NOTE:
Results may be checked by:
issubclass(block.__class__, AdvancedTag)
If True, block is a tag, otherwise, it is a text node
'''
parser = cls(encoding=encoding)
parser.parseStr(html)
rootNode = parser.getRoot()
rootNode.remove()
return rootNode.blocks | createBlocksFromHTML - Returns the root level node (unless multiple nodes), and
a list of "blocks" added (text and nodes).
@return list< str/AdvancedTag > - List of blocks created. May be strings (text nodes) or AdvancedTag (tags)
NOTE:
Results may be checked by:
issubclass(block.__class__, AdvancedTag)
If True, block is a tag, otherwise, it is a text node | entailment |
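A sketch of walking the returned blocks with the ``issubclass`` check suggested above; ``AdvancedTag`` is assumed to be importable from the package::

    import AdvancedHTMLParser
    from AdvancedHTMLParser import AdvancedTag

    blocks = AdvancedHTMLParser.AdvancedHTMLParser.createBlocksFromHTML(
        'before <b>bold</b> after')
    for block in blocks:
        if issubclass(block.__class__, AdvancedTag):
            print('tag :', block.tagName)
        else:
            print('text:', block)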
def handle_starttag(self, tagName, attributeList, isSelfClosing=False):
'''
internal for parsing
'''
newTag = AdvancedHTMLParser.handle_starttag(self, tagName, attributeList, isSelfClosing)
self._indexTag(newTag)
return newTag | internal for parsing | entailment |
def reindex(self, newIndexIDs=None, newIndexNames=None, newIndexClassNames=None, newIndexTagNames=None):
'''
reindex - reindex the tree. Optionally, change what fields are indexed.
@param newIndexIDs <bool/None> - None to leave same, otherwise new value to index IDs
@param newIndexNames <bool/None> - None to leave same, otherwise new value to index names
@param newIndexClassNames <bool/None> - None to leave same, otherwise new value to index class names
@param newIndexTagNames <bool/None> - None to leave same, otherwise new value to index tag names
'''
if newIndexIDs is not None:
self.indexIDs = newIndexIDs
if newIndexNames is not None:
self.indexNames = newIndexNames
if newIndexClassNames is not None:
self.indexClassNames = newIndexClassNames
if newIndexTagNames is not None:
self.indexTagNames = newIndexTagNames
self._resetIndexInternal()
self._indexTagRecursive(self.root) | reindex - reindex the tree. Optionally, change what fields are indexed.
@param newIndexIDs <bool/None> - None to leave same, otherwise new value to index IDs
@param newIndexNames <bool/None> - None to leave same, otherwise new value to index names
@param newIndexClassNames <bool/None> - None to leave same, otherwise new value to index class names
@param newIndexTagNames <bool/None> - None to leave same, otherwise new value to index tag names | entailment |
def disableIndexing(self):
'''
disableIndexing - Disables indexing. Consider using plain AdvancedHTMLParser class.
Maybe useful in some scenarios where you want to parse, add a ton of elements, then index
and do a bunch of searching.
'''
self.indexIDs = self.indexNames = self.indexClassNames = self.indexTagNames = False
self._resetIndexInternal() | disableIndexing - Disables indexing. Consider using plain AdvancedHTMLParser class.
Maybe useful in some scenarios where you want to parse, add a ton of elements, then index
and do a bunch of searching. | entailment |
def addIndexOnAttribute(self, attributeName):
'''
addIndexOnAttribute - Add an index for an arbitrary attribute. This will be used by the getElementsByAttr function.
You should do this prior to parsing, or call reindex. Otherwise it will be blank. "name" and "id" will have no effect.
@param attributeName <lowercase str> - An attribute name. Will be lowercased.
'''
attributeName = attributeName.lower()
self._otherAttributeIndexes[attributeName] = {}
def _otherIndexFunction(self, tag):
thisAttribute = tag.getAttribute(attributeName)
if thisAttribute is not None:
if thisAttribute not in self._otherAttributeIndexes[attributeName]:
self._otherAttributeIndexes[attributeName][thisAttribute] = []
self._otherAttributeIndexes[attributeName][thisAttribute].append(tag)
self.otherAttributeIndexFunctions[attributeName] = _otherIndexFunction | addIndexOnAttribute - Add an index for an arbitrary attribute. This will be used by the getElementsByAttr function.
You should do this prior to parsing, or call reindex. Otherwise it will be blank. "name" and "id" will have no effect.
@param attributeName <lowercase str> - An attribute name. Will be lowercased. | entailment |
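A hedged sketch; the indexed parser class is assumed to be exposed as ``IndexedAdvancedHTMLParser`` (the name does not appear in this excerpt) and the attribute name is illustrative::

    import AdvancedHTMLParser

    parser = AdvancedHTMLParser.IndexedAdvancedHTMLParser()
    parser.addIndexOnAttribute('data-role')  # must precede parsing, or call reindex()
    parser.parseStr('<div data-role="nav">a</div><div data-role="main">b</div>')

    mains = parser.getElementsByAttr('data-role', 'main')
    print(len(mains))  # expected: 1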
def removeIndexOnAttribute(self, attributeName):
'''
removeIndexOnAttribute - Remove an attribute from indexing (for getElementsByAttr function) and remove indexed data.
@param attributeName <lowercase str> - An attribute name. Will be lowercased. "name" and "id" will have no effect.
'''
attributeName = attributeName.lower()
if attributeName in self.otherAttributeIndexFunctions:
del self.otherAttributeIndexFunctions[attributeName]
if attributeName in self._otherAttributeIndexes:
del self._otherAttributeIndexes[attributeName] | removeIndexOnAttribute - Remove an attribute from indexing (for getElementsByAttr function) and remove indexed data.
@param attributeName <lowercase str> - An attribute name. Will be lowercased. "name" and "id" will have no effect. | entailment |
def getElementsByTagName(self, tagName, root='root', useIndex=True):
'''
getElementsByTagName - Searches and returns all elements with a specific tag name.
@param tagName <lowercase str> - A lowercase string of the tag name.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
@param useIndex - If True [default] and tag names are set to be indexed [default, see constructor], only the index will be used. If False, all tags
will be searched.
'''
(root, isFromRoot) = self._handleRootArg(root)
if useIndex is True and self.indexTagNames is True:
elements = self._tagNameMap.get(tagName, []) # Use .get here as to not create a lot of extra indexes on the defaultdict for misses
if isFromRoot is False:
_hasTagInParentLine = self._hasTagInParentLine
elements = [x for x in elements if _hasTagInParentLine(x, root)]
return TagCollection(elements)
return AdvancedHTMLParser.getElementsByTagName(self, tagName, root) | getElementsByTagName - Searches and returns all elements with a specific tag name.
@param tagName <lowercase str> - A lowercase string of the tag name.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
@param useIndex - If True [default] and tag names are set to be indexed [default, see constructor], only the index will be used. If False, all tags
will be searched. | entailment |
def getElementsByName(self, name, root='root', useIndex=True):
'''
getElementsByName - Searches and returns all elements with a specific name.
@param name <str> - A string of the name attribute
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
@param useIndex <bool> If useIndex is True and names are indexed [see constructor] only the index will be used. Otherwise a full search is performed.
'''
(root, isFromRoot) = self._handleRootArg(root)
elements = []
if useIndex is True and self.indexNames is True:
elements = self._nameMap.get(name, [])
if isFromRoot is False:
_hasTagInParentLine = self._hasTagInParentLine
elements = [x for x in elements if _hasTagInParentLine(x, root)]
return TagCollection(elements)
return AdvancedHTMLParser.getElementsByName(self, name, root) | getElementsByName - Searches and returns all elements with a specific name.
@param name <str> - A string of the name attribute
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
@param useIndex <bool> If useIndex is True and names are indexed [see constructor] only the index will be used. Otherwise a full search is performed. | entailment |