sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
---|---|---|
def _get(self, route, stream=False):
"""
run a get request against an url. Returns the response which can optionally be streamed
"""
log.debug("Running GET request against %s" % route)
return r.get(self._url(route), auth=c.auth, stream=stream) | run a get request against an url. Returns the response which can optionally be streamed | entailment |
def get_contents(self, folder: Folder):
"""
List all contents of a folder. Returns a list of all Documents and Folders (in this order) in the folder.
"""
log.debug("Listing Contents of %s/%s" % (folder.course.id, folder.id))
if isinstance(folder, Course):
response = json.loads(self._get('/api/documents/%s/folder' % folder.course.id).text)
else:
response = json.loads(self._get('/api/documents/%s/folder/%s' % (folder.course.id, folder.id)).text)
log.debug("Got response: %s" % response)
documents = [Document.from_response(response, folder) for response in response["documents"]]
folders = [Folder.from_response(response, folder) for response in response["folders"]]
return documents + folders | List all contents of a folder. Returns a list of all Documents and Folders (in this order) in the folder. | entailment |
def download_document(self, document: Document, overwrite=True, path=None):
"""
Download a document to the given path. if no path is provided the path is constructed frome the base_url + stud.ip path + filename.
If overwrite is set the local version will be overwritten if the file was changed on studip since the last check
"""
if not path:
path = os.path.join(os.path.expanduser(c["base_path"]), document.path)
if (self.modified(document) and overwrite) or not os.path.exists(join(path, document.title)):
log.info("Downloading %s" % join(path, document.title))
file = self._get('/api/documents/%s/download' % document.id, stream=True)
os.makedirs(path, exist_ok=True)
with open(join(path, document.title), 'wb') as f:
shutil.copyfileobj(file.raw, f) | Download a document to the given path. if no path is provided the path is constructed frome the base_url + stud.ip path + filename.
If overwrite is set the local version will be overwritten if the file was changed on studip since the last check | entailment |
def get_semester_title(self, node: BaseNode):
"""
get the semester of a node
"""
log.debug("Getting Semester Title for %s" % node.course.id)
return self._get_semester_from_id(node.course.semester) | get the semester of a node | entailment |
def get_courses(self):
"""
use the base_url and auth data from the configuration to list all courses the user is subscribed to
"""
log.info("Listing Courses...")
courses = json.loads(self._get('/api/courses').text)["courses"]
courses = [Course.from_response(course) for course in courses]
log.debug("Courses: %s" % [str(entry) for entry in courses])
return courses | use the base_url and auth data from the configuration to list all courses the user is subscribed to | entailment |
def _minimize_scalar(
self, desc="Progress", rtol=1.4902e-08, atol=1.4902e-08, verbose=True
):
"""
Minimize a scalar function using Brent's method.
Parameters
----------
verbose : bool
``True`` for verbose output; ``False`` otherwise.
"""
from tqdm import tqdm
from numpy import asarray
from brent_search import minimize as brent_minimize
variables = self._variables.select(fixed=False)
if len(variables) != 1:
raise ValueError("The number of variables must be equal to one.")
var = variables[variables.names()[0]]
progress = tqdm(desc=desc, disable=not verbose)
def func(x):
progress.update(1)
var.value = x
return self.__sign * self.value()
r = asarray(
brent_minimize(func, a=var.bounds[0], b=var.bounds[1], rtol=rtol, atol=atol)
)
var.value = r[0]
progress.close() | Minimize a scalar function using Brent's method.
Parameters
----------
verbose : bool
``True`` for verbose output; ``False`` otherwise. | entailment |
def digest(self, alg='sha256', b64=True, strip=True):
"""return a url-safe hash of the string, optionally (and by default) base64-encoded
alg='sha256' = the hash algorithm, must be in hashlib
b64=True = whether to base64-encode the output
strip=True = whether to strip trailing '=' from the base64 output
Using the default arguments returns a url-safe base64-encoded SHA-256 hash of the string.
Length of the digest with different algorithms, using b64=True and strip=True:
* SHA224 = 38
* SHA256 = 43 (DEFAULT)
* SHA384 = 64
* SHA512 = 86
"""
import base64, hashlib
h = hashlib.new(alg)
h.update(str(self).encode('utf-8'))
if b64 == True:
# this returns a string with a predictable amount of = padding at the end
b = base64.urlsafe_b64encode(h.digest()).decode('ascii')
if strip == True:
b = b.rstrip('=')
return b
else:
return h.hexdigest() | return a url-safe hash of the string, optionally (and by default) base64-encoded
alg='sha256' = the hash algorithm, must be in hashlib
b64=True = whether to base64-encode the output
strip=True = whether to strip trailing '=' from the base64 output
Using the default arguments returns a url-safe base64-encoded SHA-256 hash of the string.
Length of the digest with different algorithms, using b64=True and strip=True:
* SHA224 = 38
* SHA256 = 43 (DEFAULT)
* SHA384 = 64
* SHA512 = 86 | entailment |
def camelify(self):
"""turn a string to CamelCase, omitting non-word characters"""
outstring = self.titleify(allwords=True)
outstring = re.sub(r"&[^;]+;", " ", outstring)
outstring = re.sub(r"\W+", "", outstring)
return String(outstring) | turn a string to CamelCase, omitting non-word characters | entailment |
def titleify(self, lang='en', allwords=False, lastword=True):
"""takes a string and makes a title from it"""
if lang in LOWERCASE_WORDS:
lc_words = LOWERCASE_WORDS[lang]
else:
lc_words = []
s = str(self).strip()
l = re.split(r"([_\W]+)", s)
for i in range(len(l)):
l[i] = l[i].lower()
if (
allwords == True
or i == 0
or (lastword == True and i == len(l) - 1)
or l[i].lower() not in lc_words
):
w = l[i]
if len(w) > 1:
w = w[0].upper() + w[1:]
else:
w = w.upper()
l[i] = w
s = "".join(l)
return String(s) | takes a string and makes a title from it | entailment |
def identifier(self, camelsplit=False, ascii=True):
"""return a python identifier from the string (underscore separators)"""
return self.nameify(camelsplit=camelsplit, ascii=ascii, sep='_') | return a python identifier from the string (underscore separators) | entailment |
def nameify(self, camelsplit=False, ascii=True, sep='-'):
"""return an XML name (hyphen-separated by default, initial underscore if non-letter)"""
s = String(str(self)) # immutable
if camelsplit == True:
s = s.camelsplit()
s = s.hyphenify(ascii=ascii).replace('-', sep)
if len(s) == 0 or re.match("[A-Za-z_]", s[0]) is None:
s = "_" + s
return String(s) | return an XML name (hyphen-separated by default, initial underscore if non-letter) | entailment |
def hyphenify(self, ascii=False):
"""Turn non-word characters (incl. underscore) into single hyphens.
If ascii=True, return ASCII-only.
If also lossless=True, use the UTF-8 codes for the non-ASCII characters.
"""
s = str(self)
s = re.sub("""['"\u2018\u2019\u201c\u201d]""", '', s) # quotes
s = re.sub(r'(?:\s|%20)+', '-', s) # whitespace
if ascii == True: # ASCII-only
s = s.encode('ascii', 'xmlcharrefreplace').decode('ascii') # use entities
s = re.sub("&?([^;]*?);", r'.\1-', s) # entities
s = s.replace('#', 'u')
s = re.sub(r"\W+", '-', s).strip(' -')
return String(s) | Turn non-word characters (incl. underscore) into single hyphens.
If ascii=True, return ASCII-only.
If also lossless=True, use the UTF-8 codes for the non-ASCII characters. | entailment |
def camelsplit(self):
"""Turn a CamelCase string into a string with spaces"""
s = str(self)
for i in range(len(s) - 1, -1, -1):
if i != 0 and (
(s[i].isupper() and s[i - 1].isalnum() and not s[i - 1].isupper())
or (s[i].isnumeric() and s[i - 1].isalpha())
):
s = s[:i] + ' ' + s[i:]
return String(s.strip()) | Turn a CamelCase string into a string with spaces | entailment |
def includeme(config):
"""Pyramid pluggable and discoverable function."""
global_settings = config.registry.settings
settings = local_settings(global_settings, PREFIX)
try:
file = settings['file']
except KeyError:
raise KeyError("Must supply '{}.file' configuration value "
"in order to configure logging via '{}'."
.format(PREFIX, PROJECT))
with open(file, 'r') as f:
logging_config = yaml.load(f)
dictConfig(logging_config)
# Enable transit logging?
if asbool(settings.get('transit_logging.enabled?', False)):
config.add_tween('pyramid_sawing.main.TransitLogger') | Pyramid pluggable and discoverable function. | entailment |
def run(self):
"""Executed on startup of application"""
self.api = self.context.get("cls")(self.context)
self.context["inst"].append(self) # Adapters used by strategies
for call, calldata in self.context.get("calls", {}).items():
def loop():
"""Loop on event scheduler, calling calls"""
while not self.stopped.wait(calldata.get("delay", None)):
self.call(call, calldata.get("arguments", None))
self.thread[call] = Process(target=loop)
self.thread[call].start() | Executed on startup of application | entailment |
def call(self, callname, arguments=None):
"""Executed on each scheduled iteration"""
# See if a method override exists
action = getattr(self.api, callname, None)
if action is None:
try:
action = self.api.ENDPOINT_OVERRIDES.get(callname, None)
except AttributeError:
action = callname
if not callable(action):
request = self._generate_request(action, arguments)
if action is None:
return self._generate_result(
callname, self.api.call(*call_args(callname, arguments)))
return self._generate_result(
callname, self.api.call(*call_args(action, arguments)))
request = self._generate_request(callname, arguments)
return self._generate_result(callname, action(request)) | Executed on each scheduled iteration | entailment |
def _generate_request(self, callname, request):
"""Generate a request object for delivery to the API"""
# Retrieve path from API class
schema = self.api.request_schema()
schema.context['callname'] = callname
return schema.dump(request).data.get("payload") | Generate a request object for delivery to the API | entailment |
def _generate_result(self, callname, result):
"""Generate a results object for delivery to the context object"""
# Retrieve path from API class
schema = self.api.result_schema()
schema.context['callname'] = callname
self.callback(schema.load(result), self.context) | Generate a results object for delivery to the context object | entailment |
def excel_key(index):
"""create a key for index by converting index into a base-26 number, using A-Z as the characters."""
X = lambda n: ~n and X((n // 26)-1) + chr(65 + (n % 26)) or ''
return X(int(index)) | create a key for index by converting index into a base-26 number, using A-Z as the characters. | entailment |
def convert_to_crash_data(raw_crash, processed_crash):
"""
Takes a raw crash and a processed crash (these are Socorro-centric
data structures) and converts them to a crash data structure used
by signature generation.
:arg raw_crash: raw crash data from Socorro
:arg processed_crash: processed crash data from Socorro
:returns: crash data structure that conforms to the schema
"""
# We want to generate fresh signatures, so we remove the "normalized" field
# from stack frames from the processed crash because this is essentially
# cached data from previous processing
for thread in glom(processed_crash, 'json_dump.threads', default=[]):
for frame in thread.get('frames', []):
if 'normalized' in frame:
del frame['normalized']
crash_data = {
# JavaStackTrace or None
'java_stack_trace': glom(processed_crash, 'java_stack_trace', default=None),
# int or None
'crashing_thread': glom(
processed_crash, 'json_dump.crash_info.crashing_thread', default=None
),
# list of CStackTrace or None
'threads': glom(processed_crash, 'json_dump.threads', default=None),
# int or None
'hang_type': glom(processed_crash, 'hang_type', default=None),
# text or None
'os': glom(processed_crash, 'json_dump.system_info.os', default=None),
# int or None
'oom_allocation_size': int_or_none(glom(raw_crash, 'OOMAllocationSize', default=None)),
# text or None
'abort_message': glom(raw_crash, 'AbortMessage', default=None),
# text or None
'mdsw_status_string': glom(processed_crash, 'mdsw_status_string', default=None),
# text json with "phase", "conditions" (complicated--see code) or None
'async_shutdown_timeout': glom(raw_crash, 'AsyncShutdownTimeout', default=None),
# text or None
'jit_category': glom(processed_crash, 'classifications.jit.category', default=None),
# text or None
'ipc_channel_error': glom(raw_crash, 'ipc_channel_error', default=None),
# text or None
'ipc_message_name': glom(raw_crash, 'IPCMessageName', default=None),
# text
'moz_crash_reason': glom(processed_crash, 'moz_crash_reason', default=None),
# text; comma-delimited e.g. "browser,flash1,flash2"
'additional_minidumps': glom(raw_crash, 'additional_minidumps', default=''),
# pull out the original signature if there was one
'original_signature': glom(processed_crash, 'signature', default='')
}
return crash_data | Takes a raw crash and a processed crash (these are Socorro-centric
data structures) and converts them to a crash data structure used
by signature generation.
:arg raw_crash: raw crash data from Socorro
:arg processed_crash: processed crash data from Socorro
:returns: crash data structure that conforms to the schema | entailment |
def drop_bad_characters(text):
"""Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
"""
# Strip all non-ascii and non-printable characters
text = ''.join([c for c in text if c in ALLOWED_CHARS])
return text | Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped | entailment |
def parse_source_file(source_file):
"""Parses a source file thing and returns the file name
Example:
>>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
'js/src/jit/MIR.h'
:arg str source_file: the source file ("file") from a stack frame
:returns: the filename or ``None`` if it couldn't determine one
"""
if not source_file:
return None
vcsinfo = source_file.split(':')
if len(vcsinfo) == 4:
# These are repositories or cloud file systems (e.g. hg, git, s3)
vcstype, root, vcs_source_file, revision = vcsinfo
return vcs_source_file
if len(vcsinfo) == 2:
# These are directories on someone's Windows computer and vcstype is a
# file system (e.g. "c:", "d:", "f:")
vcstype, vcs_source_file = vcsinfo
return vcs_source_file
if source_file.startswith('/'):
# These are directories on OSX or Linux
return source_file
# We have no idea what this is, so return None
return None | Parses a source file thing and returns the file name
Example:
>>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
'js/src/jit/MIR.h'
:arg str source_file: the source file ("file") from a stack frame
:returns: the filename or ``None`` if it couldn't determine one | entailment |
def _is_exception(exceptions, before_token, after_token, token):
"""Predicate for whether the open token is in an exception context
:arg exceptions: list of strings or None
:arg before_token: the text of the function up to the token delimiter
:arg after_token: the text of the function after the token delimiter
:arg token: the token (only if we're looking at a close delimiter
:returns: bool
"""
if not exceptions:
return False
for s in exceptions:
if before_token.endswith(s):
return True
if s in token:
return True
return False | Predicate for whether the open token is in an exception context
:arg exceptions: list of strings or None
:arg before_token: the text of the function up to the token delimiter
:arg after_token: the text of the function after the token delimiter
:arg token: the token (only if we're looking at a close delimiter
:returns: bool | entailment |
def collapse(
function,
open_string,
close_string,
replacement='',
exceptions=None,
):
"""Collapses the text between two delimiters in a frame function value
This collapses the text between two delimiters and either removes the text
altogether or replaces it with a replacement string.
There are certain contexts in which we might not want to collapse the text
between two delimiters. These are denoted as "exceptions" and collapse will
check for those exception strings occuring before the token to be replaced
or inside the token to be replaced.
Before::
IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)
^ ^ open token
exception string occurring before open token
Inside::
<rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute
^ ^^^^ exception string inside token
open token
:arg function: the function value from a frame to collapse tokens in
:arg open_string: the open delimiter; e.g. ``(``
:arg close_string: the close delimiter; e.g. ``)``
:arg replacement: what to replace the token with; e.g. ``<T>``
:arg exceptions: list of strings denoting exceptions where we don't want
to collapse the token
:returns: new function string with tokens collapsed
"""
collapsed = []
open_count = 0
open_token = []
for i, char in enumerate(function):
if not open_count:
if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''): # noqa
open_count += 1
open_token = [char]
else:
collapsed.append(char)
else:
if char == open_string:
open_count += 1
open_token.append(char)
elif char == close_string:
open_count -= 1
open_token.append(char)
if open_count == 0:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
open_token = []
else:
open_token.append(char)
if open_count:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
return ''.join(collapsed) | Collapses the text between two delimiters in a frame function value
This collapses the text between two delimiters and either removes the text
altogether or replaces it with a replacement string.
There are certain contexts in which we might not want to collapse the text
between two delimiters. These are denoted as "exceptions" and collapse will
check for those exception strings occuring before the token to be replaced
or inside the token to be replaced.
Before::
IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)
^ ^ open token
exception string occurring before open token
Inside::
<rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute
^ ^^^^ exception string inside token
open token
:arg function: the function value from a frame to collapse tokens in
:arg open_string: the open delimiter; e.g. ``(``
:arg close_string: the close delimiter; e.g. ``)``
:arg replacement: what to replace the token with; e.g. ``<T>``
:arg exceptions: list of strings denoting exceptions where we don't want
to collapse the token
:returns: new function string with tokens collapsed | entailment |
def drop_prefix_and_return_type(function):
"""Takes the function value from a frame and drops prefix and return type
For example::
static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
^ ^^^^^^ return type
prefix
This gets changes to this::
Allocator<MozJemallocBase>::malloc(unsigned __int64)
This tokenizes on space, but takes into account types, generics, traits,
function arguments, and other parts of the function signature delimited by
things like `', <>, {}, [], and () for both C/C++ and Rust.
After tokenizing, this returns the last token since that's comprised of the
function name and its arguments.
:arg function: the function value in a frame to drop bits from
:returns: adjusted function value
"""
DELIMITERS = {
'(': ')',
'{': '}',
'[': ']',
'<': '>',
'`': "'"
}
OPEN = DELIMITERS.keys()
CLOSE = DELIMITERS.values()
# The list of tokens accumulated so far
tokens = []
# Keeps track of open delimiters so we can match and close them
levels = []
# The current token we're building
current = []
for i, char in enumerate(function):
if char in OPEN:
levels.append(char)
current.append(char)
elif char in CLOSE:
if levels and DELIMITERS[levels[-1]] == char:
levels.pop()
current.append(char)
else:
# This is an unmatched close.
current.append(char)
elif levels:
current.append(char)
elif char == ' ':
tokens.append(''.join(current))
current = []
else:
current.append(char)
if current:
tokens.append(''.join(current))
while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):
# It's possible for the function signature to have a space between
# the function name and the parenthesized arguments or [clone ...]
# thing. If that's the case, we join the last two tokens. We keep doing
# that until the last token is nice.
#
# Example:
#
# somefunc (int arg1, int arg2)
# ^
# somefunc(int arg1, int arg2) [clone .cold.111]
# ^
# somefunc(int arg1, int arg2) [clone .cold.111] [clone .cold.222]
# ^ ^
tokens = tokens[:-2] + [' '.join(tokens[-2:])]
return tokens[-1] | Takes the function value from a frame and drops prefix and return type
For example::
static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
^ ^^^^^^ return type
prefix
This gets changes to this::
Allocator<MozJemallocBase>::malloc(unsigned __int64)
This tokenizes on space, but takes into account types, generics, traits,
function arguments, and other parts of the function signature delimited by
things like `', <>, {}, [], and () for both C/C++ and Rust.
After tokenizing, this returns the last token since that's comprised of the
function name and its arguments.
:arg function: the function value in a frame to drop bits from
:returns: adjusted function value | entailment |
def wait_connected(self, conns=None, timeout=None):
'''Wait for connections to be made and their handshakes to finish
:param conns:
a single or list of (host, port) tuples with the connections that
must be finished before the method will return. defaults to all the
peers the :class:`Hub` was instantiated with.
:param timeout:
maximum time to wait in seconds. with None, there is no timeout.
:type timeout: float or None
:returns:
``True`` if all connections were made, ``False`` one or more
failed.
'''
if timeout:
deadline = time.time() + timeout
conns = conns or self._started_peers.keys()
if not hasattr(conns, "__iter__"):
conns = [conns]
for peer_addr in conns:
remaining = max(0, deadline - time.time()) if timeout else None
if not self._started_peers[peer_addr].wait_connected(remaining):
if timeout:
log.warn("connect wait timed out after %.2f seconds" %
timeout)
return False
return True | Wait for connections to be made and their handshakes to finish
:param conns:
a single or list of (host, port) tuples with the connections that
must be finished before the method will return. defaults to all the
peers the :class:`Hub` was instantiated with.
:param timeout:
maximum time to wait in seconds. with None, there is no timeout.
:type timeout: float or None
:returns:
``True`` if all connections were made, ``False`` one or more
failed. | entailment |
def shutdown(self):
'Close all peer connections and stop listening for new ones'
log.info("shutting down")
for peer in self._dispatcher.peers.values():
peer.go_down(reconnect=False)
if self._listener_coro:
backend.schedule_exception(
errors._BailOutOfListener(), self._listener_coro)
if self._udp_listener_coro:
backend.schedule_exception(
errors._BailOutOfListener(), self._udp_listener_coro) | Close all peer connections and stop listening for new ones | entailment |
def accept_publish(
self, service, mask, value, method, handler=None, schedule=False):
'''Set a handler for incoming publish messages
:param service: the incoming message must have this service
:type service: anything hash-able
:param mask:
value to be bitwise-and'ed against the incoming id, the result of
which must mask the 'value' param
:type mask: int
:param value:
the result of `routing_id & mask` must match this in order to
trigger the handler
:type value: int
:param method: the method name
:type method: string
:param handler:
the function that will be called on incoming matching messages
:type handler: callable
:param schedule:
whether to schedule a separate greenlet running ``handler`` for
each matching message. default ``False``.
:type schedule: bool
:raises:
- :class:`ImpossibleSubscription
<junction.errors.ImpossibleSubscription>` if there is no routing
ID which could possibly match the mask/value pair
- :class:`OverlappingSubscription
<junction.errors.OverlappingSubscription>` if a prior publish
registration that overlaps with this one (there is a
service/method/routing id that would match *both* this *and* a
previously-made registration).
'''
# support @hub.accept_publish(serv, mask, val, meth) decorator usage
if handler is None:
return lambda h: self.accept_publish(
service, mask, value, method, h, schedule)
log.info("accepting publishes%s %r" % (
" scheduled" if schedule else "",
(service, (mask, value), method),))
self._dispatcher.add_local_subscription(const.MSG_TYPE_PUBLISH,
service, mask, value, method, handler, schedule)
return handler | Set a handler for incoming publish messages
:param service: the incoming message must have this service
:type service: anything hash-able
:param mask:
value to be bitwise-and'ed against the incoming id, the result of
which must mask the 'value' param
:type mask: int
:param value:
the result of `routing_id & mask` must match this in order to
trigger the handler
:type value: int
:param method: the method name
:type method: string
:param handler:
the function that will be called on incoming matching messages
:type handler: callable
:param schedule:
whether to schedule a separate greenlet running ``handler`` for
each matching message. default ``False``.
:type schedule: bool
:raises:
- :class:`ImpossibleSubscription
<junction.errors.ImpossibleSubscription>` if there is no routing
ID which could possibly match the mask/value pair
- :class:`OverlappingSubscription
<junction.errors.OverlappingSubscription>` if a prior publish
registration that overlaps with this one (there is a
service/method/routing id that would match *both* this *and* a
previously-made registration). | entailment |
def unsubscribe_publish(self, service, mask, value):
'''Remove a publish subscription
:param service: the service of the subscription to remove
:type service: anything hash-able
:param mask: the mask of the subscription to remove
:type mask: int
:param value: the value in the subscription to remove
:type value: int
:returns:
a boolean indicating whether the subscription was there (True) and
removed, or not (False)
'''
log.info("unsubscribing from publish %r" % (
(service, (mask, value)),))
return self._dispatcher.remove_local_subscription(
const.MSG_TYPE_PUBLISH, service, mask, value) | Remove a publish subscription
:param service: the service of the subscription to remove
:type service: anything hash-able
:param mask: the mask of the subscription to remove
:type mask: int
:param value: the value in the subscription to remove
:type value: int
:returns:
a boolean indicating whether the subscription was there (True) and
removed, or not (False) | entailment |
def publish(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False, udp=False):
'''Send a 1-way message
:param service: the service name (the routing top level)
:type service: anything hash-able
:param int routing_id:
the id used for routing within the registered handlers of the
service
:param string method: the method name to call
:param tuple args:
The positional arguments to send along with the request. If the
first positional argument is a generator object, the publish will
be sent in chunks :ref:`(more info) <chunked-messages>`.
:param dict kwargs: keyword arguments to send along with the request
:param bool broadcast:
if ``True``, send to every peer with a matching subscription.
:param bool udp: deliver the message over UDP instead of the usual TCP
:returns: None. use 'rpc' methods for requests with responses.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
'''
if udp:
func = self._dispatcher.send_publish_udp
else:
func = self._dispatcher.send_publish
if not func(None, service, routing_id, method,
args or (), kwargs or {}, singular=not broadcast):
raise errors.Unroutable() | Send a 1-way message
:param service: the service name (the routing top level)
:type service: anything hash-able
:param int routing_id:
the id used for routing within the registered handlers of the
service
:param string method: the method name to call
:param tuple args:
The positional arguments to send along with the request. If the
first positional argument is a generator object, the publish will
be sent in chunks :ref:`(more info) <chunked-messages>`.
:param dict kwargs: keyword arguments to send along with the request
:param bool broadcast:
if ``True``, send to every peer with a matching subscription.
:param bool udp: deliver the message over UDP instead of the usual TCP
:returns: None. use 'rpc' methods for requests with responses.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message | entailment |
def publish_receiver_count(self, service, routing_id):
'''Get the number of peers that would handle a particular publish
:param service: the service name
:type service: anything hash-able
:param routing_id: the id used for limiting the service handlers
:type routing_id: int
'''
peers = len(list(self._dispatcher.find_peer_routes(
const.MSG_TYPE_PUBLISH, service, routing_id)))
if self._dispatcher.locally_handles(const.MSG_TYPE_PUBLISH,
service, routing_id):
return peers + 1
return peers | Get the number of peers that would handle a particular publish
:param service: the service name
:type service: anything hash-able
:param routing_id: the id used for limiting the service handlers
:type routing_id: int | entailment |
def accept_rpc(self, service, mask, value, method,
handler=None, schedule=True):
'''Set a handler for incoming RPCs
:param service: the incoming RPC must have this service
:type service: anything hash-able
:param mask:
value to be bitwise-and'ed against the incoming id, the result of
which must mask the 'value' param
:type mask: int
:param value:
the result of `routing_id & mask` must match this in order to
trigger the handler
:type value: int
:param method: the method name to trigger handler
:type method: string
:param handler:
the function that will be called on incoming matching RPC requests
:type handler: callable
:param schedule:
whether to schedule a separate greenlet running ``handler`` for
each matching message. default ``True``.
:type schedule: bool
:raises:
- :class:`ImpossibleSubscription
<junction.errors.ImpossibleSubscription>` if there is no routing
ID which could possibly match the mask/value pair
- :class:`OverlappingSubscription
<junction.errors.OverlappingSubscription>` if a prior rpc
registration that overlaps with this one (there is a
service/method/routing id that would match *both* this *and* a
previously-made registration).
'''
# support @hub.accept_rpc(serv, mask, val, meth) decorator usage
if handler is None:
return lambda h: self.accept_rpc(
service, mask, value, method, h, schedule)
log.info("accepting RPCs%s %r" % (
" scheduled" if schedule else "",
(service, (mask, value), method),))
self._dispatcher.add_local_subscription(const.MSG_TYPE_RPC_REQUEST,
service, mask, value, method, handler, schedule)
return handler | Set a handler for incoming RPCs
:param service: the incoming RPC must have this service
:type service: anything hash-able
:param mask:
value to be bitwise-and'ed against the incoming id, the result of
which must mask the 'value' param
:type mask: int
:param value:
the result of `routing_id & mask` must match this in order to
trigger the handler
:type value: int
:param method: the method name to trigger handler
:type method: string
:param handler:
the function that will be called on incoming matching RPC requests
:type handler: callable
:param schedule:
whether to schedule a separate greenlet running ``handler`` for
each matching message. default ``True``.
:type schedule: bool
:raises:
- :class:`ImpossibleSubscription
<junction.errors.ImpossibleSubscription>` if there is no routing
ID which could possibly match the mask/value pair
- :class:`OverlappingSubscription
<junction.errors.OverlappingSubscription>` if a prior rpc
registration that overlaps with this one (there is a
service/method/routing id that would match *both* this *and* a
previously-made registration). | entailment |
def unsubscribe_rpc(self, service, mask, value):
'''Remove a rpc subscription
:param service: the service of the subscription to remove
:type service: anything hash-able
:param mask: the mask of the subscription to remove
:type mask: int
:param value: the value in the subscription to remove
:type value: int
:param handler: the handler function of the subscription to remove
:type handler: callable
:returns:
a boolean indicating whether the subscription was there (True) and
removed, or not (False)
'''
log.info("unsubscribing from RPC %r" % ((service, (mask, value)),))
return self._dispatcher.remove_local_subscription(
const.MSG_TYPE_RPC_REQUEST, service, mask, value) | Remove a rpc subscription
:param service: the service of the subscription to remove
:type service: anything hash-able
:param mask: the mask of the subscription to remove
:type mask: int
:param value: the value in the subscription to remove
:type value: int
:param handler: the handler function of the subscription to remove
:type handler: callable
:returns:
a boolean indicating whether the subscription was there (True) and
removed, or not (False) | entailment |
def send_rpc(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False):
'''Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args:
The positional arguments to send along with the request. If the
first argument is a generator, the request will be sent in chunks
:ref:`(more info) <chunked-messages>`.
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to every peer with a matching subscription
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
'''
rpc = self._dispatcher.send_rpc(service, routing_id, method,
args or (), kwargs or {}, not broadcast)
if not rpc:
raise errors.Unroutable()
return rpc | Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args:
The positional arguments to send along with the request. If the
first argument is a generator, the request will be sent in chunks
:ref:`(more info) <chunked-messages>`.
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to every peer with a matching subscription
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message | entailment |
def rpc(self, service, routing_id, method, args=None, kwargs=None,
timeout=None, broadcast=False):
'''Send an RPC request and return the corresponding response
This will block waiting until the response has been received.
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args:
The positional arguments to send along with the request. If the
first argument is a generator, the request will be sent in chunks
:ref:`(more info) <chunked-messages>`.
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param timeout:
maximum time to wait for a response in seconds. with None, there is
no timeout.
:type timeout: float or None
:param broadcast:
if ``True``, send to every peer with a matching subscription
:type broadcast: bool
:returns:
a list of the objects returned by the RPC's targets. these could be
of any serializable type.
:raises:
- :class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
- :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout
was provided and it expires
'''
rpc = self.send_rpc(service, routing_id, method,
args or (), kwargs or {}, broadcast)
return rpc.get(timeout) | Send an RPC request and return the corresponding response
This will block waiting until the response has been received.
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args:
The positional arguments to send along with the request. If the
first argument is a generator, the request will be sent in chunks
:ref:`(more info) <chunked-messages>`.
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param timeout:
maximum time to wait for a response in seconds. with None, there is
no timeout.
:type timeout: float or None
:param broadcast:
if ``True``, send to every peer with a matching subscription
:type broadcast: bool
:returns:
a list of the objects returned by the RPC's targets. these could be
of any serializable type.
:raises:
- :class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
- :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout
was provided and it expires | entailment |
def rpc_receiver_count(self, service, routing_id):
'''Get the number of peers that would handle a particular RPC
:param service: the service name
:type service: anything hash-able
:param routing_id:
the id used for narrowing within the service handlers
:type routing_id: int
:returns:
the integer number of peers that would receive the described RPC
'''
peers = len(list(self._dispatcher.find_peer_routes(
const.MSG_TYPE_RPC_REQUEST, service, routing_id)))
if self._dispatcher.locally_handles(const.MSG_TYPE_RPC_REQUEST,
service, routing_id):
return peers + 1
return peers | Get the number of peers that would handle a particular RPC
:param service: the service name
:type service: anything hash-able
:param routing_id:
the id used for narrowing within the service handlers
:type routing_id: int
:returns:
the integer number of peers that would receive the described RPC | entailment |
def start(self):
"Start up the hub's server, and have it start initiating connections"
log.info("starting")
self._listener_coro = backend.greenlet(self._listener)
self._udp_listener_coro = backend.greenlet(self._udp_listener)
backend.schedule(self._listener_coro)
backend.schedule(self._udp_listener_coro)
for addr in self._peers:
self.add_peer(addr) | Start up the hub's server, and have it start initiating connections | entailment |
def add_peer(self, peer_addr):
"Build a connection to the Hub at a given ``(host, port)`` address"
peer = connection.Peer(
self._ident, self._dispatcher, peer_addr, backend.Socket())
peer.start()
self._started_peers[peer_addr] = peer | Build a connection to the Hub at a given ``(host, port)`` address | entailment |
def peers(self):
"list of the (host, port) pairs of all connected peer Hubs"
return [addr for (addr, peer) in self._dispatcher.peers.items()
if peer.up] | list of the (host, port) pairs of all connected peer Hubs | entailment |
def main(argv=None):
"""Takes crash data via args and generates a Socorro signature
"""
parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
parser.add_argument(
'-v', '--verbose', help='increase output verbosity', action='store_true'
)
parser.add_argument(
'--format', help='specify output format: csv, text (default)'
)
parser.add_argument(
'--different-only', dest='different', action='store_true',
help='limit output to just the signatures that changed',
)
parser.add_argument(
'crashids', metavar='crashid', nargs='*', help='crash id to generate signatures for'
)
if argv is None:
args = parser.parse_args()
else:
args = parser.parse_args(argv)
if args.format == 'csv':
outputter = CSVOutput
else:
outputter = TextOutput
api_token = os.environ.get('SOCORRO_API_TOKEN', '')
generator = SignatureGenerator()
if args.crashids:
crashids_iterable = args.crashids
elif not sys.stdin.isatty():
# If a script is piping to this script, then isatty() returns False. If
# there is no script piping to this script, then isatty() returns True
# and if we do list(sys.stdin), it'll block waiting for input.
crashids_iterable = list(sys.stdin)
else:
crashids_iterable = []
if not crashids_iterable:
parser.print_help()
return 0
with outputter() as out:
for crash_id in crashids_iterable:
crash_id = crash_id.strip()
resp = fetch('/RawCrash/', crash_id, api_token)
if resp.status_code == 404:
out.warning('%s: does not exist.' % crash_id)
continue
if resp.status_code == 429:
out.warning('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
out.warning('HTTP 500: %s' % resp.content)
continue
raw_crash = resp.json()
# If there's an error in the raw crash, then something is wrong--probably with the API
# token. So print that out and exit.
if 'error' in raw_crash:
out.warning('Error fetching raw crash: %s' % raw_crash['error'])
return 1
resp = fetch('/ProcessedCrash/', crash_id, api_token)
if resp.status_code == 404:
out.warning('%s: does not have processed crash.' % crash_id)
continue
if resp.status_code == 429:
out.warning('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
out.warning('HTTP 500: %s' % resp.content)
continue
processed_crash = resp.json()
# If there's an error in the processed crash, then something is wrong--probably with the
# API token. So print that out and exit.
if 'error' in processed_crash:
out.warning('Error fetching processed crash: %s' % processed_crash['error'])
return 1
old_signature = processed_crash['signature']
crash_data = convert_to_crash_data(raw_crash, processed_crash)
result = generator.generate(crash_data)
if not args.different or old_signature != result.signature:
out.data(crash_id, old_signature, result, args.verbose) | Takes crash data via args and generates a Socorro signature | entailment |
def send_result(self, return_code, output, service_description='', time_stamp=0, specific_servers=None):
'''
Send result to the Skinken WS
'''
if time_stamp == 0:
time_stamp = int(time.time())
if specific_servers == None:
specific_servers = self.servers
else:
specific_servers = set(self.servers).intersection(specific_servers)
for server in specific_servers:
post_data = {}
post_data['time_stamp'] = time_stamp
post_data['host_name'] = self.servers[server]['custom_fqdn']
post_data['service_description'] = service_description
post_data['return_code'] = return_code
post_data['output'] = output
if self.servers[server]['availability']:
url = '%s://%s:%s%s' % (self.servers[server]['protocol'],
self.servers[server]['host'],
self.servers[server]['port'],
self.servers[server]['uri'])
auth = (self.servers[server]['username'],
self.servers[server]['password'])
try:
response = requests.post(url,
auth=auth,
headers=self.http_headers,
verify=self.servers[server]['verify'],
timeout=self.servers[server]['timeout'],
data=post_data)
if response.status_code == 400:
LOG.error("[ws_shinken][%s]: HTTP status: %s - The content of the WebService call is incorrect",
server,
response.status_code)
elif response.status_code == 401:
LOG.error("[ws_shinken][%s]: HTTP status: %s - You must provide an username and password",
server,
response.status_code)
elif response.status_code == 403:
LOG.error("[ws_shinken][%s]: HTTP status: %s - The username or password is wrong",
server,
response.status_code)
elif response.status_code != 200:
LOG.error("[ws_shinken][%s]: HTTP status: %s", server, response.status_code)
except (requests.ConnectionError, requests.Timeout), error:
self.servers[server]['availability'] = False
LOG.error(error)
else:
LOG.error("[ws_shinken][%s]: Data not sent, server is unavailable", server)
if self.servers[server]['availability'] == False and self.servers[server]['cache'] == True:
self.servers[server]['csv'].writerow(post_data)
LOG.info("[ws_shinken][%s]: Data cached", server) | Send result to the Skinken WS | entailment |
def close_cache(self):
'''
Close cache of WS Shinken
'''
# Close all WS_Shinken cache files
for server in self.servers:
if self.servers[server]['cache'] == True:
self.servers[server]['file'].close() | Close cache of WS Shinken | entailment |
def prepare_dir(app, directory, delete=False):
"""Create apidoc dir, delete contents if delete is True.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param directory: the apidoc directory. you can use relative paths here
:type directory: str
:param delete: if True, deletes the contents of apidoc. This acts like an override switch.
:type delete: bool
:returns: None
:rtype: None
:raises: None
"""
logger.info("Preparing output directories for jinjaapidoc.")
if os.path.exists(directory):
if delete:
logger.debug("Deleting dir %s", directory)
shutil.rmtree(directory)
logger.debug("Creating dir %s", directory)
os.mkdir(directory)
else:
logger.debug("Creating %s", directory)
os.mkdir(directory) | Create apidoc dir, delete contents if delete is True.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param directory: the apidoc directory. you can use relative paths here
:type directory: str
:param delete: if True, deletes the contents of apidoc. This acts like an override switch.
:type delete: bool
:returns: None
:rtype: None
:raises: None | entailment |
def makename(package, module):
"""Join package and module with a dot.
Package or Module can be empty.
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:returns: the joined name
:rtype: :class:`str`
:raises: :class:`AssertionError`, if both package and module are empty
"""
# Both package and module can be None/empty.
assert package or module, "Specify either package or module"
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name | Join package and module with a dot.
Package or Module can be empty.
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:returns: the joined name
:rtype: :class:`str`
:raises: :class:`AssertionError`, if both package and module are empty | entailment |
def write_file(app, name, text, dest, suffix, dryrun, force):
"""Write the output file for module/package <name>.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param name: the file name without file extension
:type name: :class:`str`
:param text: the content of the file
:type text: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
fname = os.path.join(dest, '%s.%s' % (name, suffix))
if dryrun:
logger.info('Would create file %s.' % fname)
return
if not force and os.path.isfile(fname):
logger.info('File %s already exists, skipping.' % fname)
else:
logger.info('Creating file %s.' % fname)
f = open(fname, 'w')
try:
f.write(text)
relpath = os.path.relpath(fname, start=app.env.srcdir)
abspath = os.sep + relpath
docpath = app.env.relfn2path(abspath)[0]
docpath = docpath.rsplit(os.path.extsep, 1)[0]
logger.debug('Adding document %s' % docpath)
app.env.found_docs.add(docpath)
finally:
f.close() | Write the output file for module/package <name>.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param name: the file name without file extension
:type name: :class:`str`
:param text: the content of the file
:type text: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None | entailment |
def import_name(app, name):
"""Import the given name and return name, obj, parent, mod_name
:param name: name to import
:type name: str
:returns: the imported object or None
:rtype: object | None
:raises: None
"""
try:
logger.debug('Importing %r', name)
name, obj = autosummary.import_by_name(name)[:2]
logger.debug('Imported %s', obj)
return obj
except ImportError as e:
logger.warn("Jinjapidoc failed to import %r: %s", name, e) | Import the given name and return name, obj, parent, mod_name
:param name: name to import
:type name: str
:returns: the imported object or None
:rtype: object | None
:raises: None | entailment |
def get_members(app, mod, typ, include_public=None):
"""Return the members of mod of the given type
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param mod: the module with members
:type mod: module
:param typ: the typ, ``'class'``, ``'function'``, ``'exception'``, ``'data'``, ``'members'``
:type typ: str
:param include_public: list of private members to include to publics
:type include_public: list | None
:returns: None
:rtype: None
:raises: None
"""
def include_here(x):
"""Return true if the member should be included in mod.
A member will be included if it is declared in this module or package.
If the `jinjaapidoc_include_from_all` option is `True` then the member
can also be included if it is listed in `__all__`.
:param x: The member
:type x: A class, exception, or function.
:returns: True if the member should be included in mod. False otherwise.
:rtype: bool
"""
return (x.__module__ == mod.__name__ or (include_from_all and x.__name__ in all_list))
all_list = getattr(mod, '__all__', [])
include_from_all = app.config.jinjaapi_include_from_all
include_public = include_public or []
tests = {'class': lambda x: inspect.isclass(x) and not issubclass(x, BaseException) and include_here(x),
'function': lambda x: inspect.isfunction(x) and include_here(x),
'exception': lambda x: inspect.isclass(x) and issubclass(x, BaseException) and include_here(x),
'data': lambda x: not inspect.ismodule(x) and not inspect.isclass(x) and not inspect.isfunction(x),
'members': lambda x: True}
items = []
for name in dir(mod):
i = getattr(mod, name)
inspect.ismodule(i)
if tests.get(typ, lambda x: False)(i):
items.append(name)
public = [x for x in items
if x in include_public or not x.startswith('_')]
logger.debug('Got members of %s of type %s: public %s and %s', mod, typ, public, items)
return public, items | Return the members of mod of the given type
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param mod: the module with members
:type mod: module
:param typ: the typ, ``'class'``, ``'function'``, ``'exception'``, ``'data'``, ``'members'``
:type typ: str
:param include_public: list of private members to include to publics
:type include_public: list | None
:returns: None
:rtype: None
:raises: None | entailment |
def _get_submodules(app, module):
"""Get all submodules for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names and boolean whether its a package
:rtype: list
:raises: TypeError
"""
if inspect.ismodule(module):
if hasattr(module, '__path__'):
p = module.__path__
else:
return []
elif isinstance(module, str):
p = module
else:
raise TypeError("Only Module or String accepted. %s given." % type(module))
logger.debug('Getting submodules of %s', p)
submodules = [(name, ispkg) for loader, name, ispkg in pkgutil.iter_modules(p)]
logger.debug('Found submodules of %s: %s', module, submodules)
return submodules | Get all submodules for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names and boolean whether its a package
:rtype: list
:raises: TypeError | entailment |
def get_submodules(app, module):
"""Get all submodules without packages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names excluding packages
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if not ispkg] | Get all submodules without packages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names excluding packages
:rtype: list
:raises: TypeError | entailment |
def get_subpackages(app, module):
"""Get all subpackages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of packages names
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if ispkg] | Get all subpackages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of packages names
:rtype: list
:raises: TypeError | entailment |
def get_context(app, package, module, fullname):
"""Return a dict for template rendering
Variables:
* :package: The top package
* :module: the module
* :fullname: package.module
* :subpkgs: packages beneath module
* :submods: modules beneath module
* :classes: public classes in module
* :allclasses: public and private classes in module
* :exceptions: public exceptions in module
* :allexceptions: public and private exceptions in module
* :functions: public functions in module
* :allfunctions: public and private functions in module
* :data: public data in module
* :alldata: public and private data in module
* :members: dir(module)
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param package: the parent package name
:type package: str
:param module: the module name
:type module: str
:param fullname: package.module
:type fullname: str
:returns: a dict with variables for template rendering
:rtype: :class:`dict`
:raises: None
"""
var = {'package': package,
'module': module,
'fullname': fullname}
logger.debug('Creating context for: package %s, module %s, fullname %s', package, module, fullname)
obj = import_name(app, fullname)
if not obj:
for k in ('subpkgs', 'submods', 'classes', 'allclasses',
'exceptions', 'allexceptions', 'functions', 'allfunctions',
'data', 'alldata', 'memebers'):
var[k] = []
return var
var['subpkgs'] = get_subpackages(app, obj)
var['submods'] = get_submodules(app, obj)
var['classes'], var['allclasses'] = get_members(app, obj, 'class')
var['exceptions'], var['allexceptions'] = get_members(app, obj, 'exception')
var['functions'], var['allfunctions'] = get_members(app, obj, 'function')
var['data'], var['alldata'] = get_members(app, obj, 'data')
var['members'] = get_members(app, obj, 'members')
logger.debug('Created context: %s', var)
return var | Return a dict for template rendering
Variables:
* :package: The top package
* :module: the module
* :fullname: package.module
* :subpkgs: packages beneath module
* :submods: modules beneath module
* :classes: public classes in module
* :allclasses: public and private classes in module
* :exceptions: public exceptions in module
* :allexceptions: public and private exceptions in module
* :functions: public functions in module
* :allfunctions: public and private functions in module
* :data: public data in module
* :alldata: public and private data in module
* :members: dir(module)
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param package: the parent package name
:type package: str
:param module: the module name
:type module: str
:param fullname: package.module
:type fullname: str
:returns: a dict with variables for template rendering
:rtype: :class:`dict`
:raises: None | entailment |
def create_module_file(app, env, package, module, dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create module file: package %s, module %s', package, module)
template_file = MODULE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(package, module)
var = get_context(app, package, module, fn)
var['ispkg'] = False
rendered = template.render(var)
write_file(app, makename(package, module), rendered, dest, suffix, dryrun, force) | Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None | entailment |
def create_package_file(app, env, root_package, sub_package, private,
dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param root_package: the parent package
:type root_package: :class:`str`
:param sub_package: the package name without root
:type sub_package: :class:`str`
:param private: Include \"_private\" modules
:type private: :class:`bool`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create package file: rootpackage %s, sub_package %s', root_package, sub_package)
template_file = PACKAGE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(root_package, sub_package)
var = get_context(app, root_package, sub_package, fn)
var['ispkg'] = True
for submod in var['submods']:
if shall_skip(app, submod, private):
continue
create_module_file(app, env, fn, submod, dest, suffix, dryrun, force)
rendered = template.render(var)
write_file(app, fn, rendered, dest, suffix, dryrun, force) | Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param root_package: the parent package
:type root_package: :class:`str`
:param sub_package: the package name without root
:type sub_package: :class:`str`
:param private: Include \"_private\" modules
:type private: :class:`bool`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None | entailment |
def shall_skip(app, module, private):
"""Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool`
"""
logger.debug('Testing if %s should be skipped.', module)
# skip if it has a "private" name and this is selected
if module != '__init__.py' and module.startswith('_') and \
not private:
logger.debug('Skip %s because its either private or __init__.', module)
return True
logger.debug('Do not skip %s', module)
return False | Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool` | entailment |
def recurse_tree(app, env, src, dest, excludes, followlinks, force, dryrun, private, suffix):
"""Look for every file in the directory tree and create the corresponding
ReST files.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment
:type env: :class:`jinja2.Environment`
:param src: the path to the python source files
:type src: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param excludes: the paths to exclude
:type excludes: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not generate files
:type dryrun: :class:`bool`
:param private: include "_private" modules
:type private: :class:`bool`
:param suffix: the file extension
:type suffix: :class:`str`
"""
# check if the base directory is a package and get its name
if INITPY in os.listdir(src):
root_package = src.split(os.path.sep)[-1]
else:
# otherwise, the base is a directory with packages
root_package = None
toplevels = []
for root, subs, files in walk(src, followlinks=followlinks):
# document only Python module files (that aren't excluded)
py_files = sorted(f for f in files
if os.path.splitext(f)[1] in PY_SUFFIXES and # noqa: W504
not is_excluded(os.path.join(root, f), excludes))
is_pkg = INITPY in py_files
if is_pkg:
py_files.remove(INITPY)
py_files.insert(0, INITPY)
elif root != src:
# only accept non-package at toplevel
del subs[:]
continue
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if private:
exclude_prefixes = ('.',)
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and not
is_excluded(os.path.join(root, sub), excludes))
if is_pkg:
# we are in a package with something to document
if subs or len(py_files) > 1 or not \
shall_skip(app, os.path.join(root, INITPY), private):
subpackage = root[len(src):].lstrip(os.path.sep).\
replace(os.path.sep, '.')
create_package_file(app, env, root_package, subpackage,
private, dest, suffix, dryrun, force)
toplevels.append(makename(root_package, subpackage))
else:
# if we are at the root level, we don't require it to be a package
assert root == src and root_package is None
for py_file in py_files:
if not shall_skip(app, os.path.join(src, py_file), private):
module = os.path.splitext(py_file)[0]
create_module_file(app, env, root_package, module, dest, suffix, dryrun, force)
toplevels.append(module)
return toplevels | Look for every file in the directory tree and create the corresponding
ReST files.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment
:type env: :class:`jinja2.Environment`
:param src: the path to the python source files
:type src: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param excludes: the paths to exclude
:type excludes: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not generate files
:type dryrun: :class:`bool`
:param private: include "_private" modules
:type private: :class:`bool`
:param suffix: the file extension
:type suffix: :class:`str` | entailment |
def normalize_excludes(excludes):
"""Normalize the excluded directory list."""
return [os.path.normpath(os.path.abspath(exclude)) for exclude in excludes] | Normalize the excluded directory list. | entailment |
def is_excluded(root, excludes):
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exlude "foo" also accidentally excluding "foobar".
"""
root = os.path.normpath(root)
for exclude in excludes:
if root == exclude:
return True
return False | Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exlude "foo" also accidentally excluding "foobar". | entailment |
def generate(app, src, dest, exclude=[], followlinks=False,
force=False, dryrun=False, private=False, suffix='rst',
template_dirs=None):
"""Generage the rst files
Raises an :class:`OSError` if the source path is not a directory.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param src: path to python source files
:type src: :class:`str`
:param dest: output directory
:type dest: :class:`str`
:param exclude: list of paths to exclude
:type exclude: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not create any files
:type dryrun: :class:`bool`
:param private: include \"_private\" modules
:type private: :class:`bool`
:param suffix: file suffix
:type suffix: :class:`str`
:param template_dirs: directories to search for user templates
:type template_dirs: None | :class:`list`
:returns: None
:rtype: None
:raises: OSError
"""
suffix = suffix.strip('.')
if not os.path.isdir(src):
raise OSError("%s is not a directory" % src)
if not os.path.isdir(dest) and not dryrun:
os.makedirs(dest)
src = os.path.normpath(os.path.abspath(src))
exclude = normalize_excludes(exclude)
loader = make_loader(template_dirs)
env = make_environment(loader)
recurse_tree(app, env, src, dest, exclude, followlinks, force, dryrun, private, suffix) | Generage the rst files
Raises an :class:`OSError` if the source path is not a directory.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param src: path to python source files
:type src: :class:`str`
:param dest: output directory
:type dest: :class:`str`
:param exclude: list of paths to exclude
:type exclude: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not create any files
:type dryrun: :class:`bool`
:param private: include \"_private\" modules
:type private: :class:`bool`
:param suffix: file suffix
:type suffix: :class:`str`
:param template_dirs: directories to search for user templates
:type template_dirs: None | :class:`list`
:returns: None
:rtype: None
:raises: OSError | entailment |
def main(app):
"""Parse the config of the app and initiate the generation process
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:returns: None
:rtype: None
:raises: None
"""
c = app.config
src = c.jinjaapi_srcdir
if not src:
return
suffix = "rst"
out = c.jinjaapi_outputdir or app.env.srcdir
if c.jinjaapi_addsummarytemplate:
tpath = pkg_resources.resource_filename(__package__, AUTOSUMMARYTEMPLATE_DIR)
c.templates_path.append(tpath)
tpath = pkg_resources.resource_filename(__package__, TEMPLATE_DIR)
c.templates_path.append(tpath)
prepare_dir(app, out, not c.jinjaapi_nodelete)
generate(app, src, out,
exclude=c.jinjaapi_exclude_paths,
force=c.jinjaapi_force,
followlinks=c.jinjaapi_followlinks,
dryrun=c.jinjaapi_dryrun,
private=c.jinjaapi_includeprivate,
suffix=suffix,
template_dirs=c.templates_path) | Parse the config of the app and initiate the generation process
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:returns: None
:rtype: None
:raises: None | entailment |
def _isclose(obja, objb, rtol=1e-05, atol=1e-08):
"""Return floating point equality."""
return abs(obja - objb) <= (atol + rtol * abs(objb)) | Return floating point equality. | entailment |
def _isreal(obj):
"""
Determine if an object is a real number.
Both Python standard data types and Numpy data types are supported.
:param obj: Object
:type obj: any
:rtype: boolean
"""
# pylint: disable=W0702
if (obj is None) or isinstance(obj, bool):
return False
try:
cond = (int(obj) == obj) or (float(obj) == obj)
except:
return False
return cond | Determine if an object is a real number.
Both Python standard data types and Numpy data types are supported.
:param obj: Object
:type obj: any
:rtype: boolean | entailment |
def _no_exp(number):
r"""
Convert a number to a string without using scientific notation.
:param number: Number to convert
:type number: integer or float
:rtype: string
:raises: RuntimeError (Argument \`number\` is not valid)
"""
if isinstance(number, bool) or (not isinstance(number, (int, float))):
raise RuntimeError("Argument `number` is not valid")
mant, exp = _to_scientific_tuple(number)
if not exp:
return str(number)
floating_mant = "." in mant
mant = mant.replace(".", "")
if exp < 0:
return "0." + "0" * (-exp - 1) + mant
if not floating_mant:
return mant + "0" * exp + (".0" if isinstance(number, float) else "")
lfpart = len(mant) - 1
if lfpart < exp:
return (mant + "0" * (exp - lfpart)).rstrip(".")
return mant | r"""
Convert a number to a string without using scientific notation.
:param number: Number to convert
:type number: integer or float
:rtype: string
:raises: RuntimeError (Argument \`number\` is not valid) | entailment |
def _to_scientific_tuple(number):
r"""
Return mantissa and exponent of a number expressed in scientific notation.
Full precision is maintained if the number is represented as a string.
:param number: Number
:type number: integer, float or string
:rtype: Tuple whose first item is the mantissa (*string*) and the second
item is the exponent (*integer*) of the number when expressed in
scientific notation
:raises: RuntimeError (Argument \`number\` is not valid)
"""
# pylint: disable=W0632
if isinstance(number, bool) or (not isinstance(number, (int, float, str))):
raise RuntimeError("Argument `number` is not valid")
convert = not isinstance(number, str)
# Detect zero and return, simplifies subsequent algorithm
if (convert and (not number)) or (
(not convert) and (not number.strip("0").strip("."))
):
return ("0", 0)
# Break down number into its components, use Decimal type to
# preserve resolution:
# sign : 0 -> +, 1 -> -
# digits: tuple with digits of number
# exp : exponent that gives null fractional part
sign, digits, exp = Decimal(str(number) if convert else number).as_tuple()
mant = (
"{sign}{itg}.{frac}".format(
sign="-" if sign else "",
itg=digits[0],
frac="".join(str(item) for item in digits[1:]),
)
.rstrip("0")
.rstrip(".")
)
exp += len(digits) - 1
return (mant, exp) | r"""
Return mantissa and exponent of a number expressed in scientific notation.
Full precision is maintained if the number is represented as a string.
:param number: Number
:type number: integer, float or string
:rtype: Tuple whose first item is the mantissa (*string*) and the second
item is the exponent (*integer*) of the number when expressed in
scientific notation
:raises: RuntimeError (Argument \`number\` is not valid) | entailment |
def gcd(vector):
"""
Calculate the greatest common divisor (GCD) of a sequence of numbers.
The sequence can be a list of numbers or a Numpy vector of numbers. The
computations are carried out with a precision of 1E-12 if the objects are
not `fractions <https://docs.python.org/3/library/fractions.html>`_. When
possible it is best to use the `fractions
<https://docs.python.org/3/library/fractions.html>`_ data type with the
numerator and denominator arguments when computing the GCD of floating
point numbers.
:param vector: Vector of numbers
:type vector: list of numbers or Numpy vector of numbers
"""
# pylint: disable=C1801
if not len(vector):
return None
if len(vector) == 1:
return vector[0]
if len(vector) == 2:
return pgcd(vector[0], vector[1])
current_gcd = pgcd(vector[0], vector[1])
for element in vector[2:]:
current_gcd = pgcd(current_gcd, element)
return current_gcd | Calculate the greatest common divisor (GCD) of a sequence of numbers.
The sequence can be a list of numbers or a Numpy vector of numbers. The
computations are carried out with a precision of 1E-12 if the objects are
not `fractions <https://docs.python.org/3/library/fractions.html>`_. When
possible it is best to use the `fractions
<https://docs.python.org/3/library/fractions.html>`_ data type with the
numerator and denominator arguments when computing the GCD of floating
point numbers.
:param vector: Vector of numbers
:type vector: list of numbers or Numpy vector of numbers | entailment |
def normalize(value, series, offset=0):
r"""
Scale a value to the range defined by a series.
:param value: Value to normalize
:type value: number
:param series: List of numbers that defines the normalization range
:type series: list
:param offset: Normalization offset, i.e. the returned value will be in
the range [**offset**, 1.0]
:type offset: number
:rtype: number
:raises:
* RuntimeError (Argument \`offset\` is not valid)
* RuntimeError (Argument \`series\` is not valid)
* RuntimeError (Argument \`value\` is not valid)
* ValueError (Argument \`offset\` has to be in the [0.0, 1.0] range)
* ValueError (Argument \`value\` has to be within the bounds of the
argument \`series\`)
For example::
>>> import pmisc
>>> pmisc.normalize(15, [10, 20])
0.5
>>> pmisc.normalize(15, [10, 20], 0.5)
0.75
"""
if not _isreal(value):
raise RuntimeError("Argument `value` is not valid")
if not _isreal(offset):
raise RuntimeError("Argument `offset` is not valid")
try:
smin = float(min(series))
smax = float(max(series))
except:
raise RuntimeError("Argument `series` is not valid")
value = float(value)
offset = float(offset)
if not 0 <= offset <= 1:
raise ValueError("Argument `offset` has to be in the [0.0, 1.0] range")
if not smin <= value <= smax:
raise ValueError(
"Argument `value` has to be within the bounds of argument `series`"
)
return offset + ((1.0 - offset) * (value - smin) / (smax - smin)) | r"""
Scale a value to the range defined by a series.
:param value: Value to normalize
:type value: number
:param series: List of numbers that defines the normalization range
:type series: list
:param offset: Normalization offset, i.e. the returned value will be in
the range [**offset**, 1.0]
:type offset: number
:rtype: number
:raises:
* RuntimeError (Argument \`offset\` is not valid)
* RuntimeError (Argument \`series\` is not valid)
* RuntimeError (Argument \`value\` is not valid)
* ValueError (Argument \`offset\` has to be in the [0.0, 1.0] range)
* ValueError (Argument \`value\` has to be within the bounds of the
argument \`series\`)
For example::
>>> import pmisc
>>> pmisc.normalize(15, [10, 20])
0.5
>>> pmisc.normalize(15, [10, 20], 0.5)
0.75 | entailment |
def per(arga, argb, prec=10):
r"""
Calculate percentage difference between numbers.
If only two numbers are given, the percentage difference between them is
computed. If two sequences of numbers are given (either two lists of
numbers or Numpy vectors), the element-wise percentage difference is
computed. If any of the numbers in the arguments is zero the value returned
is the maximum floating-point number supported by the Python interpreter.
:param arga: First number, list of numbers or Numpy vector
:type arga: float, integer, list of floats or integers, or Numpy vector
of floats or integers
:param argb: Second number, list of numbers or or Numpy vector
:type argb: float, integer, list of floats or integers, or Numpy vector
of floats or integers
:param prec: Maximum length of the fractional part of the result
:type prec: integer
:rtype: Float, list of floats or Numpy vector, depending on the arguments
type
:raises:
* RuntimeError (Argument \`arga\` is not valid)
* RuntimeError (Argument \`argb\` is not valid)
* RuntimeError (Argument \`prec\` is not valid)
* TypeError (Arguments are not of the same type)
"""
# pylint: disable=C0103,C0200,E1101,R0204
if not isinstance(prec, int):
raise RuntimeError("Argument `prec` is not valid")
a_type = 1 * _isreal(arga) + 2 * (isiterable(arga) and not isinstance(arga, str))
b_type = 1 * _isreal(argb) + 2 * (isiterable(argb) and not isinstance(argb, str))
if not a_type:
raise RuntimeError("Argument `arga` is not valid")
if not b_type:
raise RuntimeError("Argument `argb` is not valid")
if a_type != b_type:
raise TypeError("Arguments are not of the same type")
if a_type == 1:
arga, argb = float(arga), float(argb)
num_min, num_max = min(arga, argb), max(arga, argb)
return (
0
if _isclose(arga, argb)
else (
sys.float_info.max
if _isclose(num_min, 0.0)
else round((num_max / num_min) - 1, prec)
)
)
# Contortions to handle lists and Numpy arrays without explicitly
# having to import numpy
ret = copy.copy(arga)
for num, (x, y) in enumerate(zip(arga, argb)):
if not _isreal(x):
raise RuntimeError("Argument `arga` is not valid")
if not _isreal(y):
raise RuntimeError("Argument `argb` is not valid")
x, y = float(x), float(y)
ret[num] = (
0
if _isclose(x, y)
else (
sys.float_info.max
if _isclose(x, 0.0) or _isclose(y, 0)
else (round((max(x, y) / min(x, y)) - 1, prec))
)
)
return ret | r"""
Calculate percentage difference between numbers.
If only two numbers are given, the percentage difference between them is
computed. If two sequences of numbers are given (either two lists of
numbers or Numpy vectors), the element-wise percentage difference is
computed. If any of the numbers in the arguments is zero the value returned
is the maximum floating-point number supported by the Python interpreter.
:param arga: First number, list of numbers or Numpy vector
:type arga: float, integer, list of floats or integers, or Numpy vector
of floats or integers
:param argb: Second number, list of numbers or or Numpy vector
:type argb: float, integer, list of floats or integers, or Numpy vector
of floats or integers
:param prec: Maximum length of the fractional part of the result
:type prec: integer
:rtype: Float, list of floats or Numpy vector, depending on the arguments
type
:raises:
* RuntimeError (Argument \`arga\` is not valid)
* RuntimeError (Argument \`argb\` is not valid)
* RuntimeError (Argument \`prec\` is not valid)
* TypeError (Arguments are not of the same type) | entailment |
def pgcd(numa, numb):
"""
Calculate the greatest common divisor (GCD) of two numbers.
:param numa: First number
:type numa: number
:param numb: Second number
:type numb: number
:rtype: number
For example:
>>> import pmisc, fractions
>>> pmisc.pgcd(10, 15)
5
>>> str(pmisc.pgcd(0.05, 0.02))
'0.01'
>>> str(pmisc.pgcd(5/3.0, 2/3.0))[:6]
'0.3333'
>>> pmisc.pgcd(
... fractions.Fraction(str(5/3.0)),
... fractions.Fraction(str(2/3.0))
... )
Fraction(1, 3)
>>> pmisc.pgcd(
... fractions.Fraction(5, 3),
... fractions.Fraction(2, 3)
... )
Fraction(1, 3)
"""
# Test for integers this way to be valid also for Numpy data types without
# actually importing (and package depending on) Numpy
int_args = (int(numa) == numa) and (int(numb) == numb)
fraction_args = isinstance(numa, Fraction) and isinstance(numb, Fraction)
# Force conversion for Numpy data types
if int_args:
numa, numb = int(numa), int(numb)
elif not fraction_args:
numa, numb = float(numa), float(numb)
# Limit floating numbers to a "sane" fractional part resolution
if (not int_args) and (not fraction_args):
numa, numb = (
Fraction(_no_exp(numa)).limit_denominator(),
Fraction(_no_exp(numb)).limit_denominator(),
)
while numb:
numa, numb = (
numb,
(numa % numb if int_args else (numa % numb).limit_denominator()),
)
return int(numa) if int_args else (numa if fraction_args else float(numa)) | Calculate the greatest common divisor (GCD) of two numbers.
:param numa: First number
:type numa: number
:param numb: Second number
:type numb: number
:rtype: number
For example:
>>> import pmisc, fractions
>>> pmisc.pgcd(10, 15)
5
>>> str(pmisc.pgcd(0.05, 0.02))
'0.01'
>>> str(pmisc.pgcd(5/3.0, 2/3.0))[:6]
'0.3333'
>>> pmisc.pgcd(
... fractions.Fraction(str(5/3.0)),
... fractions.Fraction(str(2/3.0))
... )
Fraction(1, 3)
>>> pmisc.pgcd(
... fractions.Fraction(5, 3),
... fractions.Fraction(2, 3)
... )
Fraction(1, 3) | entailment |
def connect_ws(self, post_connect_callback, channels, reconnect=False):
"""
Connect to a websocket
:channels: List of SockChannel instances
"""
self.post_conn_cb = post_connect_callback
self.channels = channels
self.wsendpoint = self.context["conf"]["endpoints"].get("websocket")
# Skip connecting if we don't have any channels to listen to
if not channels:
return
# Create socket, connect, setting callbacks along the way
self.sock = Socketcluster.socket(self.wsendpoint)
self.sock.setBasicListener(self._on_connect, self._on_connect_close,
self._on_connect_error)
self.sock.setAuthenticationListener(self._on_set_auth, self._on_auth)
self.sock.setreconnection(reconnect)
self.sock.connect() | Connect to a websocket
:channels: List of SockChannel instances | entailment |
def wscall(self, method, query=None, callback=None):
"""Submit a request on the websocket"""
if callback is None:
self.sock.emit(method, query)
else:
self.sock.emitack(method, query, callback) | Submit a request on the websocket | entailment |
def connect_channels(self, channels):
"""Connect the provided channels"""
self.log.info(f"Connecting to channels...")
for chan in channels:
chan.connect(self.sock)
self.log.info(f"\t{chan.channel}") | Connect the provided channels | entailment |
def _on_set_auth(self, sock, token):
"""Set Auth request received from websocket"""
self.log.info(f"Token received: {token}")
sock.setAuthtoken(token) | Set Auth request received from websocket | entailment |
def _on_auth(self, sock, authenticated): # pylint: disable=unused-argument
"""Message received from websocket"""
def ack(eventname, error, data): # pylint: disable=unused-argument
"""Ack"""
if error:
self.log.error(f"""OnAuth: {error}""")
else:
self.connect_channels(self.channels)
self.post_conn_cb()
sock.emitack("auth", self.creds, ack) | Message received from websocket | entailment |
def _on_connect_error(self, sock, err): # pylint: disable=unused-argument
"""Error received from websocket"""
if isinstance(err, SystemExit):
self.log.error(f"Shutting down websocket connection")
else:
self.log.error(f"Websocket error: {err}") | Error received from websocket | entailment |
def connect(self, sock):
"""Attach a given socket to a channel"""
def cbwrap(*args, **kwargs):
"""Callback wrapper; passes in response_type"""
self.callback(self.response_type, *args, **kwargs)
self.sock = sock
self.sock.subscribe(self.channel)
self.sock.onchannel(self.channel, cbwrap) | Attach a given socket to a channel | entailment |
def run_cmd(cmd, input=None, timeout=30, max_try=3, num_try=1):
'''Run command `cmd`.
It's like that, and that's the way it is.
'''
if type(cmd) == str:
cmd = cmd.split()
process = subprocess.Popen(cmd,
stdin=open('/dev/null', 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
communicate_has_timeout = func_has_arg(func=process.communicate,
arg='timeout')
exception = Exception
if communicate_has_timeout:
exception = subprocess.TimeoutExpired # python 3.x
stdout = stderr = b''
exitcode = None
try:
if communicate_has_timeout:
# python 3.x
stdout, stderr = process.communicate(input, timeout)
exitcode = process.wait()
else:
# python 2.x
if timeout is None:
stdout, stderr = process.communicate(input)
exitcode = process.wait()
else:
# thread-recipe: https://stackoverflow.com/a/4825933
def target():
# closure-recipe: https://stackoverflow.com/a/23558809
target.out, target.err = process.communicate(input)
import threading
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
process.terminate()
thread.join()
exitcode = None
else:
exitcode = process.wait()
stdout = target.out
stderr = target.err
except exception:
if num_try < max_try:
return run_cmd(cmd, input, timeout, max_try, num_try+1)
else:
return CmdResult(exitcode, stdout, stderr, cmd, input)
return CmdResult(exitcode, stdout, stderr, cmd, input) | Run command `cmd`.
It's like that, and that's the way it is. | entailment |
def pop_first_arg(argv):
"""
find first positional arg (does not start with -), take it out of array and return it separately
returns (arg, array)
"""
for arg in argv:
if not arg.startswith('-'):
argv.remove(arg)
return (arg, argv)
return (None, argv) | find first positional arg (does not start with -), take it out of array and return it separately
returns (arg, array) | entailment |
def check_options(options, parser):
"""
check options requirements, print and return exit value
"""
if not options.get('release_environment', None):
print("release environment is required")
parser.print_help()
return os.EX_USAGE
return 0 | check options requirements, print and return exit value | entailment |
def write(self):
""" write all needed state info to filesystem """
dumped = self._fax.codec.dump(self.__state, open(self.state_file, 'w')) | write all needed state info to filesystem | entailment |
def package_config(path, template='__config__.ini.TEMPLATE', config_name='__config__.ini', **params):
"""configure the module at the given path with a config template and file.
path = the filesystem path to the given module
template = the config template filename within that path
config_name = the config filename within that path
params = a dict containing config params, which are found in the template using %(key)s.
"""
config_fns = []
template_fns = rglob(path, template)
for template_fn in template_fns:
config_template = ConfigTemplate(fn=template_fn)
config = config_template.render(
fn=os.path.join(os.path.dirname(template_fn), config_name),
prompt=True, path=path, **params)
config.write()
config_fns.append(config.fn)
log.info('wrote %r' % config)
return config_fns | configure the module at the given path with a config template and file.
path = the filesystem path to the given module
template = the config template filename within that path
config_name = the config filename within that path
params = a dict containing config params, which are found in the template using %(key)s. | entailment |
def write(self, fn=None, sorted=False, wait=0):
"""write the contents of this config to fn or its __filename__.
"""
config = ConfigParser(interpolation=None)
if sorted==True: keys.sort()
for key in self.__dict__.get('ordered_keys') or self.keys():
config[key] = {}
ks = self[key].keys()
if sorted==True: ks.sort()
for k in ks:
if type(self[key][k])==list and self.__join_list__ is not None:
config[key][k] = self.__join_list__.join([v for v in self[key][k] if v!=''])
else:
config[key][k] = str(self[key][k])
fn = fn or self.__dict__.get('__filename__')
# use advisory locking on this file
i = 0
while os.path.exists(fn+'.LOCK') and i < wait:
i += 1
time.sleep(1)
if os.path.exists(fn+'.LOCK'):
raise FileExistsError(fn + ' is locked for writing')
else:
with open(fn+'.LOCK', 'w') as lf:
lf.write(time.strftime("%Y-%m-%d %H:%M:%S %Z"))
with open(fn, 'w') as f:
config.write(f)
os.remove(fn+'.LOCK') | write the contents of this config to fn or its __filename__. | entailment |
def expected_param_keys(self):
"""returns a list of params that this ConfigTemplate expects to receive"""
expected_keys = []
r = re.compile('%\(([^\)]+)\)s')
for block in self.keys():
for key in self[block].keys():
s = self[block][key]
if type(s)!=str: continue
md = re.search(r, s)
while md is not None:
k = md.group(1)
if k not in expected_keys:
expected_keys.append(k)
s = s[md.span()[1]:]
md = re.search(r, s)
return expected_keys | returns a list of params that this ConfigTemplate expects to receive | entailment |
def render(self, fn=None, prompt=False, **params):
"""return a Config with the given params formatted via ``str.format(**params)``.
fn=None : If given, will assign this filename to the rendered Config.
prompt=False : If True, will prompt for any param that is None.
"""
from getpass import getpass
expected_keys = self.expected_param_keys()
compiled_params = Dict(**params)
for key in expected_keys:
if key not in compiled_params.keys():
if prompt==True:
if key=='password':
compiled_params[key] = getpass("%s: " % key)
else:
compiled_params[key] = input("%s: " % key)
if 'path' in key:
compiled_params[key] = compiled_params[key].replace('\\','')
else:
compiled_params[key] = "%%(%s)s" % key
config = ConfigTemplate(fn=fn, **self)
config.__dict__['ordered_keys'] = self.__dict__.get('ordered_keys')
for block in config.keys():
for key in config[block].keys():
if type(config[block][key])==str:
config[block][key] = config[block][key] % compiled_params
return config | return a Config with the given params formatted via ``str.format(**params)``.
fn=None : If given, will assign this filename to the rendered Config.
prompt=False : If True, will prompt for any param that is None. | entailment |
def main():
"""Takes a crash id, pulls down data from Socorro, generates signature data"""
parser = argparse.ArgumentParser(
formatter_class=WrappedTextHelpFormatter,
description=DESCRIPTION
)
parser.add_argument(
'-v', '--verbose', help='increase output verbosity', action='store_true'
)
parser.add_argument(
'crashid', help='crash id to generate signatures for'
)
args = parser.parse_args()
api_token = os.environ.get('SOCORRO_API_TOKEN', '')
crash_id = args.crashid.strip()
resp = fetch('/RawCrash/', crash_id, api_token)
if resp.status_code == 404:
printerr('%s: does not exist.' % crash_id)
return 1
if resp.status_code == 429:
printerr('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
printerr('HTTP 500: %s' % resp.content)
return 1
raw_crash = resp.json()
# If there's an error in the raw crash, then something is wrong--probably with the API
# token. So print that out and exit.
if 'error' in raw_crash:
print('Error fetching raw crash: %s' % raw_crash['error'], file=sys.stderr)
return 1
resp = fetch('/ProcessedCrash/', crash_id, api_token)
if resp.status_code == 404:
printerr('%s: does not have processed crash.' % crash_id)
return 1
if resp.status_code == 429:
printerr('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
printerr('HTTP 500: %s' % resp.content)
return 1
processed_crash = resp.json()
# If there's an error in the processed crash, then something is wrong--probably with the
# API token. So print that out and exit.
if 'error' in processed_crash:
printerr('Error fetching processed crash: %s' % processed_crash['error'])
return 1
crash_data = convert_to_crash_data(raw_crash, processed_crash)
print(json.dumps(crash_data, indent=2)) | Takes a crash id, pulls down data from Socorro, generates signature data | entailment |
def _fill_text(self, text, width, indent):
"""Wraps text like HelpFormatter, but doesn't squash lines
This makes it easier to do lists and paragraphs.
"""
parts = text.split('\n\n')
for i, part in enumerate(parts):
# Check to see if it's a bulleted list--if so, then fill each line
if part.startswith('* '):
subparts = part.split('\n')
for j, subpart in enumerate(subparts):
subparts[j] = super(WrappedTextHelpFormatter, self)._fill_text(
subpart, width, indent
)
parts[i] = '\n'.join(subparts)
else:
parts[i] = super(WrappedTextHelpFormatter, self)._fill_text(part, width, indent)
return '\n\n'.join(parts) | Wraps text like HelpFormatter, but doesn't squash lines
This makes it easier to do lists and paragraphs. | entailment |
def get_api_services_by_name(self):
"""Return a dict of services by name"""
if not self.services_by_name:
self.services_by_name = dict({s.get('name'): s for s in self.conf
.get("api")
.get("services")})
return self.services_by_name | Return a dict of services by name | entailment |
def get_api_endpoints(self, apiname):
"""Returns the API endpoints"""
try:
return self.services_by_name\
.get(apiname)\
.get("endpoints")\
.copy()
except AttributeError:
raise Exception(f"Couldn't find the API endpoints") | Returns the API endpoints | entailment |
def get_ws_subscriptions(self, apiname):
"""Returns the websocket subscriptions"""
try:
return self.services_by_name\
.get(apiname)\
.get("subscriptions")\
.copy()
except AttributeError:
raise Exception(f"Couldn't find the websocket subscriptions") | Returns the websocket subscriptions | entailment |
def get_api(self, name=None):
"""Returns the API configuration"""
if name is None:
try:
return self.conf.get("api").copy()
except: # NOQA
raise Exception(f"Couldn't find the API configuration") | Returns the API configuration | entailment |
def get_api_service(self, name=None):
"""Returns the specific service config definition"""
try:
svc = self.services_by_name.get(name, None)
if svc is None:
raise ValueError(f"Couldn't find the API service configuration")
return svc
except: # NOQA
raise Exception(f"Failed to retrieve the API service configuration") | Returns the specific service config definition | entailment |
def _ex_type_str(exobj):
"""Return a string corresponding to the exception type."""
regexp = re.compile(r"<(?:\bclass\b|\btype\b)\s+'?([\w|\.]+)'?>")
exc_type = str(exobj)
if regexp.match(exc_type):
exc_type = regexp.match(exc_type).groups()[0]
exc_type = exc_type[11:] if exc_type.startswith("exceptions.") else exc_type
if "." in exc_type:
exc_type = exc_type.split(".")[-1]
return exc_type | Return a string corresponding to the exception type. | entailment |
def _unicode_to_ascii(obj): # pragma: no cover
"""Convert to ASCII."""
# pylint: disable=E0602,R1717
if isinstance(obj, dict):
return dict(
[
(_unicode_to_ascii(key), _unicode_to_ascii(value))
for key, value in obj.items()
]
)
if isinstance(obj, list):
return [_unicode_to_ascii(element) for element in obj]
if isinstance(obj, unicode):
return obj.encode("utf-8")
return obj | Convert to ASCII. | entailment |
def send_result(self, return_code, output, service_description='', specific_servers=None):
'''
Send results
'''
if specific_servers == None:
specific_servers = self.servers
else:
specific_servers = set(self.servers).intersection(specific_servers)
for server in specific_servers:
if self.servers[server]['availability']:
try:
self.servers[server]['notifier'].svc_result(self.servers[server]['custom_fqdn'],
service_description,
int(return_code),
str(output))
LOG.info("[nsca][%s][%s]: Data sent", service_description, self.servers[server]['host'])
except (socket.gaierror, socket.error), error:
self.servers[server]['availability'] = False
LOG.error("[nsca][%s][%s]: %s", service_description, self.servers[server]['host'], error[1])
else:
LOG.error("[nsca][%s][%s]: Data not sent, server is unavailable", service_description, self.servers[server]['host']) | Send results | entailment |
def get_remote_executors(hub_ip, port = 4444):
''' Get remote hosts from Selenium Grid Hub Console
@param hub_ip: hub ip of selenium grid hub
@param port: hub port of selenium grid hub
'''
resp = requests.get("http://%s:%s/grid/console" %(hub_ip, port))
remote_hosts = ()
if resp.status_code == 200:
remote_hosts = re.findall("remoteHost: ([\w/\.:]+)",resp.text)
return [host + "/wd/hub" for host in remote_hosts] | Get remote hosts from Selenium Grid Hub Console
@param hub_ip: hub ip of selenium grid hub
@param port: hub port of selenium grid hub | entailment |
def gen_remote_driver(executor, capabilities):
''' Generate remote drivers with desired capabilities(self.__caps) and command_executor
@param executor: command executor for selenium remote driver
@param capabilities: A dictionary of capabilities to request when starting the browser session.
@return: remote driver
'''
# selenium requires browser's driver and PATH env. Firefox's driver is required for selenium3.0
firefox_profile = capabilities.pop("firefox_profile",None)
return webdriver.Remote(executor, desired_capabilities=capabilities, browser_profile = firefox_profile) | Generate remote drivers with desired capabilities(self.__caps) and command_executor
@param executor: command executor for selenium remote driver
@param capabilities: A dictionary of capabilities to request when starting the browser session.
@return: remote driver | entailment |
def gen_local_driver(browser, capabilities):
''' Generate localhost drivers with desired capabilities(self.__caps)
@param browser: firefox or chrome
@param capabilities: A dictionary of capabilities to request when starting the browser session.
@return: localhost driver
'''
if browser == "firefox":
fp = capabilities.pop("firefox_profile",None)
return webdriver.Firefox(desired_capabilities =capabilities, firefox_profile=fp)
elif browser == "chrome":
return webdriver.Chrome(desired_capabilities=capabilities)
else:
raise TypeError("Unsupport browser {}".format(browser)) | Generate localhost drivers with desired capabilities(self.__caps)
@param browser: firefox or chrome
@param capabilities: A dictionary of capabilities to request when starting the browser session.
@return: localhost driver | entailment |
def _production(self):
"""Calculate total energy production. Not rounded"""
return self._nuclear + self._diesel + self._gas + self._wind + self._combined + self._vapor + self._solar + self._hydraulic + self._carbon + self._waste + self._other | Calculate total energy production. Not rounded | entailment |
def _links(self):
"""Calculate total energy production. Not Rounded"""
total = 0.0
for value in self.link.values():
total += value
return total | Calculate total energy production. Not Rounded | entailment |
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
print(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") | Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no". | entailment |
def wall_of_name(self):
'''
Appends identifiers for the different databases (such as Entrez id's)
and returns them. Uses the CrossRef class below.
'''
names = []
if self.standard_name:
names.append(self.standard_name)
if self.systematic_name:
names.append(self.systematic_name)
names.extend([xref.xrid for xref in self.crossref_set.all()])
for i in range(len(names)):
names[i] = re.sub(nonalpha, '', names[i])
names_string = ' '.join(names)
if self.standard_name:
names_string += ' ' + re.sub(num, '', self.standard_name)
return names_string | Appends identifiers for the different databases (such as Entrez id's)
and returns them. Uses the CrossRef class below. | entailment |
def save(self, *args, **kwargs):
"""
Override save() method to make sure that standard_name and
systematic_name won't be null or empty, or consist of only space
characters (such as space, tab, new line, etc).
"""
empty_std_name = False
if not self.standard_name or self.standard_name.isspace():
empty_std_name = True
empty_sys_name = False
if not self.systematic_name or self.systematic_name.isspace():
empty_sys_name = True
if empty_std_name and empty_sys_name:
raise ValueError(
"Both standard_name and systematic_name are empty")
super(Gene, self).save(*args, **kwargs) | Override save() method to make sure that standard_name and
systematic_name won't be null or empty, or consist of only space
characters (such as space, tab, new line, etc). | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.