signature (stringlengths 8-3.44k) | body (stringlengths 0-1.41M) | docstring (stringlengths 1-122k) | id (stringlengths 5-17)
---|---|---|---|
def _get_role_arn(): | role_arn = bottle.request.headers.get('<STR_LIT>')<EOL>if not role_arn:<EOL><INDENT>role_arn = _lookup_ip_role_arn(bottle.request.environ.get('<STR_LIT>'))<EOL><DEDENT>if not role_arn:<EOL><INDENT>role_arn = _role_arn<EOL><DEDENT>return role_arn<EOL> | Return role arn from X-Role-ARN header,
lookup role arn from source IP,
or fall back to command line default. | f2188:m1 |
def _format_iso(dt): | return datetime.datetime.strftime(dt, "<STR_LIT>")<EOL> | Format UTC datetime as iso8601 to second resolution. | f2188:m2 |
def _index(items): | bottle.response.content_type = '<STR_LIT>'<EOL>return "<STR_LIT:\n>".join(items)<EOL> | Format index list pages. | f2188:m3 |
def __contains__(self, key): | return key in self._word_frequency<EOL> | setup easier known checks | f2192:c0:m1 |
def __getitem__(self, key): | return self._word_frequency[key]<EOL> | setup easier frequency checks | f2192:c0:m2 |
@property<EOL><INDENT>def word_frequency(self):<DEDENT> | return self._word_frequency<EOL> | WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable | f2192:c0:m3 |
@property<EOL><INDENT>def distance(self):<DEDENT> | return self._distance<EOL> | int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 | f2192:c0:m4 |
@distance.setter<EOL><INDENT>def distance(self, val):<DEDENT> | tmp = <NUM_LIT:2><EOL>try:<EOL><INDENT>int(val)<EOL>if val > <NUM_LIT:0> and val <= <NUM_LIT:2>:<EOL><INDENT>tmp = val<EOL><DEDENT><DEDENT>except (ValueError, TypeError):<EOL><INDENT>pass<EOL><DEDENT>self._distance = tmp<EOL> | set the distance parameter | f2192:c0:m5 |
def split_words(self, text): | return self._tokenizer(text)<EOL> | Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text | f2192:c0:m6 |
def export(self, filepath, encoding="<STR_LIT:utf-8>", gzipped=True): | data = json.dumps(self.word_frequency.dictionary, sort_keys=True)<EOL>write_file(filepath, encoding, gzipped, data)<EOL> | Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not | f2192:c0:m7 |
def word_probability(self, word, total_words=None): | if total_words is None:<EOL><INDENT>total_words = self._word_frequency.total_words<EOL><DEDENT>return self._word_frequency.dictionary[word] / total_words<EOL> | Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word | f2192:c0:m8 |
def correction(self, word): | return max(self.candidates(word), key=self.word_probability)<EOL> | The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate | f2192:c0:m9 |
def candidates(self, word): | if self.known([word]): <EOL><INDENT>return {word}<EOL><DEDENT>res = [x for x in self.edit_distance_1(word)]<EOL>tmp = self.known(res)<EOL>if tmp:<EOL><INDENT>return tmp<EOL><DEDENT>if self._distance == <NUM_LIT:2>:<EOL><INDENT>tmp = self.known([x for x in self.__edit_distance_alt(res)])<EOL>if tmp:<EOL><INDENT>return tmp<EOL><DEDENT><DEDENT>return {word}<EOL> | Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates | f2192:c0:m10 |
def known(self, words): | tmp = [w.lower() for w in words]<EOL>return set(<EOL>w<EOL>for w in tmp<EOL>if w in self._word_frequency.dictionary<EOL>or not self._check_if_should_check(w)<EOL>)<EOL> | The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus | f2192:c0:m11 |
def unknown(self, words): | tmp = [w.lower() for w in words if self._check_if_should_check(w)]<EOL>return set(w for w in tmp if w not in self._word_frequency.dictionary)<EOL> | The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus | f2192:c0:m12 |
def edit_distance_1(self, word): | word = word.lower()<EOL>if self._check_if_should_check(word) is False:<EOL><INDENT>return {word}<EOL><DEDENT>letters = self._word_frequency.letters<EOL>splits = [(word[:i], word[i:]) for i in range(len(word) + <NUM_LIT:1>)]<EOL>deletes = [L + R[<NUM_LIT:1>:] for L, R in splits if R]<EOL>transposes = [L + R[<NUM_LIT:1>] + R[<NUM_LIT:0>] + R[<NUM_LIT:2>:] for L, R in splits if len(R) > <NUM_LIT:1>]<EOL>replaces = [L + c + R[<NUM_LIT:1>:] for L, R in splits if R for c in letters]<EOL>inserts = [L + c + R for L, R in splits for c in letters]<EOL>return set(deletes + transposes + replaces + inserts)<EOL> | Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word | f2192:c0:m13 |
def edit_distance_2(self, word): | word = word.lower()<EOL>return [<EOL>e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)<EOL>]<EOL> | Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word | f2192:c0:m14 |
def __edit_distance_alt(self, words): | words = [x.lower() for x in words]<EOL>return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]<EOL> | Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words | f2192:c0:m15 |
def __contains__(self, key): | return key.lower() in self._dictionary<EOL> | turn on contains | f2192:c1:m1 |
def __getitem__(self, key): | return self._dictionary[key.lower()]<EOL> | turn on getitem | f2192:c1:m2 |
def pop(self, key, default=None): | return self._dictionary.pop(key.lower(), default)<EOL> | Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present | f2192:c1:m3 |
@property<EOL><INDENT>def dictionary(self):<DEDENT> | return self._dictionary<EOL> | Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable | f2192:c1:m4 |
@property<EOL><INDENT>def total_words(self):<DEDENT> | return self._total_words<EOL> | int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable | f2192:c1:m5 |
@property<EOL><INDENT>def unique_words(self):<DEDENT> | return self._unique_words<EOL> | int: The total number of unique words in the word frequency list
Note:
Not settable | f2192:c1:m6 |
@property<EOL><INDENT>def letters(self):<DEDENT> | return self._letters<EOL> | str: The listing of all letters found within the corpus
Note:
Not settable | f2192:c1:m7 |
def tokenize(self, text): | for x in self._tokenizer(text):<EOL><INDENT>yield x.lower()<EOL><DEDENT> | Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` | f2192:c1:m8 |
def keys(self): | for key in self._dictionary.keys():<EOL><INDENT>yield key<EOL><DEDENT> | Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` | f2192:c1:m9 |
def words(self): | for word in self._dictionary.keys():<EOL><INDENT>yield word<EOL><DEDENT> | Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` | f2192:c1:m10 |
def items(self): | for word in self._dictionary.keys():<EOL><INDENT>yield word, self._dictionary[word]<EOL><DEDENT> | Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` | f2192:c1:m11 |
def load_dictionary(self, filename, encoding="<STR_LIT:utf-8>"): | with load_file(filename, encoding) as data:<EOL><INDENT>self._dictionary.update(json.loads(data.lower(), encoding=encoding))<EOL>self._update_dictionary()<EOL><DEDENT> | Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary | f2192:c1:m12 |
def load_text_file(self, filename, encoding="<STR_LIT:utf-8>", tokenizer=None): | with load_file(filename, encoding=encoding) as data:<EOL><INDENT>self.load_text(data, tokenizer)<EOL><DEDENT> | Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string | f2192:c1:m13 |
def load_text(self, text, tokenizer=None): | if tokenizer:<EOL><INDENT>words = [x.lower() for x in tokenizer(text)]<EOL><DEDENT>else:<EOL><INDENT>words = self.tokenize(text)<EOL><DEDENT>self._dictionary.update(words)<EOL>self._update_dictionary()<EOL> | Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string | f2192:c1:m14 |
def load_words(self, words): | self._dictionary.update([word.lower() for word in words])<EOL>self._update_dictionary()<EOL> | Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded | f2192:c1:m15 |
def add(self, word): | self.load_words([word])<EOL> | Add a word to the word frequency list
Args:
word (str): The word to add | f2192:c1:m16 |
def remove_words(self, words): | for word in words:<EOL><INDENT>self._dictionary.pop(word.lower())<EOL><DEDENT>self._update_dictionary()<EOL> | Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove | f2192:c1:m17 |
def remove(self, word): | self._dictionary.pop(word.lower())<EOL>self._update_dictionary()<EOL> | Remove a word from the word frequency list
Args:
word (str): The word to remove | f2192:c1:m18 |
def remove_by_threshold(self, threshold=<NUM_LIT:5>): | keys = [x for x in self._dictionary.keys()]<EOL>for key in keys:<EOL><INDENT>if self._dictionary[key] <= threshold:<EOL><INDENT>self._dictionary.pop(key)<EOL><DEDENT><DEDENT>self._update_dictionary()<EOL> | Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed | f2192:c1:m19 |
def _update_dictionary(self): | self._total_words = sum(self._dictionary.values())<EOL>self._unique_words = len(self._dictionary.keys())<EOL>self._letters = set()<EOL>for key in self._dictionary:<EOL><INDENT>self._letters.update(key)<EOL><DEDENT> | Update the word frequency object | f2192:c1:m20 |
@contextlib.contextmanager<EOL>def load_file(filename, encoding): | try:<EOL><INDENT>with gzip.open(filename, mode="<STR_LIT>") as fobj:<EOL><INDENT>yield fobj.read()<EOL><DEDENT><DEDENT>except (OSError, IOError):<EOL><INDENT>with OPEN(filename, mode="<STR_LIT:r>", encoding=encoding) as fobj:<EOL><INDENT>yield fobj.read()<EOL><DEDENT><DEDENT> | Context manager to handle opening a gzip or text file correctly and
reading all the data
Args:
filename (str): The filename to open
encoding (str): The file encoding to use
Yields:
str: The string data from the file read | f2194:m0 |
def write_file(filepath, encoding, gzipped, data): | if gzipped:<EOL><INDENT>with gzip.open(filepath, "<STR_LIT>") as fobj:<EOL><INDENT>fobj.write(data)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with OPEN(filepath, "<STR_LIT:w>", encoding=encoding) as fobj:<EOL><INDENT>if sys.version_info < (<NUM_LIT:3>, <NUM_LIT:0>):<EOL><INDENT>data = data.decode(encoding)<EOL><DEDENT>fobj.write(data)<EOL><DEDENT><DEDENT> | Write the data to file either as a gzip file or text based on the
gzipped parameter
Args:
filepath (str): The filename to open
encoding (str): The file encoding to use
gzipped (bool): Whether the file should be gzipped or not
data (str): The data to be written out | f2194:m1 |
def _parse_into_words(text): | return re.findall(r"<STR_LIT>", text.lower())<EOL> | Parse the text into words; currently removes punctuation
Args:
text (str): The text to split into words | f2194:m2 |
def read_file(filepath): | with io.open(filepath, '<STR_LIT:r>') as filepointer:<EOL><INDENT>res = filepointer.read()<EOL><DEDENT>return res<EOL> | read the file | f2197:m0 |
def get_html_theme_path(): | cur_dir = path.abspath(path.dirname(path.dirname(__file__)))<EOL>return cur_dir<EOL> | Return list of HTML theme paths. | f2198:m0 |
def _check_multiproc_env_var(): | if '<STR_LIT>' in os.environ:<EOL><INDENT>if os.path.isdir(os.environ['<STR_LIT>']):<EOL><INDENT>return<EOL><DEDENT><DEDENT>raise ValueError('<STR_LIT>')<EOL> | Checks that the `prometheus_multiproc_dir` environment variable is set,
which is required for the multiprocess collector to work properly.
:raises ValueError: if the environment variable is not set
or if it does not point to a directory | f2200:m0 |
def __init__(self, app=None, export_defaults=True,<EOL>defaults_prefix='<STR_LIT>', group_by='<STR_LIT:path>',<EOL>buckets=None, registry=None): | _check_multiproc_env_var()<EOL>registry = registry or CollectorRegistry()<EOL>MultiProcessCollector(registry)<EOL>super(MultiprocessPrometheusMetrics, self).__init__(<EOL>app=app, path=None, export_defaults=export_defaults,<EOL>defaults_prefix=defaults_prefix, group_by=group_by,<EOL>buckets=buckets, registry=registry<EOL>)<EOL> | Create a new multiprocess-aware Prometheus metrics export configuration.
:param app: the Flask application (can be `None`)
:param export_defaults: expose all HTTP request latencies
and number of HTTP requests
:param defaults_prefix: string to prefix the default exported
metrics name with (when either `export_defaults=True` or
`export_defaults(..)` is called)
:param group_by: group default HTTP metrics by
this request property, like `path`, `endpoint`, `url_rule`, etc.
(defaults to `path`)
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param registry: the Prometheus Registry to use (can be `None` and it
will be registered with `prometheus_client.multiprocess.MultiProcessCollector`) | f2200:c0:m0 |
def start_http_server(self, port, host='<STR_LIT>', endpoint=None): | if self.should_start_http_server():<EOL><INDENT>pc_start_http_server(port, host, registry=self.registry)<EOL><DEDENT> | Start an HTTP server for exposing the metrics, if the
`should_start_http_server` function says we should, otherwise just return.
Uses the implementation from `prometheus_client` rather than a Flask app.
:param port: the HTTP port to expose the metrics endpoint on
:param host: the HTTP host to listen on (default: `0.0.0.0`)
:param endpoint: **ignored**, the HTTP server will respond on any path | f2200:c0:m1 |
@abstractmethod<EOL><INDENT>def should_start_http_server(self):<DEDENT> | pass<EOL> | Whether or not to start the HTTP server.
Only return `True` from one process only, typically the main one.
Note: you still need to explicitly call the `start_http_server` function.
:return: `True` if the server should start, `False` otherwise | f2200:c0:m2 |
@classmethod<EOL><INDENT>def start_http_server_when_ready(cls, port, host='<STR_LIT>'):<DEDENT> | _check_multiproc_env_var()<EOL>GunicornPrometheusMetrics().start_http_server(port, host)<EOL> | Start the HTTP server from the Gunicorn config module.
Doesn't necessarily need an instance, a class is fine.
Example:
def when_ready(server):
GunicornPrometheusMetrics.start_http_server_when_ready(metrics_port)
:param port: the HTTP port to expose the metrics endpoint on
:param host: the HTTP host to listen on (default: `0.0.0.0`) | f2200:c2:m1 |
@classmethod<EOL><INDENT>def mark_process_dead_on_child_exit(cls, pid):<DEDENT> | pc_mark_process_dead(pid)<EOL> | Mark a child worker as exited from the Gunicorn config module.
Example:
def child_exit(server, worker):
GunicornPrometheusMetrics.mark_process_dead_on_child_exit(worker.pid)
:param pid: the worker pid that has exited | f2200:c2:m2 |
def __init__(self, app, path='<STR_LIT>',<EOL>export_defaults=True, defaults_prefix='<STR_LIT>',<EOL>group_by='<STR_LIT:path>', buckets=None,<EOL>registry=None, **kwargs): | self.app = app<EOL>self.path = path<EOL>self._export_defaults = export_defaults<EOL>self._defaults_prefix = defaults_prefix or '<STR_LIT>'<EOL>self.buckets = buckets<EOL>self.version = __version__<EOL>if registry:<EOL><INDENT>self.registry = registry<EOL><DEDENT>else:<EOL><INDENT>from prometheus_client import REGISTRY as DEFAULT_REGISTRY<EOL>self.registry = DEFAULT_REGISTRY<EOL><DEDENT>if kwargs.get('<STR_LIT>') is True:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', DeprecationWarning<EOL>)<EOL>self.group_by = '<STR_LIT>'<EOL><DEDENT>elif group_by:<EOL><INDENT>self.group_by = group_by<EOL><DEDENT>else:<EOL><INDENT>self.group_by = '<STR_LIT:path>'<EOL><DEDENT>if app is not None:<EOL><INDENT>self.init_app(app)<EOL><DEDENT> | Create a new Prometheus metrics export configuration.
:param app: the Flask application
:param path: the metrics path (defaults to `/metrics`)
:param export_defaults: expose all HTTP request latencies
and number of HTTP requests
:param defaults_prefix: string to prefix the default exported
metrics name with (when either `export_defaults=True` or
`export_defaults(..)` is called) or in case you don't want
any prefix then use `NO_PREFIX` constant
:param group_by: group default HTTP metrics by
this request property, like `path`, `endpoint`, `url_rule`, etc.
(defaults to `path`)
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param registry: the Prometheus Registry to use | f2201:c0:m0 |
def init_app(self, app): | if self.path:<EOL><INDENT>self.register_endpoint(self.path, app)<EOL><DEDENT>if self._export_defaults:<EOL><INDENT>self.export_defaults(<EOL>self.buckets, self.group_by,<EOL>self._defaults_prefix, app<EOL>)<EOL><DEDENT> | This callback can be used to initialize an application for the
use with this prometheus reporter setup.
This is usually used with a flask "app factory" configuration. Please
see: http://flask.pocoo.org/docs/1.0/patterns/appfactories/
Note, that you need to use `PrometheusMetrics(app=None, ...)`
for this mode, otherwise it is called automatically.
:param app: the Flask application | f2201:c0:m1 |
def register_endpoint(self, path, app=None): | if is_running_from_reloader() and not os.environ.get('<STR_LIT>'):<EOL><INDENT>return<EOL><DEDENT>if app is None:<EOL><INDENT>app = self.app or current_app<EOL><DEDENT>@app.route(path)<EOL>@self.do_not_track()<EOL>def prometheus_metrics():<EOL><INDENT>from prometheus_client import multiprocess, CollectorRegistry<EOL>if '<STR_LIT>' in os.environ:<EOL><INDENT>registry = CollectorRegistry()<EOL><DEDENT>else:<EOL><INDENT>registry = self.registry<EOL><DEDENT>if '<STR_LIT>' in request.args:<EOL><INDENT>registry = registry.restricted_registry(request.args.getlist('<STR_LIT>'))<EOL><DEDENT>if '<STR_LIT>' in os.environ:<EOL><INDENT>multiprocess.MultiProcessCollector(registry)<EOL><DEDENT>headers = {'<STR_LIT:Content-Type>': CONTENT_TYPE_LATEST}<EOL>return generate_latest(registry), <NUM_LIT:200>, headers<EOL><DEDENT> | Register the metrics endpoint on the Flask application.
:param path: the path of the endpoint
:param app: the Flask application to register the endpoint on
(by default it is the application registered with this class) | f2201:c0:m2 |
def start_http_server(self, port, host='<STR_LIT>', endpoint='<STR_LIT>'): | if is_running_from_reloader():<EOL><INDENT>return<EOL><DEDENT>app = Flask('<STR_LIT>' % port)<EOL>self.register_endpoint(endpoint, app)<EOL>def run_app():<EOL><INDENT>app.run(host=host, port=port)<EOL><DEDENT>thread = threading.Thread(target=run_app)<EOL>thread.setDaemon(True)<EOL>thread.start()<EOL> | Start an HTTP server for exposing the metrics.
This will be an individual Flask application,
not the one registered with this class.
:param port: the HTTP port to expose the metrics endpoint on
:param host: the HTTP host to listen on (default: `0.0.0.0`)
:param endpoint: the URL path to expose the endpoint on
(default: `/metrics`) | f2201:c0:m3 |
def export_defaults(self, buckets=None, group_by='<STR_LIT:path>',<EOL>prefix='<STR_LIT>', app=None, **kwargs): | if app is None:<EOL><INDENT>app = self.app or current_app<EOL><DEDENT>if not prefix:<EOL><INDENT>prefix = self._defaults_prefix or '<STR_LIT>'<EOL><DEDENT>buckets_as_kwargs = {}<EOL>if buckets is not None:<EOL><INDENT>buckets_as_kwargs['<STR_LIT>'] = buckets<EOL><DEDENT>if kwargs.get('<STR_LIT>') is True:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', DeprecationWarning<EOL>)<EOL>duration_group = '<STR_LIT>'<EOL><DEDENT>elif group_by:<EOL><INDENT>duration_group = group_by<EOL><DEDENT>else:<EOL><INDENT>duration_group = '<STR_LIT:path>'<EOL><DEDENT>if callable(duration_group):<EOL><INDENT>duration_group_name = duration_group.__name__<EOL><DEDENT>else:<EOL><INDENT>duration_group_name = duration_group<EOL><DEDENT>if prefix == NO_PREFIX:<EOL><INDENT>prefix = "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>prefix = prefix + "<STR_LIT:_>"<EOL><DEDENT>histogram = Histogram(<EOL>'<STR_LIT>' % prefix,<EOL>'<STR_LIT>',<EOL>('<STR_LIT>', duration_group_name, '<STR_LIT:status>'),<EOL>registry=self.registry,<EOL>**buckets_as_kwargs<EOL>)<EOL>counter = Counter(<EOL>'<STR_LIT>' % prefix,<EOL>'<STR_LIT>',<EOL>('<STR_LIT>', '<STR_LIT:status>'),<EOL>registry=self.registry<EOL>)<EOL>self.info(<EOL>'<STR_LIT>' % prefix,<EOL>'<STR_LIT>',<EOL>version=self.version<EOL>)<EOL>def before_request():<EOL><INDENT>request.prom_start_time = default_timer()<EOL><DEDENT>def after_request(response):<EOL><INDENT>if hasattr(request, '<STR_LIT>'):<EOL><INDENT>return response<EOL><DEDENT>if hasattr(request, '<STR_LIT>'):<EOL><INDENT>total_time = max(default_timer() - request.prom_start_time, <NUM_LIT:0>)<EOL>if callable(duration_group):<EOL><INDENT>group = duration_group(request)<EOL><DEDENT>else:<EOL><INDENT>group = getattr(request, duration_group)<EOL><DEDENT>histogram.labels(<EOL>request.method, group, response.status_code<EOL>).observe(total_time)<EOL><DEDENT>counter.labels(request.method, response.status_code).inc()<EOL>return response<EOL><DEDENT>app.before_request(before_request)<EOL>app.after_request(after_request)<EOL> | Export the default metrics:
- HTTP request latencies
- Number of HTTP requests
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param group_by: group default HTTP metrics by
this request property, like `path`, `endpoint`, `rule`, etc.
(defaults to `path`)
:param prefix: prefix to start the default metrics names with
or `NO_PREFIX` (to skip prefix)
:param app: the Flask application | f2201:c0:m4 |
def histogram(self, name, description, labels=None, **kwargs): | return self._track(<EOL>Histogram,<EOL>lambda metric, time: metric.observe(time),<EOL>kwargs, name, description, labels,<EOL>registry=self.registry<EOL>)<EOL> | Use a Histogram to track the execution time and invocation count
of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Histogram | f2201:c0:m5 |
def summary(self, name, description, labels=None, **kwargs): | return self._track(<EOL>Summary,<EOL>lambda metric, time: metric.observe(time),<EOL>kwargs, name, description, labels,<EOL>registry=self.registry<EOL>)<EOL> | Use a Summary to track the execution time and invocation count
of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Summary | f2201:c0:m6 |
def gauge(self, name, description, labels=None, **kwargs): | return self._track(<EOL>Gauge,<EOL>lambda metric, time: metric.dec(),<EOL>kwargs, name, description, labels,<EOL>registry=self.registry,<EOL>before=lambda metric: metric.inc()<EOL>)<EOL> | Use a Gauge to track the number of invocations in progress
for the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Gauge | f2201:c0:m7 |
def counter(self, name, description, labels=None, **kwargs): | return self._track(<EOL>Counter,<EOL>lambda metric, time: metric.inc(),<EOL>kwargs, name, description, labels,<EOL>registry=self.registry<EOL>)<EOL> | Use a Counter to track the total number of invocations of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Counter | f2201:c0:m8 |
@staticmethod<EOL><INDENT>def _track(metric_type, metric_call, metric_kwargs, name, description, labels,<EOL>registry, before=None):<DEDENT> | if labels is not None and not isinstance(labels, dict):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>label_names = labels.keys() if labels else tuple()<EOL>parent_metric = metric_type(<EOL>name, description, labelnames=label_names, registry=registry,<EOL>**metric_kwargs<EOL>)<EOL>def argspec(func):<EOL><INDENT>if hasattr(inspect, '<STR_LIT>'):<EOL><INDENT>return inspect.getfullargspec(func)<EOL><DEDENT>else:<EOL><INDENT>return inspect.getargspec(func)<EOL><DEDENT><DEDENT>def label_value(f):<EOL><INDENT>if not callable(f):<EOL><INDENT>return lambda x: f<EOL><DEDENT>if argspec(f).args:<EOL><INDENT>return lambda x: f(x)<EOL><DEDENT>else:<EOL><INDENT>return lambda x: f()<EOL><DEDENT><DEDENT>label_generator = tuple(<EOL>(key, label_value(call))<EOL>for key, call in labels.items()<EOL>) if labels else tuple()<EOL>def get_metric(response):<EOL><INDENT>if label_names:<EOL><INDENT>return parent_metric.labels(<EOL>**{key: call(response) for key, call in label_generator}<EOL>)<EOL><DEDENT>else:<EOL><INDENT>return parent_metric<EOL><DEDENT><DEDENT>def decorator(f):<EOL><INDENT>@functools.wraps(f)<EOL>def func(*args, **kwargs):<EOL><INDENT>if before:<EOL><INDENT>metric = get_metric(None)<EOL>before(metric)<EOL><DEDENT>else:<EOL><INDENT>metric = None<EOL><DEDENT>start_time = default_timer()<EOL>try:<EOL><INDENT>response = f(*args, **kwargs)<EOL><DEDENT>except HTTPException as ex:<EOL><INDENT>response = ex<EOL><DEDENT>except Exception as ex:<EOL><INDENT>response = make_response('<STR_LIT>' % ex, <NUM_LIT>)<EOL><DEDENT>total_time = max(default_timer() - start_time, <NUM_LIT:0>)<EOL>if not metric:<EOL><INDENT>response_for_metric = response<EOL>if not isinstance(response, Response):<EOL><INDENT>if request.endpoint == f.__name__:<EOL><INDENT>response_for_metric = make_response(response)<EOL><DEDENT><DEDENT>metric = get_metric(response_for_metric)<EOL><DEDENT>metric_call(metric, time=total_time)<EOL>return response<EOL><DEDENT>return func<EOL><DEDENT>return decorator<EOL> | Internal method decorator logic.
:param metric_type: the type of the metric from the `prometheus_client` library
:param metric_call: the invocation to execute as a callable with `(metric, time)`
:param metric_kwargs: additional keyword arguments for creating the metric
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param before: an optional callable to invoke before executing the
request handler method accepting the single `metric` argument
:param registry: the Prometheus Registry to use | f2201:c0:m9 |
@staticmethod<EOL><INDENT>def do_not_track():<DEDENT> | def decorator(f):<EOL><INDENT>@functools.wraps(f)<EOL>def func(*args, **kwargs):<EOL><INDENT>request.prom_do_not_track = True<EOL>return f(*args, **kwargs)<EOL><DEDENT>return func<EOL><DEDENT>return decorator<EOL> | Decorator to skip the default metrics collection for the method.
*Note*: explicit metrics decorators will still collect the data | f2201:c0:m10 |
def info(self, name, description, labelnames=None, labelvalues=None, **labels): | if labels and labelnames:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if labelnames is None and labels:<EOL><INDENT>labelnames = labels.keys()<EOL><DEDENT>elif labelnames and labelvalues:<EOL><INDENT>for idx, label_name in enumerate(labelnames):<EOL><INDENT>labels[label_name] = labelvalues[idx]<EOL><DEDENT><DEDENT>gauge = Gauge(<EOL>name, description, labelnames or tuple(),<EOL>registry=self.registry<EOL>)<EOL>if labels:<EOL><INDENT>gauge = gauge.labels(**labels)<EOL><DEDENT>gauge.set(<NUM_LIT:1>)<EOL>return gauge<EOL> | Report any information as a Prometheus metric.
This will create a `Gauge` with the initial value of 1.
The easiest way to use it is:
metrics = PrometheusMetrics(app)
metrics.info(
'app_info', 'Application info',
version='1.0', major=1, minor=0
)
If the order of the labels matters:
metrics = PrometheusMetrics(app)
metrics.info(
'app_info', 'Application info',
('version', 'major', 'minor'),
('1.0', 1, 0)
)
:param name: the name of the metric
:param description: the description of the metric
:param labelnames: the names of the labels
:param labelvalues: the values of the labels
:param labels: the names and values of the labels
:return: the newly created `Gauge` metric | f2201:c0:m11 |
@property<EOL><INDENT>def toc(self):<DEDENT> | from mistletoe.block_token import List<EOL>def get_indent(level):<EOL><INDENT>if self.omit_title:<EOL><INDENT>level -= <NUM_LIT:1><EOL><DEDENT>return '<STR_LIT:U+0020>' * <NUM_LIT:4> * (level - <NUM_LIT:1>)<EOL><DEDENT>def build_list_item(heading):<EOL><INDENT>level, content = heading<EOL>template = '<STR_LIT>'<EOL>return template.format(indent=get_indent(level), content=content)<EOL><DEDENT>return List([build_list_item(heading) for heading in self._headings])<EOL> | Returns table of contents as a block_token.List instance. | f2217:c0:m1 |
def render_heading(self, token): | rendered = super().render_heading(token)<EOL>content = self.parse_rendered_heading(rendered)<EOL>if not (self.omit_title and token.level == <NUM_LIT:1><EOL>or token.level > self.depth<EOL>or any(cond(content) for cond in self.filter_conds)):<EOL><INDENT>self._headings.append((token.level, content))<EOL><DEDENT>return rendered<EOL> | Overrides super().render_heading; stores rendered heading first,
then returns it. | f2217:c0:m2 |
@staticmethod<EOL><INDENT>def parse_rendered_heading(rendered):<DEDENT> | return re.sub(r'<STR_LIT>', '<STR_LIT>', rendered)<EOL> | Helper method; converts rendered heading to plain text. | f2217:c0:m3 |
def escape_url(raw): | from urllib.parse import quote<EOL>return quote(raw, safe='<STR_LIT>')<EOL> | Escape urls to prevent code injection craziness. (Hopefully.) | f2222:m0 |
def __init__(self, *extras): | self.listTokens = []<EOL>super().__init__(*chain([block_token.HTMLBlock, span_token.HTMLSpan], extras))<EOL> | Args:
extras (list): allows subclasses to add even more custom tokens. | f2222:c0:m0 |
def render_math(self, token): | if token.content.startswith('<STR_LIT>'):<EOL><INDENT>return self.render_raw_text(token)<EOL><DEDENT>return '<STR_LIT>'.format(self.render_raw_text(token))<EOL> | Ensure Math tokens are all enclosed in two dollar signs. | f2223:c0:m0 |
def render_document(self, token): | return super().render_document(token) + self.mathjax_src<EOL> | Append CDN link for MathJax to the end of <body>. | f2223:c0:m1 |
def __init__(self, *extras): | self._suppress_ptag_stack = [False]<EOL>super().__init__(*chain((HTMLBlock, HTMLSpan), extras))<EOL>self._stdlib_charref = html._charref<EOL>_charref = re.compile(r'<STR_LIT>'<EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>')<EOL>html._charref = _charref<EOL> | Args:
extras (list): allows subclasses to add even more custom tokens. | f2224:c0:m0 |
@staticmethod<EOL><INDENT>def escape_url(raw):<DEDENT> | return html.escape(quote(html.unescape(raw), safe='<STR_LIT>'))<EOL> | Escape urls to prevent code injection craziness. (Hopefully.) | f2224:c0:m27 |
def convert_file(filename, renderer): | try:<EOL><INDENT>with open(filename, '<STR_LIT:r>') as fin:<EOL><INDENT>rendered = mistletoe.markdown(fin, renderer)<EOL>print(rendered, end='<STR_LIT>')<EOL><DEDENT><DEDENT>except OSError:<EOL><INDENT>sys.exit('<STR_LIT>'.format(filename))<EOL><DEDENT> | Parse a Markdown file and dump the output to stdout. | f2225:m2 |
def interactive(renderer): | _import_readline()<EOL>_print_heading(renderer)<EOL>contents = []<EOL>more = False<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>prompt, more = ('<STR_LIT>', True) if more else ('<STR_LIT>', True)<EOL>contents.append(input(prompt) + '<STR_LIT:\n>')<EOL><DEDENT>except EOFError:<EOL><INDENT>print('<STR_LIT:\n>' + mistletoe.markdown(contents, renderer), end='<STR_LIT>')<EOL>more = False<EOL>contents = []<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>print('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT> | Parse user input, dump to stdout, rinse and repeat.
Python REPL style. | f2225:m3 |
def tokenize(iterable, token_types): | return make_tokens(tokenize_block(iterable, token_types))<EOL> | Searches for token_types in iterable.
Args:
iterable (list): user input lines to be parsed.
token_types (list): a list of block-level token constructors.
Returns:
block-level token instances. | f2226:m0 |
def tokenize_block(iterable, token_types): | lines = FileWrapper(iterable)<EOL>parse_buffer = ParseBuffer()<EOL>line = lines.peek()<EOL>while line is not None:<EOL><INDENT>for token_type in token_types:<EOL><INDENT>if token_type.start(line):<EOL><INDENT>result = token_type.read(lines)<EOL>if result is not None:<EOL><INDENT>parse_buffer.append((token_type, result))<EOL>break<EOL><DEDENT><DEDENT><DEDENT>else: <EOL><INDENT>next(lines)<EOL>parse_buffer.loose = True<EOL><DEDENT>line = lines.peek()<EOL><DEDENT>return parse_buffer<EOL> | Returns a list of pairs (token_type, read_result).
Footnotes are parsed here, but span-level parsing has not
started yet. | f2226:m1 |
def make_tokens(parse_buffer): | tokens = []<EOL>for token_type, result in parse_buffer:<EOL><INDENT>token = token_type(result)<EOL>if token is not None:<EOL><INDENT>tokens.append(token)<EOL><DEDENT><DEDENT>return tokens<EOL> | Takes a list of pairs (token_type, read_result) and
applies token_type(read_result).
Footnotes are already parsed before this point,
and span-level parsing is started here. | f2226:m2 |
def main(): | cli.main(sys.argv[<NUM_LIT:1>:])<EOL> | Entry point. | f2227:m0 |
def tokenize(lines): | return tokenizer.tokenize(lines, _token_types)<EOL> | A wrapper around block_tokenizer.tokenize. Pass in all block-level
token constructors as arguments to block_tokenizer.tokenize.
Doing so (instead of importing block_token module in block_tokenizer)
avoids cyclic dependency issues, and allows for future injections of
custom token classes.
_token_types variable is at the bottom of this module.
See also: block_tokenizer.tokenize, span_token.tokenize_inner. | f2228:m0 |
def add_token(token_cls, position=<NUM_LIT:0>): | _token_types.insert(position, token_cls)<EOL> | Allows external manipulation of the parsing process.
This function is usually called in BaseRenderer.__enter__.
Arguments:
token_cls (SpanToken): token to be included in the parsing process.
position (int): the position for the token class to be inserted into. | f2228:m1 |
def remove_token(token_cls): | _token_types.remove(token_cls)<EOL> | Allows external manipulation of the parsing process.
This function is usually called in BaseRenderer.__exit__.
Arguments:
token_cls (BlockToken): token to be removed from the parsing process. | f2228:m2 |
def reset_tokens(): | global _token_types<EOL>_token_types = [globals()[cls_name] for cls_name in __all__]<EOL> | Resets global _token_types to all token classes in __all__. | f2228:m3 |
@classmethod<EOL><INDENT>def parse_marker(cls, line):<DEDENT> | match_obj = cls.pattern.match(line)<EOL>if match_obj is None:<EOL><INDENT>return None <EOL><DEDENT>leader = match_obj.group(<NUM_LIT:1>)<EOL>content = match_obj.group(<NUM_LIT:0>).replace(leader+'<STR_LIT:\t>', leader+'<STR_LIT:U+0020>', <NUM_LIT:1>)<EOL>prepend = len(content)<EOL>if prepend == len(line.rstrip('<STR_LIT:\n>')):<EOL><INDENT>prepend = match_obj.end(<NUM_LIT:1>) + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>spaces = match_obj.group(<NUM_LIT:2>)<EOL>if spaces.startswith('<STR_LIT:\t>'):<EOL><INDENT>spaces = spaces.replace('<STR_LIT:\t>', '<STR_LIT:U+0020>', <NUM_LIT:1>)<EOL><DEDENT>spaces = spaces.replace('<STR_LIT:\t>', '<STR_LIT:U+0020>')<EOL>n_spaces = len(spaces)<EOL>if n_spaces > <NUM_LIT:4>:<EOL><INDENT>prepend = match_obj.end(<NUM_LIT:1>) + <NUM_LIT:1><EOL><DEDENT><DEDENT>return prepend, leader<EOL> | Returns a pair (prepend, leader) iff the line has a valid leader. | f2228:c9:m3 |
@staticmethod<EOL><INDENT>def split_delimiter(delimiter):<DEDENT> | return re.findall(r'<STR_LIT>', delimiter)<EOL> | Helper function; returns a list of align options.
Args:
delimiter (str): e.g.: "| :--- | :---: | ---: |\n"
Returns:
a list of align options (None, 0 or 1). | f2228:c10:m1 |
@staticmethod<EOL><INDENT>def parse_align(column):<DEDENT> | return (<NUM_LIT:0> if column[<NUM_LIT:0>] == '<STR_LIT::>' else <NUM_LIT:1>) if column[-<NUM_LIT:1>] == '<STR_LIT::>' else None<EOL> | Helper function; returns align option from cell content.
Returns:
None if align = left;
0 if align = center;
1 if align = right. | f2228:c10:m2 |
def get_ast(token): | node = {}<EOL>node['<STR_LIT:type>'] = token.__class__.__name__<EOL>node.update({key: token.__dict__[key] for key in token.__dict__})<EOL>if '<STR_LIT>' in node:<EOL><INDENT>node['<STR_LIT>'] = get_ast(node['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in node:<EOL><INDENT>node['<STR_LIT>'] = [get_ast(child) for child in node['<STR_LIT>']]<EOL><DEDENT>return node<EOL> | Recursively unrolls token attributes into dictionaries (token.children
into lists).
Returns:
a dictionary of token's attributes. | f2229:m0 |
def render(self, token): | return json.dumps(get_ast(token), indent=<NUM_LIT:2>) + '<STR_LIT:\n>'<EOL> | Returns the string representation of the AST.
Overrides super().render. Delegates the logic to get_ast. | f2229:c0:m0 |
def __init__(self, *extras): | tokens = self._tokens_from_module(latex_token)<EOL>self.packages = {}<EOL>super().__init__(*chain(tokens, extras))<EOL> | Args:
extras (list): allows subclasses to add even more custom tokens. | f2231:c0:m0 |
def markdown(iterable, renderer=HTMLRenderer): | with renderer() as renderer:<EOL><INDENT>return renderer.render(Document(iterable))<EOL><DEDENT> | Output HTML with default settings.
Enables inline and block-level HTML tags. | f2232:m0 |
def escape(s, quote=True): | s = s.replace("<STR_LIT:&>", "<STR_LIT>") <EOL>s = s.replace("<STR_LIT:<>", "<STR_LIT>")<EOL>s = s.replace("<STR_LIT:>>", "<STR_LIT>")<EOL>if quote:<EOL><INDENT>s = s.replace('<STR_LIT:">', "<STR_LIT>")<EOL>s = s.replace('<STR_LIT>', "<STR_LIT>")<EOL><DEDENT>return s<EOL> | Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true (the default), the quotation mark
characters, both double quote (") and single quote (') characters are also
translated. | f2235:m0 |
def unescape(s): | if '<STR_LIT:&>' not in s:<EOL><INDENT>return s<EOL><DEDENT>return _charref.sub(_replace_charref, s)<EOL> | Convert all named and numeric character references (e.g. >, >,
&x3e;) in the string s to the corresponding unicode characters.
This function uses the rules defined by the HTML 5 standard
for both valid and invalid character references, and the list of
HTML 5 named character references defined in html.entities.html5. | f2235:m2 |
def render(self, token): | return self.render_map[token.__class__.__name__](token)<EOL> | Grabs the class name from input token and finds its corresponding
render function.
Basically a janky way to do polymorphism.
Arguments:
token: whose __class__.__name__ is in self.render_map. | f2236:c0:m1 |
def render_inner(self, token): | rendered = [self.render(child) for child in token.children]<EOL>return '<STR_LIT>'.join(rendered)<EOL> | Recursively renders child tokens. Joins the rendered
strings with no space in between.
If newlines / spaces are needed between tokens, add them
in their respective templates, or override this function
in the renderer subclass, so that whitespace won't seem to
appear magically for anyone reading your program.
Arguments:
token: a branch node who has children attribute. | f2236:c0:m2 |
def __enter__(self): | return self<EOL> | Make renderer classes into context managers. | f2236:c0:m3 |
def __exit__(self, exception_type, exception_val, traceback): | block_token.reset_tokens()<EOL>span_token.reset_tokens()<EOL> | Make renderer classes into context managers.
Reset block_token._token_types and span_token._token_types. | f2236:c0:m4 |
@staticmethod<EOL><INDENT>def _tokens_from_module(module):<DEDENT> | return [getattr(module, name) for name in module.__all__]<EOL> | Helper method; takes a module and returns a list of all token classes
specified in module.__all__. Useful when custom tokens are defined in a
separate module. | f2236:c0:m6 |
def render_raw_text(self, token): | return token.content<EOL> | Default render method for RawText. Simply return token.content. | f2236:c0:m7 |
def __getattr__(self, name): | if not name.startswith('<STR_LIT>'):<EOL><INDENT>msg = '<STR_LIT>'.format(cls=type(self).__name__, name=name)<EOL>raise AttributeError(msg).with_traceback(sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>return self.render_inner<EOL> | Provides a default render method for all tokens.
Any token without a custom render method will simply be rendered by
self.render_inner.
If name does not start with 'render_', raise AttributeError as normal,
for less magic during debugging.
This method would only be called if the attribute requested has not
been defined. Defined attributes will not be overridden.
I still think this is heavy wizardry.
Let me know if you would like this method removed. | f2236:c0:m8 |
def tokenize_inner(content): | return tokenizer.tokenize(content, _token_types)<EOL> | A wrapper around span_tokenizer.tokenize. Pass in all span-level token
constructors as arguments to span_tokenizer.tokenize.
Doing so (instead of importing span_token module in span_tokenizer)
avoids cyclic dependency issues, and allows for future injections of
custom token classes.
_token_types variable is at the bottom of this module.
See also: span_tokenizer.tokenize, block_token.tokenize. | f2237:m0 |
def add_token(token_cls, position=<NUM_LIT:1>): | _token_types.insert(position, token_cls)<EOL> | Allows external manipulation of the parsing process.
This function is called in BaseRenderer.__enter__.
Arguments:
token_cls (SpanToken): token to be included in the parsing process. | f2237:m1 |
def remove_token(token_cls): | _token_types.remove(token_cls)<EOL> | Allows external manipulation of the parsing process.
This function is called in BaseRenderer.__exit__.
Arguments:
token_cls (SpanToken): token to be removed from the parsing process. | f2237:m2 |
def reset_tokens(): | global _token_types<EOL>_token_types = [globals()[cls_name] for cls_name in __all__]<EOL> | Resets global _token_types to all token classes in __all__. | f2237:m3 |
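
The `body` column above stores each function as a single flattened token stream: `<EOL>` appears to mark line breaks, `<INDENT>`/`<DEDENT>` mark changes in indentation level, and `<STR_LIT...>`/`<NUM_LIT...>` stand in for masked string and numeric literals. As a rough illustration only, the sketch below shows one way such a cell could be unflattened back into indented Python source. The token meanings are inferred from the rows above, and `render_body` is a hypothetical helper, not part of the dataset.

```python
# Minimal sketch (not part of the dataset): unflatten a tokenized "body" cell
# back into indented Python source. Token semantics are assumed from the rows
# above: <EOL> = newline, <INDENT>/<DEDENT> = indentation change, and the
# <STR_LIT*>/<NUM_LIT*> literal placeholders are simply left in place.
def render_body(body: str, indent: str = "    ") -> str:
    level = 0  # rendered standalone; real bodies sit one level under the signature
    lines = []
    for chunk in body.split("<EOL>"):
        # Consume any indentation markers that start this chunk.
        while True:
            if chunk.startswith("<INDENT>"):
                level += 1
                chunk = chunk[len("<INDENT>"):]
            elif chunk.startswith("<DEDENT>"):
                level = max(level - 1, 0)
                chunk = chunk[len("<DEDENT>"):]
            else:
                break
        if chunk.strip():
            lines.append(indent * level + chunk.strip())
    return "\n".join(lines)


# Example: the body of f2188:m2 from the table above.
print(render_body('return datetime.datetime.strftime(dt, "<STR_LIT>")<EOL>'))
```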