def best_first_graph_search(problem, f):
"""Search the nodes with the lowest f scores first.
You specify the function f(node) that you want to minimize; for example,
if f is a heuristic estimate to the goal, then we have greedy best
first search; if f is node.depth then we have breadth-first search.
There is a subtlety: the line "f = memoize(f, 'f')" means that the f
values will be cached on the nodes as they are computed. So after doing
a best first search you can examine the f values of the path returned."""
global frontier, node, explored, counter
if counter == -1:
f = memoize(f, 'f')
node = Node(problem.initial)
display_current(node)
if problem.goal_test(node.state):
return node
frontier = PriorityQueue(min, f)
frontier.append(node)
display_frontier(frontier)
explored = set()
if counter % 3 == 0 and counter >= 0:
node = frontier.pop()
display_current(node)
if problem.goal_test(node.state):
return node
explored.add(node.state)
if counter % 3 == 1 and counter >= 0:
for child in node.expand(problem):
if child.state not in explored and child not in frontier:
frontier.append(child)
elif child in frontier:
incumbent = frontier[child]
if f(child) < f(incumbent):
del frontier[incumbent]
frontier.append(child)
display_frontier(frontier)
if counter % 3 == 2 and counter >= 0:
display_explored(node)
return None

def on_click():
'''
This function defines the action of the 'Next' button.
'''
global algo, counter, next_button, romania_problem, start, goal
romania_problem = GraphProblem(start.get(), goal.get(), romania_map)
if "Breadth-First Tree Search" == algo.get():
node = breadth_first_tree_search(romania_problem)
if node is not None:
final_path = bfts(romania_problem).solution()
final_path.append(start.get())
display_final(final_path)
next_button.config(state="disabled")
counter += 1
elif "Depth-First Tree Search" == algo.get():
node = depth_first_tree_search(romania_problem)
if node is not None:
final_path = dfts(romania_problem).solution()
final_path.append(start.get())
display_final(final_path)
next_button.config(state="disabled")
counter += 1
elif "Breadth-First Search" == algo.get():
node = breadth_first_search(romania_problem)
if node is not None:
final_path = bfs(romania_problem).solution()
final_path.append(start.get())
display_final(final_path)
next_button.config(state="disabled")
counter += 1
elif "Depth-First Graph Search" == algo.get():
node = depth_first_graph_search(romania_problem)
if node is not None:
final_path = dfgs(romania_problem).solution()
final_path.append(start.get())
display_final(final_path)
next_button.config(state="disabled")
counter += 1
elif "Uniform Cost Search" == algo.get():
node = uniform_cost_search(romania_problem)
if node is not None:
final_path = ucs(romania_problem).solution()
final_path.append(start.get())
display_final(final_path)
next_button.config(state="disabled")
counter += 1
elif "A* - Search" == algo.get():
node = astar_search(romania_problem)
if node is not None:
final_path = asts(romania_problem).solution()
final_path.append(start.get())
display_final(final_path)
next_button.config(state="disabled")
counter += 1

def report_to_summary(self, report):
"""Parses a WPT report log object into a file-wise summary."""
test_files = {}
for result in report['results']:
test_file = result['test']
# We expect report to output only one entry per test.
assert test_file not in test_files, (
'Assumption that each test_file only shows up once broken!')
if result['status'] in ('OK', 'PASS'):
test_files[test_file] = [1, 1]
else:
test_files[test_file] = [0, 1]
for subtest in result['subtests']:
if subtest['status'] == 'PASS':
test_files[test_file][0] += 1
test_files[test_file][1] += 1
return test_files
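
A minimal usage sketch of the summary shape this method produces; `summarizer` and the report dict below are hypothetical stand-ins for whatever object holds this method and for a real WPT log:

# Hypothetical usage sketch: `summarizer` stands in for the object that
# defines report_to_summary; the report mirrors the assumed WPT log shape.
report = {
    'results': [
        {'test': '/dom/example.html', 'status': 'OK',
         'subtests': [{'status': 'PASS'}, {'status': 'FAIL'}]},
        {'test': '/css/other.html', 'status': 'FAIL', 'subtests': []},
    ]
}
summary = summarizer.report_to_summary(report)
# Expected shape: {'/dom/example.html': [2, 3], '/css/other.html': [0, 1]}
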
def fetchResults(self, spec): # type: (TestRunSpec) -> TestRunSummary
'''Fetch a python object representing the test run results JSON for the
given sha/platform spec. '''
# type: (str, str) -> dict
# Note that the dict's keys are the test paths, and values are an
# array of [pass_count, total_test_count].
# For example JSON output, see https://wpt.fyi/results?platform=chrome
encodedArgs = urlencode({'sha': spec.sha, 'platform': spec.platform})
url = 'https://wpt.fyi/results?' + encodedArgs
try:
response = self.pool.request('GET', url, redirect=False)
except urllib3.exceptions.SSLError as e:
logging.warning('SSL error fetching %s: %s' % (url, e.message))
return None
if response.status // 100 != 3:
logging.warning(
'Got unexpected non-redirect result %d for url %s'
% (response.status, url))
return None
loadedUrl = response.headers['location']
response = self.pool.request('GET', loadedUrl)
if response.status != 200:
logging.warning('Failed to fetch %s' % (url))
return None
logging.debug('Processing JSON from %s' % (url))
return TestRunSummary(spec, json.loads(response.data.decode('utf-8')))

def walk(self):
'''
This method will generate the walks.
:param edges: List of edges
:type edges: mowl.graph.edge.Edge
'''
raise NotImplementedError()

def insert_annotations(ontology_file, annotations, out_file = None, verbose=False):
"""
Method to build a dataset given an ontology file and the annotations to be inserted into the ontology. Annotation files must be in .tsv format, with no header. In each row, the first element is the annotated entity and the remaining elements are the annotation entities (which are entities in the ontology).
:param ontology_file: Ontology file in .owl format
:type ontology_file: str
:param annotations: Annotations to be included in the ontology. There can be more than one annotation file.
:type annotations: List of (str, str, bool) corresponding to (annotation file path, relation name, directed flag)
:param out_file: Path for the new ontology.
:type out_file: str
:param verbose: If True, extra information is logged.
:type verbose: bool
"""
if verbose:
logging.basicConfig(level = logging.INFO)
if out_file is None:
out_file = ontology_file
manager = OWLManager.createOWLOntologyManager()
ont = manager.loadOntologyFromOntologyDocument(java.io.File(ontology_file))
owl_format = manager.getOntologyFormat(ont)
if owl_format.isPrefixOWLOntologyFormat():
ont_prefixes = owl_format.asPrefixOWLOntologyFormat().getPrefixName2PrefixMap()
ont_prefixes = dict(ont_prefixes).values()
factory = manager.getOWLDataFactory()
for annots_file, relation_name, directed in annotations:
relation = factory.getOWLObjectProperty(IRI.create(f"{relation_name}"))
with open(annots_file) as f:
for line in f:
items = line.strip().split("\t")
annotating_entity = items[0]
annotating_entity = factory.getOWLClass(IRI.create(f"{annotating_entity}"))
for ont_id in items[1:]:
ont_class = factory.getOWLClass(IRI.create(f"{ont_id}"))
objSomeValsAxiom = factory.getOWLObjectSomeValuesFrom(relation, ont_class)
axiom = factory.getOWLSubClassOfAxiom(annotating_entity, objSomeValsAxiom)
manager.addAxiom(ont, axiom)
if not directed:
objSomeValsAxiom = factory.getOWLObjectSomeValuesFrom(relation, annotating_entity)
axiom = factory.getOWLSubClassOfAxiom(ont_class, objSomeValsAxiom)
manager.addAxiom(ont, axiom)
manager.saveOntology(ont, IRI.create("file:" + os.path.abspath(out_file)))
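
A hypothetical usage sketch: the file names and the relation IRI are invented for illustration, and the third tuple element is the directed flag unpacked above.

# Hypothetical usage sketch: annotate proteins with ontology classes through
# an invented hasFunction relation; True marks the relation as directed.
annotations = [("protein2go.tsv", "http://mowl/hasFunction", True)]
insert_annotations("go.owl", annotations, out_file="go_annotated.owl", verbose=True)
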
def create_from_triples(
triples_file,
out_file,
relation_name = None,
bidirectional = False,
head_prefix = PREFIXES["default"],
tail_prefix = PREFIXES["default"]
):
"""Method to create an ontology from a .tsv file with triples.
:param triples_file: Path for the file containing the triples. This must be a `.tsv` file in which each row is of the form (head, relation, tail). Files with rows of the form (head, tail) are also supported; in that case the `relation_name` field must be specified.
:type triples_file: str
:param relation_name: Name for relation in case the `.tsv` input file has only two columns.
:type relation_name: str
:param bidirectional: If `True`, the triples will be considered undirected.
:type bidirectional: bool
:param out_file: Path for the output ontology. If `None` and an existing ontology is input, the existing ontology will be overwritten.
:type out_file: str
:param head_prefix: Prefix to be assigned to the head of each triple. Default is `http://default/mowl/`
:type head_prefix: str
:param tail_prefix: Prefix to be assigned to the tail of each triple. Default is `http://default/mowl/`
:type tail_prefix: str
"""
manager = OWLManager.createOWLOntologyManager()
factory = manager.getOWLDataFactory()
ont = manager.createOntology()
with open(triples_file, "r") as f:
for line in f:
line = tuple(line.strip().split("\t"))
if len(line) < 2 or len(line) > 3:
raise ValueError(f"Expected number of elements in triple to be 2 or 3. Got {len(line)}")
if len(line) == 2 and relation_name is None:
raise ValueError("Found 2 elements in triple but the relation_name field is None")
if len(line) == 2:
head, tail = line
rel = relation_name
if len(line) == 3:
head, rel, tail = line
head = factory.getOWLClass(IRI.create(f"{head_prefix}{head}"))
rel = factory.getOWLObjectProperty(IRI.create(f"{rel}"))
tail = factory.getOWLClass(IRI.create(f"{tail_prefix}{tail}"))
axiom = factory.getOWLSubClassOfAxiom(
head, factory.getOWLObjectSomeValuesFrom(
rel, tail))
manager.addAxiom(ont, axiom)
if bidirectional:
axiom = factory.getOWLSubClassOfAxiom(
tail, factory.getOWLObjectSomeValuesFrom(
rel, head))
manager.addAxiom(ont, axiom)
manager.saveOntology(ont, IRI.create("file:" + os.path.abspath(out_file)))
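
A hedged usage sketch with invented file names, using the two-column form that requires `relation_name`:

# Hypothetical usage sketch: two-column triples, so relation_name is required;
# bidirectional=True adds the reverse axiom for every pair.
with open("interactions.tsv", "w") as f:
    f.write("P12345\tP67890\n")
    f.write("P67890\tQ11111\n")
create_from_triples(
    "interactions.tsv",
    "interactions.owl",
    relation_name="http://interacts_with",
    bidirectional=True,
)
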
def evaluate_ppi(self, ppi_axiom_properties=['<http://interacts_with>']):
"""
Evaluate predicted protein-protein interactions relative to the test ontology, which has the set of interactions kept back from model training.
"""
self.train_or_load_model()
model = self.w2v_model
training_classes, training_classes_pairs = self.get_classes_pairs_from_axioms(self.dataset.ontology, ppi_axiom_properties)
_, testing_classes_pairs = self.get_classes_pairs_from_axioms(self.dataset.testing, ppi_axiom_properties)
# some classes in the training set don't make it into the model (maybe their frequency is too low)
available_training_classes = [c for c in training_classes if c in model.wv]
class_to_index = {available_training_classes[i]: i for i in range(0, len(available_training_classes))}
# dict "protein-index-1 => set( protein-indexes-2 )" of the trained PPI pairs
training_pairs_exclude_indexes = dict()
for training_pair in training_classes_pairs:
i1 = class_to_index.get(training_pair[0])
i2 = class_to_index.get(training_pair[1])
if i1 is not None and i2 is not None:
exclude_ids_set = training_pairs_exclude_indexes.get(i1, set())
training_pairs_exclude_indexes[i1] = exclude_ids_set
exclude_ids_set.add(i2)
testing_classes_pairs = sorted(testing_classes_pairs, key=lambda pair: pair[0])
embeddings = model.wv[available_training_classes]
observed_ranks = list()
previous_i1 = None # to preserve memory, we compare one protein to all the others at a time
for testing_pair in testing_classes_pairs:
i1 = class_to_index.get(testing_pair[0])
i2 = class_to_index.get(testing_pair[1])
if i1 is not None and i2 is not None:
# prepare a new row of class comparisons
if previous_i1 != i1:
previous_i1 = i1
# Word2Vec.n_similarity only returns an aggregated similarity of all vectors, so staying with this:
class_distances = pairwise_distances([embeddings[i1]], embeddings, metric='cosine')[0]
# disregard the protein-protein interactions which came naturally from the training set
exclude_ids_set = training_pairs_exclude_indexes.get(i1, set())
for exclude_i2 in exclude_ids_set:
class_distances[exclude_i2] = MAX_FLOAT
# disregard the similarity of protein with itself
class_distances[i1] = MAX_FLOAT
# For each protein, it is ranked how similar (per the model) it is to the current i1.
# The lower the rank, the higher the protein similarity.
ranked_indexes = rankdata(class_distances, method='average')
observed_ranks.append(ranked_indexes[i2])
# We queried the similarity ranks of all the testing set protein-protein interactions, and collected the
# ranks in observed_ranks. Let's bin the ranks and see if good ranks appear more often, and also
# calculate the mean rank.
histogram = np.histogram(observed_ranks, bins=[0, 1.1, 10.1, 100.1, 10000000])[0]
rank_1 = histogram[0]
rank_10 = histogram[0] + histogram[1]
rank_100 = histogram[0] + histogram[1] + histogram[2]
return(np.mean(observed_ranks), rank_1, rank_10, rank_100)
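
A hedged sketch of consuming the returned tuple; `model` is a stand-in for whatever object defines evaluate_ppi and owns the dataset:

# Hypothetical usage sketch: unpack the mean rank and the hits@{1,10,100} counts.
mean_rank, rank_1, rank_10, rank_100 = model.evaluate_ppi()
print('mean rank: %.1f, hits@1: %d, hits@10: %d, hits@100: %d'
      % (mean_rank, rank_1, rank_10, rank_100))
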
def hashivault_default_token():
"""Get a default Vault token from an environment variable or a file."""
if 'VAULT_TOKEN' in os.environ:
return os.environ['VAULT_TOKEN']
token_file = os.path.expanduser('~/.vault-token')
if os.path.exists(token_file):
with open(token_file, 'r') as f:
return f.read().strip()
return ''
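
A minimal sketch of the lookup order, with an invented token value:

# Minimal sketch: the environment variable wins over ~/.vault-token,
# and an empty string is returned when neither source exists.
import os
os.environ['VAULT_TOKEN'] = 's.example-token'   # hypothetical value
assert hashivault_default_token() == 's.example-token'
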
def check_secrets_engines(module, client):
"""Checks if secrets engine is mounted
:param module: Ansible module. Must contain mount_point in parameters.
:param client: HVAC client
:return: change status, error
:rtype: (bool, dict)
"""
changed = False
err = None
try:
if (module.params.get('mount_point') + "/") not in client.sys.list_mounted_secrets_engines()['data'].keys():
err = {'failed': True, 'msg': 'secret engine is not enabled', 'rc': 1}
except Exception:
if module.check_mode:
changed = True
else:
err = {'failed': True, 'msg': 'secret engine is not enabled or namespace does not exist', 'rc': 1}
return changed, err
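
A hedged sketch of the call pattern inside a module's run path; `module` and `client` are assumed to come from the caller:

# Hedged sketch: fail fast when the mount is missing, otherwise continue.
changed, err = check_secrets_engines(module, client)
if err:
    module.fail_json(**err)
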
def check_auth_methods(module, client):
"""Checks if auth engine is mounted
:param module: Ansible module. Must contain mount_point in parameters.
:param client: HVAC client
:return: change status, error
:rtype: (bool, dict)
"""
changed = False
err = None
try:
if (module.params.get('mount_point') + "/") not in client.sys.list_auth_methods()['data'].keys():
err = {'failed': True, 'msg': 'auth method is not enabled', 'rc': 1}
except Exception:
if module.check_mode:
changed = True
else:
err = {'failed': True, 'msg': 'auth mount is not enabled or namespace does not exist', 'rc': 1}
return changed, err

def check_pki_role(name, mount_point, client):
"""Checks if role is prtesent in secrets engine
:param module: Ansible module. Must contain mount_point in parameters.
:param mounted: HVAC client
:return: change status, error
:rtype: (bool, dict)
"""
try:
return client.secrets.pki.read_role(name=name, mount_point=mount_point).get('data')
except Exception:
return None

def compare_state(desired_state, current_state, ignore=None):
"""Compares desired state to current state. Returns true if objects are equal
Recursively walks dict object to compare all keys
:param desired_state: The state user desires.
:param current_state: The state that currently exists.
:param ignore: Ignore these keys.
:type ignore: list
:return: True if the states are the same.
:rtype: bool
"""
if ignore is None:
ignore = []
if (type(desired_state) is list):
if ((type(current_state) != list) or (len(desired_state) != len(current_state))):
return False
return set(desired_state) == set(current_state)
if (type(desired_state) is dict):
if (type(current_state) != dict):
return False
# iterate over dictionary keys
for key in desired_state.keys():
if key in ignore:
continue
v = desired_state[key]
if ((key not in current_state) or (not compare_state(v, current_state.get(key)))):
return False
return True
# lots of things get handled as strings in ansible that aren't necessarily strings, can extend this list later.
if isinstance(desired_state, str) and isinstance(current_state, int):
current_state = str(current_state)
return ((desired_state == current_state))
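
A small usage sketch of the comparison rules (recursive dict walk, order-insensitive lists, string/int coercion, ignored keys); the values are invented:

# Usage sketch: only keys of the desired state are checked against current state.
desired = {'ttl': '300', 'policies': ['dev', 'ops'], 'meta': {'team': 'x'}}
current = {'ttl': 300, 'policies': ['ops', 'dev'], 'meta': {'team': 'x'}}
compare_state(desired, current)                  # True: '300' and 300 compare equal
compare_state({'ttl': '600'}, current)           # False
compare_state(desired, current, ignore=['ttl'])  # True: ttl is skipped
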
def sigmoid_gradient(x, toe, knee, tk_exp, shoulder, head, sh_exp):
"""The sigmoid control point function
All functions (once agreed on) could then be symbolically integrated, and
the resultant integrated function could be used instead (with additional
functions to maintain the additional needed constants).
:param x: input value
:param toe: toe control point (x,y)
:param knee: knee control point (x,y)
:param tk_exp: toe-knee exponent
:param shoulder: shoulder control point (x,y)
:param head: head control point (x,y)
:param sh_exp: shoulder-head exponent
:return: output derivative value
"""
if toe.x <= x <= knee.x:
return (pow(x / knee.x, tk_exp) + toe.y) * knee.y
elif knee.x < x <= shoulder.x:
return lerp(knee.y, shoulder.y, (x - knee.x) / (shoulder.x - knee.x))
elif shoulder.x < x <= head.x:
return (pow(
((head.x - shoulder.x) - (x - shoulder.x)) / (head.x - shoulder.x),
sh_exp) * (shoulder.y - head.y)) + head.y
else:
return head.y
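
A hypothetical usage sketch; the control-point type is not shown in the source, so a namedtuple with .x/.y fields stands in for it here:

# Hypothetical usage sketch with an assumed Point type exposing .x and .y.
from collections import namedtuple
Point = namedtuple('Point', 'x y')
toe, knee = Point(0.0, 0.0), Point(0.18, 0.35)
shoulder, head = Point(0.75, 0.9), Point(1.0, 1.0)
# 0.5 lies between knee.x and shoulder.x, so the linear (lerp) segment applies.
y = sigmoid_gradient(0.5, toe, knee, 2.0, shoulder, head, 3.0)
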
def process(self):
"""Extend this method for implementing your Consumer logic.
If the message can not be processed and the Consumer should stop after
n failures to process messages, raise the ConsumerException.
:raises: ConsumerException
:raises: NotImplementedError
"""
raise NotImplementedError

def receive(self, message_in):
"""Process the message from RabbitMQ. To implement logic for processing
a message, extend Consumer._process, not this method.
:param rejected.Consumer.Message message_in: The message to process
:rtype: bool
"""
LOGGER.debug('Received: %r', message_in)
self._message = message_in
# Validate the message type if the child sets _MESSAGE_TYPE
if self.MESSAGE_TYPE and self.MESSAGE_TYPE != self.message_type:
LOGGER.error('Received a non-supported message type: %s',
self.message_type)
# Should the message be dropped or returned to the broker?
if self.DROP_INVALID_MESSAGES:
LOGGER.debug('Dropping the invalid message')
return
else:
raise ConsumerException('Invalid message type')
# Let the child object process the message
self.process()
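
A hedged sketch of a minimal consumer subclass built on the base class above; the class and message type names are invented:

# Hypothetical subclass: receive() enforces MESSAGE_TYPE, process() holds the logic.
class ExampleConsumer(Consumer):
    MESSAGE_TYPE = 'example.event'
    DROP_INVALID_MESSAGES = True

    def process(self):
        LOGGER.info('Handling %r', self.body)
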
def body(self):
"""Access the opaque body from the current message.
:rtype: str
"""
return self._message.body

def configuration(self):
"""Access the configuration stanza for the consumer as specified by
the ``config`` section for the consumer in the rejected configuration.
:rtype: dict
"""
return self._config

def content_encoding(self):
"""Access the current message's ``content-encoding`` property as an
attribute of the consumer class.
:rtype: str
"""
return (self._message.properties.content_encoding or '').lower() or None

def content_type(self):
"""Access the current message's ``content-type`` property as an
attribute of the consumer class.
:rtype: str
"""
return (self._message.properties.content_type or '').lower() or None

def correlation_id(self):
"""Access the current message's ``correlation-id`` property as an
attribute of the consumer class.
:rtype: str
"""
return self._message.properties.correlation_id

def exchange(self):
"""Access the exchange the message was published to as an attribute
of the consumer class.
:rtype: str
"""
return self._message.exchange

def expiration(self):
"""Access the current message's ``expiration`` property as an attribute
of the consumer class.
:rtype: str
"""
return self._message.properties.expiration

def headers(self):
"""Access the current message's ``headers`` property as an attribute
of the consumer class.
:rtype: dict
"""
return self._message.properties.headers or dict()

def name(self):
"""Property returning the name of the consumer class.
:rtype: str
"""
return self.__class__.__name__

def priority(self):
"""Access the current message's ``priority`` property as an
attribute of the consumer class.
:rtype: int
"""
return self._message.properties.priority

def properties(self):
"""Access the current message's properties in dict form as an attribute
of the consumer class.
:rtype: dict
"""
return dict(self._message.properties)

def redelivered(self):
"""Indicates if the current message has been redelivered.
:rtype: bool
"""
return self._message.redelivered

def routing_key(self):
"""Access the routing key for the current message.
:rtype: str
"""
return self._message.routing_key

def message_type(self):
"""Access the current message's ``type`` property as an attribute of
the consumer class.
:rtype: str
"""
return self._message.properties.type

def timestamp(self):
"""Access the unix epoch timestamp value from the properties of the
current message.
:rtype: int
"""
return self._message.properties.timestamp

def user_id(self):
"""Access the user-id from the current message's properties.
:rtype: str
"""
return self._message.properties.user_id

def publish_message(self, exchange, routing_key, properties, body):
"""Publish a message to RabbitMQ on the same channel the original
message was received on.
:param str exchange: The exchange to publish to
:param str routing_key: The routing key to publish with
:param dict properties: The message properties
:param str body: The message body
"""
# Convert the dict to pika.BasicProperties
LOGGER.debug('Converting properties')
msg_props = self._get_pika_properties(properties)
# Publish the message
LOGGER.debug('Publishing message to %s:%s', exchange, routing_key)
self._channel.basic_publish(exchange=exchange,
routing_key=routing_key,
properties=msg_props,
body=body)

def reply(self, response_body, properties, auto_id=True,
exchange=None, reply_to=None):
"""Reply to the received message.
If auto_id is True, a new uuid4 value will be generated for the
message_id and correlation_id will be set to the message_id of the
original message. In addition, the timestamp will be assigned the
current time of the message. If auto_id is False, neither the
message_id or the correlation_id will be changed in the properties.
If exchange is not set, the exchange the message was received on will
be used.
If reply_to is set in the original properties,
it will be used as the routing key. If the reply_to is not set
in the properties and it is not passed in, a ValueError will be
raised. If reply_to is set in the properties, it will be cleared out
prior to the message being republished.
:param any response_body: The message body to send
:param rejected.data.Properties properties: Message properties to use
:param bool auto_id: Automatically shuffle message_id and correlation_id
:param str reply_to: Override the reply_to in the properties
:raises: ValueError
"""
if not properties.reply_to and not reply_to:
raise ValueError('Missing reply_to in properties or as argument')
if auto_id and properties.message_id:
properties.app_id = __name__
properties.correlation_id = properties.message_id
properties.message_id = str(uuid.uuid4())
properties.timestamp = int(time.time())
LOGGER.debug('New message_id: %s', properties.message_id)
LOGGER.debug('Correlation_id: %s', properties.correlation_id)
# Redefine the reply to if needed
reply_to = reply_to or properties.reply_to
# Wipe out reply_to if it's set
if properties.reply_to:
properties.reply_to = None
self.publish_message(exchange or self._message.exchange,
reply_to,
dict(properties),
response_body)

def body(self):
"""Return the message body, unencoded if needed,
deserialized if possible.
:rtype: any
"""
# Return a materialized view of the body if it has been previously set
if self._message_body:
return self._message_body
# Handle bzip2 compressed content
elif self.content_encoding == 'bzip2':
self._message_body = self._decode_bz2(self._message.body)
# Handle zlib compressed content
elif self.content_encoding == 'gzip':
self._message_body = self._decode_gzip(self._message.body)
# Else we want to assign self._message.body to self._message_body
else:
self._message_body = self._message.body
# Handle the auto-deserialization
if self.content_type == 'application/json':
self._message_body = self._load_json_value(self._message_body)
elif self.content_type in PICKLE_MIME_TYPES:
self._message_body = self._load_pickle_value(self._message_body)
elif self.content_type == 'application/x-plist':
self._message_body = self._load_plist_value(self._message_body)
elif self.content_type == 'text/csv':
self._message_body = self._load_csv_value(self._message_body)
elif bs4 and self.content_type in BS4_MIME_TYPES:
self._message_body = self._load_bs4_value(self._message_body)
elif self.content_type in YAML_MIME_TYPES:
self._message_body = self._load_yaml_value(self._message_body)
# Return the message body
return self._message_body

def _load_bs4_value(value):
"""Load an HTML or XML string into an lxml etree object.
:param str value: The HTML or XML string
:rtype: bs4.BeautifulSoup
:raises: ConsumerException
"""
if not bs4:
raise ConsumerException('BeautifulSoup4 is not enabled')
return bs4.BeautifulSoup(value)

def _load_csv_value(value):
"""Create a csv.DictReader instance for the sniffed dialect for the
value passed in.
:param str value: The CSV value
:rtype: csv.DictReader
"""
csv_buffer = stringio.StringIO(value)
dialect = csv.Sniffer().sniff(csv_buffer.read(1024))
csv_buffer.seek(0)
return csv.DictReader(csv_buffer, dialect=dialect)

def _load_json_value(value):
"""Deserialize a JSON string returning the native Python data type
for the value.
:param str value: The JSON string
:rtype: object
"""
try:
return json.loads(value, encoding='utf-8')
except ValueError as error:
LOGGER.error('Could not decode message body: %s', error,
exc_info=sys.exc_info())
raise MessageException(error)

def _load_pickle_value(value):
"""Deserialize a pickle string returning the native Python data type
for the value.
:param str value: The pickle string
:rtype: object
"""
return pickle.loads(value)

def _load_plist_value(value):
"""Deserialize a plist string returning the native Python data type
for the value.
:param str value: The plist string
:rtype: dict
"""
return plistlib.readPlistFromString(value)

def _load_yaml_value(value):
"""Load an YAML string into an dict object.
:param str value: The YAML string
:rtype: any
:raises: ConsumerException
"""
return yaml.load(value)

def publish_message(self, exchange, routing_key, properties, body,
no_serialization=False, no_encoding=False):
"""Publish a message to RabbitMQ on the same channel the original
message was received on.
By default, if you pass a non-string object to the body and the
properties have a supported content-type set, the body will be
auto-serialized in the specified content-type.
If the properties do not have a timestamp set, it will be set to the
current time.
If you specify a content-encoding in the properties and the encoding is
supported, the body will be auto-encoded.
Both of these behaviors can be disabled by setting no_serialization or
no_encoding to True.
:param str exchange: The exchange to publish to
:param str routing_key: The routing key to publish with
:param dict properties: The message properties
:param no_serialization: Turn off auto-serialization of the body
:param no_encoding: Turn off auto-encoding of the body
"""
# Convert the rejected.data.Properties object to a pika.BasicProperties
LOGGER.debug('Converting properties')
properties_out = self._get_pika_properties(properties)
# Auto-serialize the content if needed
if (not no_serialization and not isinstance(body, basestring) and
properties.get('content_type')):
LOGGER.debug('Auto-serializing message body')
body = self._auto_serialize(properties.get('content_type'), body)
# Auto-encode the message body if needed
if not no_encoding and properties.get('content_encoding'):
LOGGER.debug('Auto-encoding message body')
body = self._auto_encode(properties.get('content_encoding'), body)
# Publish the message
LOGGER.debug('Publishing message to %s:%s', exchange, routing_key)
self._channel.basic_publish(exchange=exchange,
routing_key=routing_key,
properties=properties_out,
body=body)
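
A hedged sketch of publishing with auto-serialization from inside a consumer method; the exchange, routing key, and body values are invented:

# Hypothetical call: with content_type set to application/json, the dict body
# below would be auto-serialized before publishing.
self.publish_message(
    exchange='events',
    routing_key='wpt.results',
    properties={'content_type': 'application/json', 'app_id': 'example'},
    body={'test': '/dom/example.html', 'passes': 2, 'total': 3},
)
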
def _auto_encode(self, content_encoding, value):
"""Based upon the value of the content_encoding, encode the value.
:param str content_encoding: The content encoding type (gzip, bzip2)
:param str value: The value to encode
:rtype: value
"""
if content_encoding == 'gzip':
return self._encode_gzip(value)
if content_encoding == 'bzip2':
return self._encode_bz2(value)
LOGGER.warning('Invalid content-encoding specified for auto-encoding')
return value

def _auto_serialize(self, content_type, value):
"""Auto-serialization of the value based upon the content-type value.
:param str content_type: The content type to serialize
:param any value: The value to serialize
:rtype: str
"""
if content_type == 'application/json':
LOGGER.debug('Auto-serializing content as JSON')
return self._dump_json_value(value)
if content_type in PICKLE_MIME_TYPES:
LOGGER.debug('Auto-serializing content as Pickle')
return self._dump_pickle_value(value)
if content_type == 'application/x-plist':
LOGGER.debug('Auto-serializing content as plist')
return self._dump_plist_value(value)
if content_type == 'text/csv':
LOGGER.debug('Auto-serializing content as csv')
return self._dump_csv_value(value)
# If it's XML or HTML auto
if (bs4 and isinstance(value, bs4.BeautifulSoup) and
content_type in ('text/html', 'text/xml')):
LOGGER.debug('Dumping BS4 object into HTML or XML')
return self._dump_bs4_value(value)
# If it's YAML, load the content via pyyaml into a dict
if self.content_type in YAML_MIME_TYPES:
LOGGER.debug('Auto-serializing content as YAML')
return self._dump_yaml_value(value)
LOGGER.warning('Invalid content-type specified for auto-serialization')
return value | def _auto_serialize(self, content_type, value):
"""Auto-serialization of the value based upon the content-type value.
:param str content_type: The content type to serialize
:param any value: The value to serialize
:rtype: str
"""
if content_type == 'application/json':
LOGGER.debug('Auto-serializing content as JSON')
return self._dump_json_value(value)
if content_type in PICKLE_MIME_TYPES:
LOGGER.debug('Auto-serializing content as Pickle')
return self._dump_pickle_value(value)
if content_type == 'application/x-plist':
LOGGER.debug('Auto-serializing content as plist')
return self._dump_plist_value(value)
if content_type == 'text/csv':
LOGGER.debug('Auto-serializing content as csv')
return self._dump_csv_value(value)
# If it's an XML or HTML BeautifulSoup object, dump it as markup
if (bs4 and isinstance(value, bs4.BeautifulSoup) and
content_type in ('text/html', 'text/xml')):
LOGGER.debug('Dumping BS4 object into HTML or XML')
return self._dump_bs4_value(value)
# If it's YAML, dump the content via pyyaml into a string
if content_type in YAML_MIME_TYPES:
LOGGER.debug('Auto-serializing content as YAML')
return self._dump_yaml_value(value)
LOGGER.warning('Invalid content-type specified for auto-serialization')
return value |
Python | def _dump_csv_value(value):
"""Take a list of lists and return it as a CSV value
:param list value: A list of lists to return as a CSV
:rtype: str
"""
buffer = stringio.StringIO()
writer = csv.writer(buffer, quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerows(value)
buffer.seek(0)
value = buffer.read()
buffer.close()
return value | def _dump_csv_value(value):
"""Take a list of lists and return it as a CSV value
:param list value: A list of lists to return as a CSV
:rtype: str
"""
buffer = stringio.StringIO()
writer = csv.writer(buffer, quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerows(value)
buffer.seek(0)
value = buffer.read()
buffer.close()
return value |
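A quick, self-contained illustration of the list-of-lists to CSV conversion performed above, written against Python 3's io.StringIO (the stringio alias used in the code is assumed to point at an equivalent buffer class):

import csv
import io

rows = [['id', 'name'], [1, 'alpha'], [2, 'beta']]
buffer = io.StringIO()
writer = csv.writer(buffer, quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerows(rows)
print(buffer.getvalue())
# "id","name"
# "1","alpha"
# "2","beta"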
Python | def _dump_pickle_value(value):
"""Serialize a value into the pickle format
:param any value: The object to pickle
:rtype: str
"""
return pickle.dumps(value) | def _dump_pickle_value(value):
"""Serialize a value into the pickle format
:param any value: The object to pickle
:rtype: str
"""
return pickle.dumps(value) |
Python | def _dump_plist_value(value):
"""Create a plist value from a dictionary
:param dict value: The value to make the plist from
:rtype: str
"""
return plistlib.writePlistToString(value) | def _dump_plist_value(value):
"""Create a plist value from a dictionary
:param dict value: The value to make the plist from
:rtype: str
"""
return plistlib.writePlistToString(value) |
Python | def _dump_yaml_value(value):
"""Dump a dict into a YAML string
:param dict value: The value to dump as a YAML string
:rtype: str
"""
return yaml.dump(value) | def _dump_yaml_value(value):
"""Dump a dict into a YAML string
:param dict value: The value to dump as a YAML string
:rtype: str
"""
return yaml.dump(value) |
Python | def import_consumer(value):
"""Pass in a string in the format of foo.Bar, foo.bar.Baz, foo.bar.baz.Qux
and it will return a handle to the class
:param str value: The consumer class in module.Consumer format
:return: tuple(Class, str)
"""
parts = value.split('.')
import_name = '.'.join(parts[0:-1])
import_handle = importlib.import_module(import_name)
if hasattr(import_handle, '__version__'):
version = import_handle.__version__
else:
version = None
# Return the class handle
return getattr(import_handle, parts[-1]), version | def import_consumer(value):
"""Pass in a string in the format of foo.Bar, foo.bar.Baz, foo.bar.baz.Qux
and it will return a handle to the class
:param str value: The consumer class in module.Consumer format
:return: tuple(Class, str)
"""
parts = value.split('.')
import_name = '.'.join(parts[0:-1])
import_handle = importlib.import_module(import_name)
if hasattr(import_handle, '__version__'):
version = import_handle.__version__
else:
version = None
# Return the class handle
return getattr(import_handle, parts[-1]), version |
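A standalone usage sketch of the module.Class string handling above; the stdlib class below merely stands in for a real consumer path such as myapp.consumers.ExampleConsumer (a hypothetical name):

import importlib

def import_consumer(value):
    """Same splitting logic as above, repeated here so the example runs alone."""
    parts = value.split('.')
    handle = importlib.import_module('.'.join(parts[:-1]))
    return getattr(handle, parts[-1]), getattr(handle, '__version__', None)

cls, version = import_consumer('logging.handlers.RotatingFileHandler')
print(cls, version)  # <class 'logging.handlers.RotatingFileHandler'> None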
Python | def ack_message(self, delivery_tag):
"""Acknowledge the message on the broker and log the ack
:param str delivery_tag: Delivery tag to acknowledge
"""
if not self.can_respond:
LOGGER.warning('Can not ack message, disconnected from RabbitMQ')
self.increment_count(self.CLOSED_ON_COMPLETE)
return
LOGGER.debug('Acking %s', delivery_tag)
self._channel.basic_ack(delivery_tag=delivery_tag)
self.increment_count(self.ACKED) | def ack_message(self, delivery_tag):
"""Acknowledge the message on the broker and log the ack
:param str delivery_tag: Delivery tag to acknowledge
"""
if not self.can_respond:
LOGGER.warning('Can not ack message, disconnected from RabbitMQ')
self.increment_count(self.CLOSED_ON_COMPLETE)
return
LOGGER.debug('Acking %s', delivery_tag)
self._channel.basic_ack(delivery_tag=delivery_tag)
self.increment_count(self.ACKED) |
Python | def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.debug('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed) | def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.debug('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed) |
Python | def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
LOGGER.debug('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed) | def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
LOGGER.debug('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed) |
Python | def base_qos_prefetch(self):
"""Return the base, configured QoS prefetch value.
:rtype: int
"""
return self._config.get('qos_prefetch', self._QOS_PREFETCH_COUNT) | def base_qos_prefetch(self):
"""Return the base, configured QoS prefetch value.
:rtype: int
"""
return self._config.get('qos_prefetch', self._QOS_PREFETCH_COUNT) |
Python | def calculate_qos_prefetch(self):
"""Determine if the channel should use the dynamic QoS value, stay at
the same QoS or use the default QoS.
:rtype: bool or int
"""
if not self._last_stats_time:
return
qos_prefetch = self.dynamic_qos_pretch
# Don't change anything
if qos_prefetch == self._qos_prefetch:
LOGGER.debug('No change in QoS prefetch calculation of %i',
self._qos_prefetch)
return False
# Don't change anything
if self.count_processed_last_interval < qos_prefetch:
LOGGER.error('Processed fewer messages last interval than the '
'qos_prefetch value')
return False
# If calculated QoS exceeds max
if qos_prefetch > self._QOS_MAX:
LOGGER.debug('Hit QoS Max ceiling of %i', self._QOS_MAX)
return self.set_qos_prefetch(self._QOS_MAX)
# Set to base value if QoS calc is < than the base
if self.base_qos_prefetch > qos_prefetch:
LOGGER.debug('QoS calculation is lower than base: %i < %i',
qos_prefetch, self.base_qos_prefetch)
return self.set_qos_prefetch()
# Increase the QoS setting
if qos_prefetch > self._qos_prefetch:
LOGGER.debug('QoS calculation is higher than previous: %i > %i',
qos_prefetch, self._qos_prefetch)
return self.set_qos_prefetch(qos_prefetch)
# Lower the QoS value based upon the processed qty
if qos_prefetch < self._qos_prefetch:
LOGGER.debug('QoS calculation is lower than previous: %i < %i',
qos_prefetch, self._qos_prefetch)
return self.set_qos_prefetch(qos_prefetch) | def calculate_qos_prefetch(self):
"""Determine if the channel should use the dynamic QoS value, stay at
the same QoS or use the default QoS.
:rtype: bool or int
"""
if not self._last_stats_time:
return
qos_prefetch = self.dynamic_qos_pretch
# Don't change anything
if qos_prefetch == self._qos_prefetch:
LOGGER.debug('No change in QoS prefetch calculation of %i',
self._qos_prefetch)
return False
# Don't change anything
if self.count_processed_last_interval < qos_prefetch:
LOGGER.error('Processed fewer messages last interval than the '
'qos_prefetch value')
return False
# If calculated QoS exceeds max
if qos_prefetch > self._QOS_MAX:
LOGGER.debug('Hit QoS Max ceiling of %i', self._QOS_MAX)
return self.set_qos_prefetch(self._QOS_MAX)
# Set to base value if QoS calc is < than the base
if self.base_qos_prefetch > qos_prefetch:
LOGGER.debug('QoS calculation is lower than base: %i < %i',
qos_prefetch, self.base_qos_prefetch)
return self.set_qos_prefetch()
# Increase the QoS setting
if qos_prefetch > self._qos_prefetch:
LOGGER.debug('QoS calculation is higher than previous: %i > %i',
qos_prefetch, self._qos_prefetch)
return self.set_qos_prefetch(qos_prefetch)
# Lower the QoS value based upon the processed qty
if qos_prefetch < self._qos_prefetch:
LOGGER.debug('QoS calculation is lower than previous: %i < %i',
qos_prefetch, self._qos_prefetch)
return self.set_qos_prefetch(qos_prefetch) |
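A standalone sketch of the decision ladder above, returning the prefetch value that would be applied; the constants are assumptions standing in for the class-level _QOS_MAX and _QOS_PREFETCH_MULTIPLIER, not the library's actual defaults.

import math

QOS_MAX = 500
QOS_PREFETCH_MULTIPLIER = 1.25

def next_prefetch(velocity, current, base, processed_last_interval):
    calculated = int(math.ceil(velocity * QOS_PREFETCH_MULTIPLIER))
    if calculated == current:
        return current                    # no change
    if processed_last_interval < calculated:
        return current                    # not keeping up, hold steady
    if calculated > QOS_MAX:
        return QOS_MAX                    # clamp to the ceiling
    if calculated < base:
        return base                       # never drop below the base value
    return calculated                     # otherwise raise or lower to the calc

print(next_prefetch(velocity=40.0, current=30, base=10, processed_last_interval=60))  # 50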
Python | def can_respond(self):
"""Indicates if the process can still respond to RabbitMQ when the
processing of a message has completed.
:return: bool
"""
if not self._channel:
return False
return self._message_connection_id == self._connection_id | def can_respond(self):
"""Indicates if the process can still respond to RabbitMQ when the
processing of a message has completed.
:return: bool
"""
if not self._channel:
return False
return self._message_connection_id == self._connection_id |
Python | def cancel_consumer_with_rabbitmq(self):
"""Tell RabbitMQ the process no longer wants to consumer messages."""
LOGGER.debug('Sending a Basic.Cancel to RabbitMQ')
if self._channel and self._channel.is_open:
self._channel.basic_cancel(consumer_tag=self.name) | def cancel_consumer_with_rabbitmq(self):
"""Tell RabbitMQ the process no longer wants to consumer messages."""
LOGGER.debug('Sending a Basic.Cancel to RabbitMQ')
if self._channel and self._channel.is_open:
self._channel.basic_cancel(consumer_tag=self.name) |
Python | def connect_to_rabbitmq(self, cfg, name):
"""Connect to RabbitMQ returning the connection handle.
:param dict cfg: The Connections section of the configuration
:param str name: The name of the connection
:rtype: pika.adapters.tornado_connection.TornadoConnection
"""
LOGGER.debug('Connecting to %s:%i:%s as %s',
cfg[name]['host'], cfg[name]['port'],
cfg[name]['vhost'], cfg[name]['user'])
self.set_state(self.STATE_CONNECTING)
self._connection_id += 1
hb_interval = cfg[name].get('heartbeat_interval', self._HBINTERVAL)
parameters = self.get_connection_parameters(cfg[name]['host'],
cfg[name]['port'],
cfg[name]['vhost'],
cfg[name]['user'],
cfg[name]['pass'],
hb_interval)
return tornado_connection.TornadoConnection(parameters,
self.on_connection_open,
stop_ioloop_on_close=False) | def connect_to_rabbitmq(self, cfg, name):
"""Connect to RabbitMQ returning the connection handle.
:param dict cfg: The Connections section of the configuration
:param str name: The name of the connection
:rtype: pika.adapters.tornado_connection.TornadoConnection
"""
LOGGER.debug('Connecting to %s:%i:%s as %s',
cfg[name]['host'], cfg[name]['port'],
cfg[name]['vhost'], cfg[name]['user'])
self.set_state(self.STATE_CONNECTING)
self._connection_id += 1
hb_interval = cfg[name].get('heartbeat_interval', self._HBINTERVAL)
parameters = self.get_connection_parameters(cfg[name]['host'],
cfg[name]['port'],
cfg[name]['vhost'],
cfg[name]['user'],
cfg[name]['pass'],
hb_interval)
return tornado_connection.TornadoConnection(parameters,
self.on_connection_open,
stop_ioloop_on_close=False) |
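The cfg argument is expected to look roughly like the following, with the keys inferred from the lookups above; all values here are illustrative.

connections = {
    'rabbit1': {
        'host': 'localhost',
        'port': 5672,
        'vhost': '/',
        'user': 'guest',
        'pass': 'guest',
        'heartbeat_interval': 300,
    },
}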
Python | def count(self, stat):
"""Return the current count quantity for a specific stat.
:param str stat: Name of stat to get value for
:rtype: int or float
"""
return self._counts.get(stat, 0) | def count(self, stat):
"""Return the current count quantity for a specific stat.
:param str stat: Name of stat to get value for
:rtype: int or float
"""
return self._counts.get(stat, 0) |
Python | def count_processed_last_interval(self):
"""Return the number of messages counted in the last interval. If
there are no counts for the last interval, return 0.
:rtype: int
"""
if not self._last_counts:
return 0
return self._counts[self.PROCESSED] - self._last_counts[self.PROCESSED] | def count_processed_last_interval(self):
"""Return the number of messages counted in the last interval. If
there are no counts for the last interval, return 0.
:rtype: int
"""
if not self._last_counts:
return 0
return self._counts[self.PROCESSED] - self._last_counts[self.PROCESSED] |
Python | def dynamic_qos_pretch(self):
"""Calculate the prefetch count based upon the message velocity * the
_QOS_PREFETCH_MULTIPLIER.
:rtype: int
"""
# Round up the velocity * the multiplier
value = int(math.ceil(self.message_velocity *
float(self._QOS_PREFETCH_MULTIPLIER)))
LOGGER.debug('Calculated prefetch value: %i', value)
return value | def dynamic_qos_pretch(self):
"""Calculate the prefetch count based upon the message velocity * the
_QOS_PREFETCH_MULTIPLIER.
:rtype: int
"""
# Round up the velocity * the multiplier
value = int(math.ceil(self.message_velocity *
float(self._QOS_PREFETCH_MULTIPLIER)))
LOGGER.debug('Calculated prefetch value: %i', value)
return value |
Python | def increment_count(self, counter, value=1):
"""Increment the specified counter, checking to see if the counter is
the error counter. If it is, check to see if there have been too many
errors and if it needs to reconnect.
:param str counter: The counter name passed in from the constant
:param int|float value: The amount to increment by
"""
self._counts[counter] += value | def increment_count(self, counter, value=1):
"""Increment the specified counter, checking to see if the counter is
the error counter. If it is, check to see if there have been too many
errors and if it needs to reconnect.
:param str counter: The counter name passed in from the constant
:param int|float value: The amount to increment by
"""
self._counts[counter] += value |
Python | def invoke_consumer(self, message):
"""Wrap the actual processor processing bits
:param Message message: Message to process
:raises: consumer.ConsumerException
"""
self.start_message_processing()
# Try and process the message
try:
LOGGER.debug('Processing message')
self._consumer.receive(message)
except KeyboardInterrupt:
self.reject(message.delivery_tag, True)
self.stop()
return False
except exceptions.ChannelClosed as error:
LOGGER.critical('RabbitMQ closed the channel: %r', error)
self.reconnect()
return False
except exceptions.ConnectionClosed as error:
LOGGER.critical('RabbitMQ closed the connection: %r', error)
self.reconnect()
return False
except consumer.ConsumerException as error:
self.record_exception(error, True, sys.exc_info())
self.reject(message.delivery_tag, True)
self.processing_failure()
return False
except consumer.MessageException as error:
self.record_exception(error, True, sys.exc_info())
self.reject(message.delivery_tag, False)
return False
except Exception as error:
self.record_exception(error, True, sys.exc_info())
self.reject(message.delivery_tag, True)
self.processing_failure()
return False
return True | def invoke_consumer(self, message):
"""Wrap the actual processor processing bits
:param Message message: Message to process
:raises: consumer.ConsumerException
"""
self.start_message_processing()
# Try and process the message
try:
LOGGER.debug('Processing message')
self._consumer.receive(message)
except KeyboardInterrupt:
self.reject(message.delivery_tag, True)
self.stop()
return False
except exceptions.ChannelClosed as error:
LOGGER.critical('RabbitMQ closed the channel: %r', error)
self.reconnect()
return False
except exceptions.ConnectionClosed as error:
LOGGER.critical('RabbitMQ closed the connection: %r', error)
self.reconnect()
return False
except consumer.ConsumerException as error:
self.record_exception(error, True, sys.exc_info())
self.reject(message.delivery_tag, True)
self.processing_failure()
return False
except consumer.MessageException as error:
self.record_exception(error, True, sys.exc_info())
self.reject(message.delivery_tag, False)
return False
except Exception as error:
self.record_exception(error, True, sys.exc_info())
self.reject(message.delivery_tag, True)
self.processing_failure()
return False
return True |
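To make the exception contract concrete, here is a hedged sketch of a consumer as this handler expects it to behave. The base class and exception names come from the except clauses above, while self.body and _backend_available are illustrative, and the exact hook names can differ between rejected versions.

from rejected import consumer

class ExampleConsumer(consumer.Consumer):
    """MessageException -> rejected without requeue; ConsumerException (or any
    other unhandled error) -> rejected with requeue and counted as a failure."""

    def process(self):
        if not self.body:                      # illustrative accessor
            raise consumer.MessageException('Empty body, discarding')
        if not self._backend_available():      # illustrative helper
            raise consumer.ConsumerException('Backend unavailable, requeue')
        # ... normal message handling goes here ...

    def _backend_available(self):
        return True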
Python | def is_processing(self):
"""Returns a bool specifying if the consumer is currently processing
:rtype: bool
"""
return self._state in [self.STATE_PROCESSING, self.STATE_STOP_REQUESTED] | def is_processing(self):
"""Returns a bool specifying if the consumer is currently processing
:rtype: bool
"""
return self._state in [self.STATE_PROCESSING, self.STATE_STOP_REQUESTED] |
Python | def message_velocity(self):
"""Return the message consuming velocity for the process.
:rtype: float
"""
processed = self.count_processed_last_interval
duration = time.time() - self._last_stats_time
LOGGER.debug('Processed %i messages in %i seconds', processed, duration)
# If there were no messages, do not calculate, use the base
if not processed or not duration:
return 0
# Calculate the velocity as the basis for the calculation
velocity = float(processed) / float(duration)
LOGGER.debug('Message processing velocity: %.2f', velocity)
return velocity | def message_velocity(self):
"""Return the message consuming velocity for the process.
:rtype: float
"""
processed = self.count_processed_last_interval
duration = time.time() - self._last_stats_time
LOGGER.debug('Processed %i messages in %i seconds', processed, duration)
# If there were no messages, do not calculate, use the base
if not processed or not duration:
return 0
# Calculate the velocity as the basis for the calculation
velocity = float(processed) / float(duration)
LOGGER.debug('Message processing velocity: %.2f', velocity)
return velocity |
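A worked example of the velocity math above, with 1.25 assumed as the prefetch multiplier purely for illustration:

import math

processed = 150    # messages handled since the last stats interval
duration = 60.0    # seconds since the last stats snapshot
velocity = processed / duration                # 2.5 messages per second
prefetch = int(math.ceil(velocity * 1.25))     # ceil(3.125) -> 4
print(velocity, prefetch)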
Python | def new_counter_dict(self):
"""Return a dict object for our internal stats keeping.
:rtype: dict
"""
return {self.ACKED: 0,
self.CLOSED_ON_COMPLETE: 0,
self.ERROR: 0,
self.FAILURES: 0,
self.UNHANDLED_EXCEPTIONS: 0,
self.PROCESSED: 0,
self.RECONNECTED: 0,
self.REDELIVERED: 0,
self.REJECTED: 0,
self.REQUEUED: 0,
self.TIME_SPENT: 0,
self.TIME_WAITED: 0} | def new_counter_dict(self):
"""Return a dict object for our internal stats keeping.
:rtype: dict
"""
return {self.ACKED: 0,
self.CLOSED_ON_COMPLETE: 0,
self.ERROR: 0,
self.FAILURES: 0,
self.UNHANDLED_EXCEPTIONS: 0,
self.PROCESSED: 0,
self.RECONNECTED: 0,
self.REDELIVERED: 0,
self.REJECTED: 0,
self.REQUEUED: 0,
self.TIME_SPENT: 0,
self.TIME_WAITED: 0} |
Python | def on_channel_closed(self, method_frame):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shut down the object.
:param pika.frame.Method method_frame: The Channel.Close method frame
"""
LOGGER.critical('Channel was closed: (%s) %s',
method_frame.method.reply_code,
method_frame.method.reply_text)
del self._channel
raise ReconnectConnection | def on_channel_closed(self, method_frame):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shut down the object.
:param pika.frame.Method method_frame: The Channel.Close method frame
"""
LOGGER.critical('Channel was closed: (%s) %s',
method_frame.method.reply_code,
method_frame.method.reply_text)
del self._channel
raise ReconnectConnection |
Python | def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened. It
will change the state to IDLE, add the callbacks and setup the channel
to start consuming.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_channel() | def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened. It
will change the state to IDLE, add the callbacks and setup the channel
to start consuming.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_channel() |
Python | def on_connection_closed(self, unused, code, text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection unused: The closed connection
"""
LOGGER.critical('Connection from RabbitMQ closed in state %i (%s, %s)',
self.state_description, code, text)
self._channel = None
if not self.is_shutting_down and not self.is_waiting_to_shutdown:
self.reconnect() | def on_connection_closed(self, unused, code, text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection unused: The closed connection
"""
LOGGER.critical('Connection from RabbitMQ closed in state %i (%s, %s)',
self.state_description, code, text)
self._channel = None
if not self.is_shutting_down and not self.is_waiting_to_shutdown:
self.reconnect() |
Python | def on_connection_open(self, unused):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused: pika.adapters.tornado_connection.TornadoConnection
"""
LOGGER.debug('Connection opened')
self.add_on_connection_close_callback()
self.open_channel() | def on_connection_open(self, unused):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused: pika.adapters.tornado_connection.TornadoConnection
"""
LOGGER.debug('Connection opened')
self.add_on_connection_close_callback()
self.open_channel() |
Python | def on_sigprof(self, unused_signum, unused_frame):
"""Called when SIGPROF is sent to the process, will dump the stats, in
future versions, queue them for the master process to get data.
:param int unused_signum: The signal number
:param frame unused_frame: The python frame the signal was received at
"""
values = dict()
if self.is_processing or self.is_idle:
self.calculate_qos_prefetch()
for key in self._counts.keys():
values[key] = self._counts[key] - self._last_counts.get(key, 0)
self._last_counts[key] = self._counts[key]
if self._statsd:
self.send_counter_to_statsd(key, values[key])
self._stats_queue.put({'name': self.name,
'consumer_name': self._consumer_name,
'counts': values}, True)
LOGGER.debug('Currently %s: %r', self.state_description, values)
self._last_stats_time = time.time()
signal.siginterrupt(signal.SIGPROF, False) | def on_sigprof(self, unused_signum, unused_frame):
"""Called when SIGPROF is sent to the process, will dump the stats, in
future versions, queue them for the master process to get data.
:param int unused_signum: The signal number
:param frame unused_frame: The python frame the signal was received at
"""
values = dict()
if self.is_processing or self.is_idle:
self.calculate_qos_prefetch()
for key in self._counts.keys():
values[key] = self._counts[key] - self._last_counts.get(key, 0)
self._last_counts[key] = self._counts[key]
if self._statsd:
self.send_counter_to_statsd(key, values[key])
self._stats_queue.put({'name': self.name,
'consumer_name': self._consumer_name,
'counts': values}, True)
LOGGER.debug('Currently %s: %r', self.state_description, values)
self._last_stats_time = time.time()
signal.siginterrupt(signal.SIGPROF, False) |
Python | def processing_failure(self):
"""Called when message processing failure happens due to a
ConsumerException or an unhandled exception.
"""
duration = time.time() - self._last_failure
if duration > self._MAX_ERROR_WINDOW:
LOGGER.info('Resetting failure window, %i seconds since last',
duration)
self.reset_failure_counter()
self.increment_count(self.FAILURES, -1)
self._last_failure = time.time()
if self._counts[self.FAILURES] == 0:
LOGGER.critical('Error threshold exceeded (%i), reconnecting',
self._counts[self.ERROR])
self.cancel_consumer_with_rabbitmq()
self.close_connection()
self.reconnect() | def processing_failure(self):
"""Called when message processing failure happens due to a
ConsumerException or an unhandled exception.
"""
duration = time.time() - self._last_failure
if duration > self._MAX_ERROR_WINDOW:
LOGGER.info('Resetting failure window, %i seconds since last',
duration)
self.reset_failure_counter()
self.increment_count(self.FAILURES, -1)
self._last_failure = time.time()
if self._counts[self.FAILURES] == 0:
LOGGER.critical('Error threshold exceeded (%i), reconnecting',
self._counts[self.ERROR])
self.cancel_consumer_with_rabbitmq()
self.close_connection()
self.reconnect() |
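The failure counter counts down: reset_failure_counter primes it to the configured maximum, each failure inside the error window decrements it, and reaching zero forces a reconnect. A standalone sketch of that behaviour, with illustrative constants standing in for the configured maximum and _MAX_ERROR_WINDOW:

import time

MAX_ERRORS = 3
ERROR_WINDOW = 60.0

failures_left = MAX_ERRORS
last_failure = 0.0

def record_failure(now):
    global failures_left, last_failure
    if now - last_failure > ERROR_WINDOW:
        failures_left = MAX_ERRORS        # window expired, reset the counter
    failures_left -= 1
    last_failure = now
    return failures_left == 0             # True means it is time to reconnect

for offset in (0, 5, 10):
    print(record_failure(time.time() + offset))
# False, False, True: the third failure inside the window forces a reconnect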
Python | def reconnect(self):
"""Reconnect to RabbitMQ after sleeping for _RECONNECT_DELAY"""
LOGGER.info('Reconnecting to RabbitMQ in %i seconds',
self._RECONNECT_DELAY)
self.increment_count(self.RECONNECTED)
self.set_state(self.STATE_INITIALIZING)
if self._connection:
if self._connection.socket:
fd = self._connection.socket.fileno()
self._ioloop.remove_handler(fd)
self._connection = None
self._ioloop.add_timeout(time.time() + self._RECONNECT_DELAY,
self._reconnect) | def reconnect(self):
"""Reconnect to RabbitMQ after sleeping for _RECONNECT_DELAY"""
LOGGER.info('Reconnecting to RabbitMQ in %i seconds',
self._RECONNECT_DELAY)
self.increment_count(self.RECONNECTED)
self.set_state(self.STATE_INITIALIZING)
if self._connection:
if self._connection.socket:
fd = self._connection.socket.fileno()
self._ioloop.remove_handler(fd)
self._connection = None
self._ioloop.add_timeout(time.time() + self._RECONNECT_DELAY,
self._reconnect) |
Python | def _reconnect(self):
"""Create and set the RabbitMQ connection"""
LOGGER.info('Connecting to RabbitMQ')
self.reset_failure_counter()
self._connection = self.connect_to_rabbitmq(self._connections,
self._connection_name)
self.setup_signal_handlers() | def _reconnect(self):
"""Create and set the RabbitMQ connection"""
LOGGER.info('Connecting to RabbitMQ')
self.reset_failure_counter()
self._connection = self.connect_to_rabbitmq(self._connections,
self._connection_name)
self.setup_signal_handlers() |
Python | def reject(self, delivery_tag, requeue=True):
"""Reject the message on the broker and log it. We should move this to
use nack when Pika supports it in a released version.
:param str delivery_tag: Delivery tag to reject
:param bool requeue: Specify if the message should be re-queued or not
"""
if not self._ack:
raise RuntimeError('Cannot reject messages when ack is False')
if not self.can_respond:
LOGGER.warning('Can not reject message, disconnected from RabbitMQ')
self.increment_count(self.CLOSED_ON_COMPLETE)
if self.is_processing:
self.reset_state()
return
LOGGER.warning('Rejecting message %s %s requeue', delivery_tag,
'with' if requeue else 'without')
self._channel.basic_nack(delivery_tag=delivery_tag, requeue=requeue)
self.increment_count(self.REQUEUED if requeue else self.REJECTED)
if self.is_processing:
self.reset_state() | def reject(self, delivery_tag, requeue=True):
"""Reject the message on the broker and log it. We should move this to
use nack when Pika supports it in a released version.
:param str delivery_tag: Delivery tag to reject
:param bool requeue: Specify if the message should be re-queued or not
"""
if not self._ack:
raise RuntimeError('Cannot reject messages when ack is False')
if not self.can_respond:
LOGGER.warning('Can not reject message, disconnected from RabbitMQ')
self.increment_count(self.CLOSED_ON_COMPLETE)
if self.is_processing:
self.reset_state()
return
LOGGER.warning('Rejecting message %s %s requeue', delivery_tag,
'with' if requeue else 'without')
self._channel.basic_nack(delivery_tag=delivery_tag, requeue=requeue)
self.increment_count(self.REQUEUED if requeue else self.REJECTED)
if self.is_processing:
self.reset_state() |
Python | def reset_failure_counter(self):
"""Reset the failure counter to the max error count"""
LOGGER.debug('Resetting the failure counter to %i',
self._max_error_count)
self._counts[self.FAILURES] = self._max_error_count | def reset_failure_counter(self):
"""Reset the failure counter to the max error count"""
LOGGER.debug('Resetting the failure counter to %i',
self._max_error_count)
self._counts[self.FAILURES] = self._max_error_count |
Python | def reset_state(self):
"""Reset the runtime state after processing a message to either idle
or shutting down based upon the current state.
"""
if self.is_waiting_to_shutdown:
self.set_state(self.STATE_SHUTTING_DOWN)
self.on_ready_to_stop()
elif self.is_processing:
self.set_state(self.STATE_IDLE)
elif self.is_idle or self.is_connecting:
pass
else:
LOGGER.critical('Unexpected state: %s', self.state_description)
"""Reset the runtime state after processing a message to either idle
or shutting down based upon the current state.
"""
if self.is_waiting_to_shutdown:
self.set_state(self.STATE_SHUTTING_DOWN)
self.on_ready_to_stop()
elif self.is_processing:
self.set_state(self.STATE_IDLE)
elif self.is_idle or self.is_connecting:
pass
else:
LOGGER.critical('Unexpected state: %s', self.state_description)
Python | def _run(self):
"""Run method that can be profiled"""
self._ioloop = ioloop.IOLoop.instance()
common.add_null_handler()
try:
self.setup(self._kwargs['config'],
self._kwargs['connection_name'],
self._kwargs['consumer_name'],
self._kwargs['stats_queue'],
self._kwargs['logging_config'])
except ImportError as error:
name = self._kwargs['consumer_name']
classname = self._kwargs['config']['Consumers'][name]['consumer']
LOGGER.critical('Could not import %s, stopping process: %r',
classname, error)
return
if not self.is_stopped:
try:
self._ioloop.start()
except KeyboardInterrupt:
LOGGER.warning('CTRL-C while waiting for clean shutdown') | def _run(self):
"""Run method that can be profiled"""
self._ioloop = ioloop.IOLoop.instance()
common.add_null_handler()
try:
self.setup(self._kwargs['config'],
self._kwargs['connection_name'],
self._kwargs['consumer_name'],
self._kwargs['stats_queue'],
self._kwargs['logging_config'])
except ImportError as error:
name = self._kwargs['consumer_name']
classname = self._kwargs['config']['Consumers'][name]['consumer']
LOGGER.critical('Could not import %s, stopping process: %r',
classname, error)
return
if not self.is_stopped:
try:
self._ioloop.start()
except KeyboardInterrupt:
LOGGER.warning('CTRL-C while waiting for clean shutdown') |
Python | def send_counter_to_statsd(self, counter, value=1):
"""Send a metric passed in to statsd.
:param str counter: The counter name
:param int|float value: The count
"""
payload = self._STATSD_FORMAT.format(self._consumer_name,
counter,
math.ceil(value))
self._statsd_socket.sendto(payload, (self._statsd_host,
self._statsd_port)) | def send_counter_to_statsd(self, counter, value=1):
"""Send a metric passed in to statsd.
:param str counter: The counter name
:param int|float value: The count
"""
payload = self._STATSD_FORMAT.format(self._consumer_name,
counter,
math.ceil(value))
self._statsd_socket.sendto(payload, (self._statsd_host,
self._statsd_port)) |
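The payload is built from _STATSD_FORMAT, which is not shown in this excerpt; the sketch below assumes the conventional statsd counter wire format ('name:value|c'), and the host and port are illustrative.

import math
import socket

consumer_name, counter, value = 'ExampleConsumer', 'processed', 7
payload = '{0}.{1}:{2}|c'.format(consumer_name, counter, int(math.ceil(value)))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(payload.encode('utf-8'), ('127.0.0.1', 8125))
sock.close()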
Python | def start_message_processing(self):
"""Keep track of the connection in case RabbitMQ disconnects while the
message is processing.
"""
self._message_connection_id = self._connection_id | def start_message_processing(self):
"""Keep track of the connection in case RabbitMQ disconnects while the
message is processing.
"""
self._message_connection_id = self._connection_id |
Python | def stop(self, signum=None, frame_unused=None):
"""Stop the consumer from consuming by calling BasicCancel and setting
our state.
"""
LOGGER.debug('Stop called in state: %s', self.state_description)
if self.is_stopped:
LOGGER.warning('Stop requested but consumer is already stopped')
return
elif self.is_shutting_down:
LOGGER.warning('Stop requested, consumer is already shutting down')
return
elif self.is_waiting_to_shutdown:
LOGGER.warning('Stop requested but already waiting to shut down')
return
# Stop consuming
self.cancel_consumer_with_rabbitmq()
# Wait until the consumer has finished processing to shutdown
if self.is_processing:
LOGGER.info('Waiting for consumer to finish processing')
self.set_state(self.STATE_STOP_REQUESTED)
if signum == signal.SIGTERM:
signal.siginterrupt(signal.SIGTERM, False)
return
self.on_ready_to_stop() | def stop(self, signum=None, frame_unused=None):
"""Stop the consumer from consuming by calling BasicCancel and setting
our state.
"""
LOGGER.debug('Stop called in state: %s', self.state_description)
if self.is_stopped:
LOGGER.warning('Stop requested but consumer is already stopped')
return
elif self.is_shutting_down:
LOGGER.warning('Stop requested, consumer is already shutting down')
return
elif self.is_waiting_to_shutdown:
LOGGER.warning('Stop requested but already waiting to shut down')
return
# Stop consuming
self.cancel_consumer_with_rabbitmq()
# Wait until the consumer has finished processing to shutdown
if self.is_processing:
LOGGER.info('Waiting for consumer to finish processing')
self.set_state(self.STATE_STOP_REQUESTED)
if signum == signal.SIGTERM:
signal.siginterrupt(signal.SIGTERM, False)
return
self.on_ready_to_stop() |
Python | def stop_consumer(self):
"""Stop the consumer object and allow it to do a clean shutdown if it
has the ability to do so.
"""
try:
LOGGER.info('Shutting down the consumer')
self._consumer.shutdown()
except AttributeError:
LOGGER.debug('Consumer does not have a shutdown method') | def stop_consumer(self):
"""Stop the consumer object and allow it to do a clean shutdown if it
has the ability to do so.
"""
try:
LOGGER.info('Shutting down the consumer')
self._consumer.shutdown()
except AttributeError:
LOGGER.debug('Consumer does not have a shutdown method') |
Python | def time_in_state(self):
"""Return the time that has been spent in the current state.
:rtype: float
"""
return time.time() - self._state_start | def time_in_state(self):
"""Return the time that has been spent in the current state.
:rtype: float
"""
return time.time() - self._state_start |
Python | def too_many_errors(self):
"""Return a bool if too many errors have occurred.
:rtype: bool
"""
return self.count(self.ERROR) >= self._max_error_count | def too_many_errors(self):
"""Return a bool if too many errors have occurred.
:rtype: bool
"""
return self.count(self.ERROR) >= self._max_error_count |
Python | def _prepend_python_path(self, path): #pragma: no cover
"""Add the specified value to the python path.
:param str path: The path to append
"""
LOGGER.debug('Prepending "%s" to the python path.', path)
sys.path.insert(0, path) | def _prepend_python_path(self, path): #pragma: no cover
"""Add the specified value to the python path.
:param str path: The path to append
"""
LOGGER.debug('Prepending "%s" to the python path.', path)
sys.path.insert(0, path) |
Python | def active_processes(self):
"""Return a list of all active processes, pruning dead ones
:rtype: list
"""
active_processes, dead_processes = list(), list()
for consumer in self._consumers:
for name in self._consumers[consumer]['processes']:
child = self.get_consumer_process(consumer, name)
if int(child.pid) == os.getpid():
continue
try:
proc = psutil.Process(child.pid)
except psutil.NoSuchProcess:
dead_processes.append((consumer, name))
continue
if self.is_a_dead_or_zombie_process(proc):
dead_processes.append((consumer, name))
else:
active_processes.append(child)
if dead_processes:
LOGGER.debug('Removing %i dead process(es)', len(dead_processes))
for proc in dead_processes:
self.remove_consumer_process(*proc)
return active_processes | def active_processes(self):
"""Return a list of all active processes, pruning dead ones
:rtype: list
"""
active_processes, dead_processes = list(), list()
for consumer in self._consumers:
for name in self._consumers[consumer]['processes']:
child = self.get_consumer_process(consumer, name)
if int(child.pid) == os.getpid():
continue
try:
proc = psutil.Process(child.pid)
except psutil.NoSuchProcess:
dead_processes.append((consumer, name))
continue
if self.is_a_dead_or_zombie_process(proc):
dead_processes.append((consumer, name))
else:
active_processes.append(child)
if dead_processes:
LOGGER.debug('Removing %i dead process(es)', len(dead_processes))
for proc in dead_processes:
self.remove_consumer_process(*proc)
return active_processes |
Python | def calculate_stats(self, data):
"""Calculate the stats data for our process level data.
:param data: The collected stats data to report on
:type data: dict
"""
timestamp = data['timestamp']
del data['timestamp']
LOGGER.debug('Calculating stats for data timestamp: %i', timestamp)
# Iterate through the last poll results
stats = self.consumer_stats_counter()
consumer_stats = dict()
for name in data.keys():
consumer_stats[name] = self.consumer_stats_counter()
consumer_stats[name]['processes'] = \
self.process_count_by_consumer(name)
for proc in data[name].keys():
for key in stats:
value = data[name][proc]['counts'][key]
stats[key] += value
consumer_stats[name][key] += value
# Return a data structure that can be used in reporting out the stats
stats['processes'] = len(self.active_processes)
return {'last_poll': timestamp,
'consumers': consumer_stats,
'process_data': data,
'counts': stats} | def calculate_stats(self, data):
"""Calculate the stats data for our process level data.
:param data: The collected stats data to report on
:type data: dict
"""
timestamp = data['timestamp']
del data['timestamp']
LOGGER.debug('Calculating stats for data timestamp: %i', timestamp)
# Iterate through the last poll results
stats = self.consumer_stats_counter()
consumer_stats = dict()
for name in data.keys():
consumer_stats[name] = self.consumer_stats_counter()
consumer_stats[name]['processes'] = \
self.process_count_by_consumer(name)
for proc in data[name].keys():
for key in stats:
value = data[name][proc]['counts'][key]
stats[key] += value
consumer_stats[name][key] += value
# Return a data structure that can be used in reporting out the stats
stats['processes'] = len(self.active_processes)
return {'last_poll': timestamp,
'consumers': consumer_stats,
'process_data': data,
'counts': stats} |
Python | def calculate_velocity(counts):
"""Calculate the message velocity to determine how many messages are
processed per second.
:param dict counts: The count dictionary to use for calculation
:rtype: float
"""
total_time = counts['idle_time'] + counts['processing_time']
if total_time and counts['processed']:
return float(counts['processed'] / float(total_time))
return 0 | def calculate_velocity(counts):
"""Calculate the message velocity to determine how many messages are
processed per second.
:param dict counts: The count dictionary to use for calculation
:rtype: float
"""
total_time = counts['idle_time'] + counts['processing_time']
if total_time and counts['processed']:
return float(counts['processed'] / float(total_time))
return 0 |
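A quick worked example of the formula above, using made-up counts:

counts = {'idle_time': 45.0, 'processing_time': 15.0, 'processed': 120}
total_time = counts['idle_time'] + counts['processing_time']    # 60 seconds
velocity = float(counts['processed']) / float(total_time)       # 2.0 msg/sec
print(velocity)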
Python | def check_process_counts(self):
"""Check for the minimum consumer process levels and start up new
processes needed.
"""
LOGGER.debug('Checking minimum consumer process levels')
for name in self._consumers:
for connection in self._consumers[name]['connections']:
processes_needed = self.process_spawn_qty(name, connection)
LOGGER.debug('Need to spawn %i processes for %s on %s',
processes_needed, name, connection)
if processes_needed:
self.start_processes(name, connection, processes_needed) | def check_process_counts(self):
"""Check for the minimum consumer process levels and start up new
processes needed.
"""
LOGGER.debug('Checking minimum consumer process levels')
for name in self._consumers:
for connection in self._consumers[name]['connections']:
processes_needed = self.process_spawn_qty(name, connection)
LOGGER.debug('Need to spawn %i processes for %s on %s',
processes_needed, name, connection)
if processes_needed:
self.start_processes(name, connection, processes_needed) |
Python | def collect_results(self, data_values):
"""Receive the data from the consumers polled and process it.
:param dict data_values: The poll data returned from the consumer
:type data_values: dict
"""
self._last_poll_results['timestamp'] = self._poll_data['timestamp']
# Get the name and consumer name and remove it from what is reported
consumer_name = data_values['consumer_name']
del data_values['consumer_name']
process_name = data_values['name']
del data_values['name']
# Add it to our last poll global data
if consumer_name not in self._last_poll_results:
self._last_poll_results[consumer_name] = dict()
self._last_poll_results[consumer_name][process_name] = data_values
# Calculate the stats
self._stats = self.calculate_stats(self._last_poll_results) | def collect_results(self, data_values):
"""Receive the data from the consumers polled and process it.
:param dict data_values: The poll data returned from the consumer
:type data_values: dict
"""
self._last_poll_results['timestamp'] = self._poll_data['timestamp']
# Get the name and consumer name and remove it from what is reported
consumer_name = data_values['consumer_name']
del data_values['consumer_name']
process_name = data_values['name']
del data_values['name']
# Add it to our last poll global data
if consumer_name not in self._last_poll_results:
self._last_poll_results[consumer_name] = dict()
self._last_poll_results[consumer_name][process_name] = data_values
# Calculate the stats
self._stats = self.calculate_stats(self._last_poll_results) |
Python | def consumer_dict(self, configuration):
"""Return a consumer dict for the given name and configuration.
:param dict configuration: The consumer configuration
:rtype: dict
"""
# Keep a dict that has a list of processes by connection
connections = dict()
for connection in configuration['connections']:
connections[connection] = list()
return {'connections': connections,
'qty': configuration.get('qty', self._DEFAULT_CONSUMER_QTY),
'last_proc_num': 0,
'queue': configuration['queue'],
'processes': dict()} | def consumer_dict(self, configuration):
"""Return a consumer dict for the given name and configuration.
:param dict configuration: The consumer configuration
:rtype: dict
"""
# Keep a dict that has a list of processes by connection
connections = dict()
for connection in configuration['connections']:
connections[connection] = list()
return {'connections': connections,
'qty': configuration.get('qty', self._DEFAULT_CONSUMER_QTY),
'last_proc_num': 0,
'queue': configuration['queue'],
'processes': dict()} |
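For illustration, a consumer configuration stanza like the following (values are made up) would produce the structure shown in the comment below it:

configuration = {'connections': ['rabbit1', 'rabbit2'],
                 'qty': 2,
                 'queue': 'example_queue'}

# consumer_dict(configuration) would return:
# {'connections': {'rabbit1': [], 'rabbit2': []},
#  'qty': 2,
#  'last_proc_num': 0,
#  'queue': 'example_queue',
#  'processes': {}}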
Python | def consumer_stats_counter():
"""Return a new consumer stats counter instance.
:rtype: dict
"""
return {process.Process.ERROR: 0,
process.Process.PROCESSED: 0,
process.Process.REDELIVERED: 0,
process.Process.TIME_SPENT: 0,
process.Process.TIME_WAITED: 0} | def consumer_stats_counter():
"""Return a new consumer stats counter instance.
:rtype: dict
"""
return {process.Process.ERROR: 0,
process.Process.PROCESSED: 0,
process.Process.REDELIVERED: 0,
process.Process.TIME_SPENT: 0,
process.Process.TIME_WAITED: 0} |
Python | def is_a_dead_or_zombie_process(process):
"""Checks to see if the specified process is a zombie or dead.
:param psutil.Process process: The process to check
:rtype: bool
"""
try:
if process.status in [psutil.STATUS_DEAD, psutil.STATUS_ZOMBIE]:
try:
LOGGER.debug('Found dead or zombie process with '
'%s fds',
process.get_num_fds())
except psutil.AccessDenied as error:
LOGGER.debug('Found dead or zombie process, '
'could not get fd count: %s', error)
return True
except psutil.NoSuchProcess:
LOGGER.debug('Found dead or zombie process')
return True
return False | def is_a_dead_or_zombie_process(process):
"""Checks to see if the specified process is a zombie or dead.
:param psutil.Process process: The process to check
:rtype: bool
"""
try:
if process.status in [psutil.STATUS_DEAD, psutil.STATUS_ZOMBIE]:
try:
LOGGER.debug('Found dead or zombie process with '
'%s fds',
process.get_num_fds())
except psutil.AccessDenied as error:
LOGGER.debug('Found dead or zombie process, '
'could not get fd count: %s', error)
return True
except psutil.NoSuchProcess:
LOGGER.debug('Found dead or zombie process')
return True
return False |
Python | def kill_processes(self):
"""Gets called on shutdown by the timer when too much time has gone by,
calling the terminate method instead of nicely asking for the consumers
to stop.
"""
LOGGER.critical('Max shutdown exceeded, forcibly exiting')
processes = True
while processes:
processes = self.active_processes
for process in processes:
if int(process.pid) != int(os.getpid()):
LOGGER.warning('Killing %s (%s)', process.name, process.pid)
os.kill(int(process.pid), signal.SIGKILL)
else:
LOGGER.warning('Cowardly refusing to kill self (%s, %s)',
process.pid, os.getpid())
time.sleep(0.5)
LOGGER.info('Killed all children')
return self.set_state(self.STATE_STOPPED) | def kill_processes(self):
"""Gets called on shutdown by the timer when too much time has gone by,
calling the terminate method instead of nicely asking for the consumers
to stop.
"""
LOGGER.critical('Max shutdown exceeded, forcibly exiting')
processes = True
while processes:
processes = self.active_processes
for process in processes:
if int(process.pid) != int(os.getpid()):
LOGGER.warning('Killing %s (%s)', process.name, process.pid)
os.kill(int(process.pid), signal.SIGKILL)
else:
LOGGER.warning('Cowardly refusing to kill self (%s, %s)',
process.pid, os.getpid())
time.sleep(0.5)
LOGGER.info('Killed all children')
return self.set_state(self.STATE_STOPPED) |
Python | def log_stats(self):
"""Output the stats to the LOGGER."""
if not self._stats.get('counts'):
LOGGER.info('Did not receive any stats data from children')
return
LOGGER.info('%i total %s have processed %i messages with %i '
'errors, waiting %.2f seconds and have spent %.2f seconds '
'processing messages with an overall velocity of %.2f '
'messages per second.',
self._stats['counts']['processes'],
self.consumer_keyword(self._stats['counts']),
self._stats['counts']['processed'],
self._stats['counts']['failed'],
self._stats['counts']['idle_time'],
self._stats['counts']['processing_time'],
self.calculate_velocity(self._stats['counts']))
for key in self._stats['consumers'].keys():
LOGGER.info('%i %s for %s have processed %i messages with %i '
'errors, waiting %.2f seconds and have spent %.2f '
'seconds processing messages with an overall velocity '
'of %.2f messages per second.',
self._stats['consumers'][key]['processes'],
self.consumer_keyword(self._stats['consumers'][key]),
key,
self._stats['consumers'][key]['processed'],
self._stats['consumers'][key]['failed'],
self._stats['consumers'][key]['idle_time'],
self._stats['consumers'][key]['processing_time'],
self.calculate_velocity(self._stats['consumers'][key]))
if self._poll_data['processes']:
LOGGER.warning('%i process(es) did not respond with stats in '
'time: %r',
len(self._poll_data['processes']),
self._poll_data['processes']) | def log_stats(self):
"""Output the stats to the LOGGER."""
if not self._stats.get('counts'):
LOGGER.info('Did not receive any stats data from children')
return
LOGGER.info('%i total %s have processed %i messages with %i '
'errors, waiting %.2f seconds and have spent %.2f seconds '
'processing messages with an overall velocity of %.2f '
'messages per second.',
self._stats['counts']['processes'],
self.consumer_keyword(self._stats['counts']),
self._stats['counts']['processed'],
self._stats['counts']['failed'],
self._stats['counts']['idle_time'],
self._stats['counts']['processing_time'],
self.calculate_velocity(self._stats['counts']))
for key in self._stats['consumers'].keys():
LOGGER.info('%i %s for %s have processed %i messages with %i '
'errors, waiting %.2f seconds and have spent %.2f '
'seconds processing messages with an overall velocity '
'of %.2f messages per second.',
self._stats['consumers'][key]['processes'],
self.consumer_keyword(self._stats['consumers'][key]),
key,
self._stats['consumers'][key]['processed'],
self._stats['consumers'][key]['failed'],
self._stats['consumers'][key]['idle_time'],
self._stats['consumers'][key]['processing_time'],
self.calculate_velocity(self._stats['consumers'][key]))
if self._poll_data['processes']:
LOGGER.warning('%i process(es) did not respond with stats in '
'time: %r',
len(self._poll_data['processes']),
self._poll_data['processes']) |
Python | def new_process_number(self, name):
"""Increment the counter for the process id number for a given consumer
configuration.
:param str name: Consumer name
:rtype: int
"""
self._consumers[name]['last_proc_num'] += 1
return self._consumers[name]['last_proc_num'] | def new_process_number(self, name):
"""Increment the counter for the process id number for a given consumer
configuration.
:param str name: Consumer name
:rtype: int
"""
self._consumers[name]['last_proc_num'] += 1
return self._consumers[name]['last_proc_num'] |
Python | def poll(self):
"""Start the poll process by invoking the get_stats method of the
consumers. If we hit this after another interval without fully
processing, note it with a warning.
"""
self.set_state(self.STATE_ACTIVE)
# If we don't have any active consumers, shutdown
if not self.total_process_count:
LOGGER.debug('Did not find any active consumers in poll')
return self.set_state(self.STATE_STOPPED)
# Start our data collection dict
self._poll_data = {'timestamp': time.time(),
'processes': list()}
# Iterate through all of the consumers
for proc in self.active_processes:
LOGGER.debug('Checking runtime state of %s', proc.name)
if proc == multiprocessing.current_process():
LOGGER.debug('Matched current process in active_processes')
continue
# Send the profile signal
os.kill(int(proc.pid), signal.SIGPROF)
self._poll_data['processes'].append(proc.name)
# Check if we need to start more processes
self.check_process_counts() | def poll(self):
"""Start the poll process by invoking the get_stats method of the
consumers. If we hit this after another interval without fully
processing, note it with a warning.
"""
self.set_state(self.STATE_ACTIVE)
# If we don't have any active consumers, shutdown
if not self.total_process_count:
LOGGER.debug('Did not find any active consumers in poll')
return self.set_state(self.STATE_STOPPED)
# Start our data collection dict
self._poll_data = {'timestamp': time.time(),
'processes': list()}
# Iterate through all of the consumers
for proc in self.active_processes:
LOGGER.debug('Checking runtime state of %s', proc.name)
if proc == multiprocessing.current_process():
LOGGER.debug('Matched current process in active_processes')
continue
# Send the profile signal
os.kill(int(proc.pid), signal.SIGPROF)
self._poll_data['processes'].append(proc.name)
# Check if we need to start more processes
self.check_process_counts() |
Python | def poll_duration_exceeded(self):
"""Return true if the poll time has been exceeded.
:rtype: bool
"""
return (time.time() -
self._poll_data['timestamp']) >= self._poll_interval | def poll_duration_exceeded(self):
"""Return true if the poll time has been exceeded.
:rtype: bool
"""
return (time.time() -
self._poll_data['timestamp']) >= self._poll_interval |
Python | def poll_results_check(self):
"""Check the polling results by checking to see if the stats queue is
empty. If it is not, try to collect stats. If it is, set a timer to
call ourselves in _POLL_RESULTS_INTERVAL.
"""
LOGGER.debug('Checking for poll results')
while True:
try:
stats = self._stats_queue.get(False)
except Queue.Empty:
break
self._poll_data['processes'].remove(stats['name'])
self.collect_results(stats)
if self._poll_data['processes']:
LOGGER.warning('Did not receive results from %r',
self._poll_data['processes']) | def poll_results_check(self):
"""Check the polling results by checking to see if the stats queue is
empty. If it is not, try to collect stats. If it is, set a timer to
call ourselves in _POLL_RESULTS_INTERVAL.
"""
LOGGER.debug('Checking for poll results')
while True:
try:
stats = self._stats_queue.get(False)
except Queue.Empty:
break
self._poll_data['processes'].remove(stats['name'])
self.collect_results(stats)
if self._poll_data['processes']:
LOGGER.warning('Did not receive results from %r',
self._poll_data['processes']) |
Python | def process(self, consumer_name, process_name):
"""Return the process handle for the given consumer name and process
name.
:param str consumer_name: The consumer name from config
:param str process_name: The automatically assigned process name
:rtype: rejected.process.Process
"""
return self._consumers[consumer_name]['processes'][process_name] | def process(self, consumer_name, process_name):
"""Return the process handle for the given consumer name and process
name.
:param str consumer_name: The consumer name from config
:param str process_name: The automatically assigned process name
:rtype: rejected.process.Process
"""
return self._consumers[consumer_name]['processes'][process_name] |
Python | def process_count(self, name, connection):
"""Return the process count for the given consumer name and connection.
:param str name: The consumer name
:param str connection: The connection name
:rtype: int
"""
return len(self._consumers[name]['connections'][connection]) | def process_count(self, name, connection):
"""Return the process count for the given consumer name and connection.
:param str name: The consumer name
:param str connection: The connection name
:rtype: int
"""
return len(self._consumers[name]['connections'][connection]) |