repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_documentation_string
stringlengths
1
47.2k
func_code_url
stringlengths
85
339
bitlabstudio/django-influxdb-metrics
influxdb_metrics/utils.py
get_client
def get_client():
    """Build and return an ``InfluxDBClient`` from the Django settings.

    SSL options fall back to ``False`` when not configured.
    """
    use_ssl = getattr(settings, 'INFLUXDB_SSL', False)
    check_certs = getattr(settings, 'INFLUXDB_VERIFY_SSL', False)
    return InfluxDBClient(
        settings.INFLUXDB_HOST,
        settings.INFLUXDB_PORT,
        settings.INFLUXDB_USER,
        settings.INFLUXDB_PASSWORD,
        settings.INFLUXDB_DATABASE,
        timeout=settings.INFLUXDB_TIMEOUT,
        ssl=use_ssl,
        verify_ssl=check_certs,
    )
python
def get_client(): """Returns an ``InfluxDBClient`` instance.""" return InfluxDBClient( settings.INFLUXDB_HOST, settings.INFLUXDB_PORT, settings.INFLUXDB_USER, settings.INFLUXDB_PASSWORD, settings.INFLUXDB_DATABASE, timeout=settings.INFLUXDB_TIMEOUT, ssl=getattr(settings, 'INFLUXDB_SSL', False), verify_ssl=getattr(settings, 'INFLUXDB_VERIFY_SSL', False), )
Returns an ``InfluxDBClient`` instance.
https://github.com/bitlabstudio/django-influxdb-metrics/blob/c9f368e28a6072813454b6b549b4afa64aad778a/influxdb_metrics/utils.py#L13-L24
bitlabstudio/django-influxdb-metrics
influxdb_metrics/utils.py
write_points
def write_points(data, force_disable_threading=False):
    """Write a series of data points to influxdb.

    :param data: Array of dicts, as required by
      https://github.com/influxdb/influxdb-python
    :param force_disable_threading: When being called from the Celery task,
      we set this to `True` so that the user doesn't accidentally use Celery
      and threading at the same time.
    """
    if getattr(settings, 'INFLUXDB_DISABLED', False):
        return

    client = get_client()
    # Threading is only used when the setting is exactly ``True`` and the
    # caller has not forced it off.
    threaded = (
        not force_disable_threading
        and getattr(settings, 'INFLUXDB_USE_THREADING', False) is True
    )
    if threaded:
        Thread(target=process_points, args=(client, data)).start()
    else:
        process_points(client, data)
python
def write_points(data, force_disable_threading=False): """ Writes a series to influxdb. :param data: Array of dicts, as required by https://github.com/influxdb/influxdb-python :param force_disable_threading: When being called from the Celery task, we set this to `True` so that the user doesn't accidentally use Celery and threading at the same time. """ if getattr(settings, 'INFLUXDB_DISABLED', False): return client = get_client() use_threading = getattr(settings, 'INFLUXDB_USE_THREADING', False) if force_disable_threading: use_threading = False if use_threading is True: thread = Thread(target=process_points, args=(client, data, )) thread.start() else: process_points(client, data)
Writes a series to influxdb. :param data: Array of dicts, as required by https://github.com/influxdb/influxdb-python :param force_disable_threading: When being called from the Celery task, we set this to `True` so that the user doesn't accidentally use Celery and threading at the same time.
https://github.com/bitlabstudio/django-influxdb-metrics/blob/c9f368e28a6072813454b6b549b4afa64aad778a/influxdb_metrics/utils.py#L33-L55
bitlabstudio/django-influxdb-metrics
influxdb_metrics/utils.py
process_points
def process_points(client, data):  # pragma: no cover
    """Write ``data`` through ``client``; intended as a thread target.

    Errors are logged instead of raised unless ``INFLUXDB_FAIL_SILENTLY``
    is explicitly set to ``False``.
    """
    try:
        client.write_points(data)
    except Exception:
        if not getattr(settings, 'INFLUXDB_FAIL_SILENTLY', True):
            raise
        logger.exception('Error while writing data points')
python
def process_points(client, data): # pragma: no cover """Method to be called via threading module.""" try: client.write_points(data) except Exception: if getattr(settings, 'INFLUXDB_FAIL_SILENTLY', True): logger.exception('Error while writing data points') else: raise
Method to be called via threading module.
https://github.com/bitlabstudio/django-influxdb-metrics/blob/c9f368e28a6072813454b6b549b4afa64aad778a/influxdb_metrics/utils.py#L58-L66
keiichishima/pcalg
pcalg.py
_create_complete_graph
def _create_complete_graph(node_ids):
    """Build the complete undirected graph over the given nodes.

    Args:
        node_ids: a list of node ids

    Returns:
        An undirected graph (as a networkx.Graph)
    """
    graph = nx.Graph()
    graph.add_nodes_from(node_ids)
    # Every unordered pair of distinct nodes gets an edge.
    graph.add_edges_from(combinations(node_ids, 2))
    return graph
python
def _create_complete_graph(node_ids): """Create a complete graph from the list of node ids. Args: node_ids: a list of node ids Returns: An undirected graph (as a networkx.Graph) """ g = nx.Graph() g.add_nodes_from(node_ids) for (i, j) in combinations(node_ids, 2): g.add_edge(i, j) return g
Create a complete graph from the list of node ids. Args: node_ids: a list of node ids Returns: An undirected graph (as a networkx.Graph)
https://github.com/keiichishima/pcalg/blob/f270e2bdb76b88c8f80a1ea07317ff4be88e2359/pcalg.py#L22-L35
keiichishima/pcalg
pcalg.py
estimate_skeleton
def estimate_skeleton(indep_test_func, data_matrix, alpha, **kwargs):
    """Estimate a skeleton graph from the statistics information.

    Args:
        indep_test_func: the function name for a conditional
            independency test.
        data_matrix: data (as a numpy array).
        alpha: the significance level.
        kwargs:
            'max_reach': maximum value of l (see the code).  The
                value depends on the underlying distribution.
            'method': if 'stable' given, use stable-PC algorithm
                (see [Colombo2014]).
            'init_graph': initial structure of skeleton graph
                (as a networkx.Graph). If not specified,
                a complete graph is used.
            other parameters may be passed depending on the
                indep_test_func()s.
    Returns:
        g: a skeleton graph (as a networkx.Graph).
        sep_set: a separation set (as an 2D-array of set()).

    [Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent
    constraint-based causal structure learning. In The Journal of Machine
    Learning Research, Vol. 15, pp. 3741-3782, 2014.
    """

    def method_stable(kwargs):
        return ('method' in kwargs) and kwargs['method'] == "stable"

    node_ids = range(data_matrix.shape[1])
    node_size = data_matrix.shape[1]
    # sep_set[i][j] accumulates the conditioning sets that rendered i and j
    # independent; None marks pairs excluded up-front by 'init_graph'.
    sep_set = [[set() for i in range(node_size)] for j in range(node_size)]
    if 'init_graph' in kwargs:
        g = kwargs['init_graph']
        if not isinstance(g, nx.Graph):
            raise ValueError
        elif not g.number_of_nodes() == len(node_ids):
            raise ValueError('init_graph not matching data_matrix shape')
        for (i, j) in combinations(node_ids, 2):
            if not g.has_edge(i, j):
                sep_set[i][j] = None
                sep_set[j][i] = None
    else:
        g = _create_complete_graph(node_ids)

    l = 0  # size of the conditioning sets tested in the current round
    while True:
        cont = False
        remove_edges = []
        for (i, j) in permutations(node_ids, 2):
            adj_i = list(g.neighbors(i))
            if j not in adj_i:
                continue
            else:
                adj_i.remove(j)
            if len(adj_i) >= l:
                _logger.debug('testing %s and %s' % (i, j))
                _logger.debug('neighbors of %s are %s' % (i, str(adj_i)))
                # NOTE: the original code additionally checked
                # ``if len(adj_i) < l: continue`` here, which is dead code
                # inside this ``len(adj_i) >= l`` branch; it was removed.
                for k in combinations(adj_i, l):
                    _logger.debug('indep prob of %s and %s with subset %s'
                                  % (i, j, str(k)))
                    p_val = indep_test_func(data_matrix, i, j, set(k),
                                            **kwargs)
                    _logger.debug('p_val is %s' % str(p_val))
                    if p_val > alpha:
                        if g.has_edge(i, j):
                            _logger.debug('p: remove edge (%s, %s)' % (i, j))
                            if method_stable(kwargs):
                                # stable-PC: defer removals so every test in
                                # this round sees the same neighborhoods.
                                remove_edges.append((i, j))
                            else:
                                g.remove_edge(i, j)
                        sep_set[i][j] |= set(k)
                        sep_set[j][i] |= set(k)
                        break
                cont = True
        l += 1
        if method_stable(kwargs):
            g.remove_edges_from(remove_edges)
        if cont is False:
            break
        if ('max_reach' in kwargs) and (l > kwargs['max_reach']):
            break

    return (g, sep_set)
python
def estimate_skeleton(indep_test_func, data_matrix, alpha, **kwargs): """Estimate a skeleton graph from the statistis information. Args: indep_test_func: the function name for a conditional independency test. data_matrix: data (as a numpy array). alpha: the significance level. kwargs: 'max_reach': maximum value of l (see the code). The value depends on the underlying distribution. 'method': if 'stable' given, use stable-PC algorithm (see [Colombo2014]). 'init_graph': initial structure of skeleton graph (as a networkx.Graph). If not specified, a complete graph is used. other parameters may be passed depending on the indep_test_func()s. Returns: g: a skeleton graph (as a networkx.Graph). sep_set: a separation set (as an 2D-array of set()). [Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent constraint-based causal structure learning. In The Journal of Machine Learning Research, Vol. 15, pp. 3741-3782, 2014. """ def method_stable(kwargs): return ('method' in kwargs) and kwargs['method'] == "stable" node_ids = range(data_matrix.shape[1]) node_size = data_matrix.shape[1] sep_set = [[set() for i in range(node_size)] for j in range(node_size)] if 'init_graph' in kwargs: g = kwargs['init_graph'] if not isinstance(g, nx.Graph): raise ValueError elif not g.number_of_nodes() == len(node_ids): raise ValueError('init_graph not matching data_matrix shape') for (i, j) in combinations(node_ids, 2): if not g.has_edge(i, j): sep_set[i][j] = None sep_set[j][i] = None else: g = _create_complete_graph(node_ids) l = 0 while True: cont = False remove_edges = [] for (i, j) in permutations(node_ids, 2): adj_i = list(g.neighbors(i)) if j not in adj_i: continue else: adj_i.remove(j) if len(adj_i) >= l: _logger.debug('testing %s and %s' % (i,j)) _logger.debug('neighbors of %s are %s' % (i, str(adj_i))) if len(adj_i) < l: continue for k in combinations(adj_i, l): _logger.debug('indep prob of %s and %s with subset %s' % (i, j, str(k))) p_val = indep_test_func(data_matrix, i, 
j, set(k), **kwargs) _logger.debug('p_val is %s' % str(p_val)) if p_val > alpha: if g.has_edge(i, j): _logger.debug('p: remove edge (%s, %s)' % (i, j)) if method_stable(kwargs): remove_edges.append((i, j)) else: g.remove_edge(i, j) sep_set[i][j] |= set(k) sep_set[j][i] |= set(k) break cont = True l += 1 if method_stable(kwargs): g.remove_edges_from(remove_edges) if cont is False: break if ('max_reach' in kwargs) and (l > kwargs['max_reach']): break return (g, sep_set)
Estimate a skeleton graph from the statistis information. Args: indep_test_func: the function name for a conditional independency test. data_matrix: data (as a numpy array). alpha: the significance level. kwargs: 'max_reach': maximum value of l (see the code). The value depends on the underlying distribution. 'method': if 'stable' given, use stable-PC algorithm (see [Colombo2014]). 'init_graph': initial structure of skeleton graph (as a networkx.Graph). If not specified, a complete graph is used. other parameters may be passed depending on the indep_test_func()s. Returns: g: a skeleton graph (as a networkx.Graph). sep_set: a separation set (as an 2D-array of set()). [Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent constraint-based causal structure learning. In The Journal of Machine Learning Research, Vol. 15, pp. 3741-3782, 2014.
https://github.com/keiichishima/pcalg/blob/f270e2bdb76b88c8f80a1ea07317ff4be88e2359/pcalg.py#L37-L123
keiichishima/pcalg
pcalg.py
estimate_cpdag
def estimate_cpdag(skel_graph, sep_set):
    """Estimate a CPDAG from the skeleton graph and separation sets
    returned by the estimate_skeleton() function.

    Args:
        skel_graph: A skeleton graph (an undirected networkx.Graph).
        sep_set: An 2D-array of separation set.
            The contents look like something like below.
                sep_set[i][j] = set([k, l, m])

    Returns:
        An estimated DAG.
    """
    # Start from the fully bidirected version of the skeleton: an edge pair
    # (i->j, j->i) stands for an as-yet-unoriented edge i-j.
    dag = skel_graph.to_directed()
    node_ids = skel_graph.nodes()
    # Orient v-structures: for every nonadjacent pair (i, j) with a common
    # neighbor k that is NOT in their separation set, orient i->k<-j by
    # deleting the reverse arcs.
    for (i, j) in combinations(node_ids, 2):
        adj_i = set(dag.successors(i))
        if j in adj_i:
            continue
        adj_j = set(dag.successors(j))
        if i in adj_j:
            continue
        if sep_set[i][j] is None:
            # Pair was excluded by init_graph; nothing to orient.
            continue
        common_k = adj_i & adj_j
        for k in common_k:
            if k not in sep_set[i][j]:
                if dag.has_edge(k, i):
                    _logger.debug('S: remove edge (%s, %s)' % (k, i))
                    dag.remove_edge(k, i)
                if dag.has_edge(k, j):
                    _logger.debug('S: remove edge (%s, %s)' % (k, j))
                    dag.remove_edge(k, j)

    # Edge-shape helpers over the bidirected encoding: "both" means the
    # edge is still undirected, "one" means it has been oriented.
    def _has_both_edges(dag, i, j):
        return dag.has_edge(i, j) and dag.has_edge(j, i)

    def _has_any_edge(dag, i, j):
        return dag.has_edge(i, j) or dag.has_edge(j, i)

    def _has_one_edge(dag, i, j):
        return ((dag.has_edge(i, j) and
                 (not dag.has_edge(j, i))) or
                (not dag.has_edge(i, j)) and
                dag.has_edge(j, i))

    def _has_no_edge(dag, i, j):
        return (not dag.has_edge(i, j)) and (not dag.has_edge(j, i))

    # For all the combination of nodes i and j, apply the following
    # rules.
    old_dag = dag.copy()
    while True:
        for (i, j) in combinations(node_ids, 2):
            # Rule 1: Orient i-j into i->j whenever there is an arrow k->i
            # such that k and j are nonadjacent.
            #
            # Check if i-j.
            if _has_both_edges(dag, i, j):
                # Look all the predecessors of i.
                for k in dag.predecessors(i):
                    # Skip if there is an arrow i->k.
                    if dag.has_edge(i, k):
                        continue
                    # Skip if k and j are adjacent.
                    if _has_any_edge(dag, k, j):
                        continue
                    # Make i-j into i->j
                    _logger.debug('R1: remove edge (%s, %s)' % (j, i))
                    dag.remove_edge(j, i)
                    break
            # Rule 2: Orient i-j into i->j whenever there is a chain
            # i->k->j.
            #
            # Check if i-j.
            if _has_both_edges(dag, i, j):
                # Find nodes k where k is i->k.
                succs_i = set()
                for k in dag.successors(i):
                    if not dag.has_edge(k, i):
                        succs_i.add(k)
                # Find nodes j where j is k->j.
                preds_j = set()
                for k in dag.predecessors(j):
                    if not dag.has_edge(j, k):
                        preds_j.add(k)
                # Check if there is any node k where i->k->j.
                if len(succs_i & preds_j) > 0:
                    # Make i-j into i->j
                    _logger.debug('R2: remove edge (%s, %s)' % (j, i))
                    dag.remove_edge(j, i)
            # Rule 3: Orient i-j into i->j whenever there are two chains
            # i-k->j and i-l->j such that k and l are nonadjacent.
            #
            # Check if i-j.
            if _has_both_edges(dag, i, j):
                # Find nodes k where i-k.
                adj_i = set()
                for k in dag.successors(i):
                    if dag.has_edge(k, i):
                        adj_i.add(k)
                # For all the pairs of nodes in adj_i,
                for (k, l) in combinations(adj_i, 2):
                    # Skip if k and l are adjacent.
                    if _has_any_edge(dag, k, l):
                        continue
                    # Skip if not k->j.
                    if dag.has_edge(j, k) or (not dag.has_edge(k, j)):
                        continue
                    # Skip if not l->j.
                    if dag.has_edge(j, l) or (not dag.has_edge(l, j)):
                        continue
                    # Make i-j into i->j.
                    _logger.debug('R3: remove edge (%s, %s)' % (j, i))
                    dag.remove_edge(j, i)
                    break
            # Rule 4: Orient i-j into i->j whenever there are two chains
            # i-k->l and k->l->j such that k and j are nonadjacent.
            #
            # However, this rule is not necessary when the PC-algorithm
            # is used to estimate a DAG.
        # Iterate Meek's rules until the graph stops changing.
        if nx.is_isomorphic(dag, old_dag):
            break
        old_dag = dag.copy()

    return dag
python
def estimate_cpdag(skel_graph, sep_set): """Estimate a CPDAG from the skeleton graph and separation sets returned by the estimate_skeleton() function. Args: skel_graph: A skeleton graph (an undirected networkx.Graph). sep_set: An 2D-array of separation set. The contents look like something like below. sep_set[i][j] = set([k, l, m]) Returns: An estimated DAG. """ dag = skel_graph.to_directed() node_ids = skel_graph.nodes() for (i, j) in combinations(node_ids, 2): adj_i = set(dag.successors(i)) if j in adj_i: continue adj_j = set(dag.successors(j)) if i in adj_j: continue if sep_set[i][j] is None: continue common_k = adj_i & adj_j for k in common_k: if k not in sep_set[i][j]: if dag.has_edge(k, i): _logger.debug('S: remove edge (%s, %s)' % (k, i)) dag.remove_edge(k, i) if dag.has_edge(k, j): _logger.debug('S: remove edge (%s, %s)' % (k, j)) dag.remove_edge(k, j) def _has_both_edges(dag, i, j): return dag.has_edge(i, j) and dag.has_edge(j, i) def _has_any_edge(dag, i, j): return dag.has_edge(i, j) or dag.has_edge(j, i) def _has_one_edge(dag, i, j): return ((dag.has_edge(i, j) and (not dag.has_edge(j, i))) or (not dag.has_edge(i, j)) and dag.has_edge(j, i)) def _has_no_edge(dag, i, j): return (not dag.has_edge(i, j)) and (not dag.has_edge(j, i)) # For all the combination of nodes i and j, apply the following # rules. old_dag = dag.copy() while True: for (i, j) in combinations(node_ids, 2): # Rule 1: Orient i-j into i->j whenever there is an arrow k->i # such that k and j are nonadjacent. # # Check if i-j. if _has_both_edges(dag, i, j): # Look all the predecessors of i. for k in dag.predecessors(i): # Skip if there is an arrow i->k. if dag.has_edge(i, k): continue # Skip if k and j are adjacent. if _has_any_edge(dag, k, j): continue # Make i-j into i->j _logger.debug('R1: remove edge (%s, %s)' % (j, i)) dag.remove_edge(j, i) break # Rule 2: Orient i-j into i->j whenever there is a chain # i->k->j. # # Check if i-j. 
if _has_both_edges(dag, i, j): # Find nodes k where k is i->k. succs_i = set() for k in dag.successors(i): if not dag.has_edge(k, i): succs_i.add(k) # Find nodes j where j is k->j. preds_j = set() for k in dag.predecessors(j): if not dag.has_edge(j, k): preds_j.add(k) # Check if there is any node k where i->k->j. if len(succs_i & preds_j) > 0: # Make i-j into i->j _logger.debug('R2: remove edge (%s, %s)' % (j, i)) dag.remove_edge(j, i) # Rule 3: Orient i-j into i->j whenever there are two chains # i-k->j and i-l->j such that k and l are nonadjacent. # # Check if i-j. if _has_both_edges(dag, i, j): # Find nodes k where i-k. adj_i = set() for k in dag.successors(i): if dag.has_edge(k, i): adj_i.add(k) # For all the pairs of nodes in adj_i, for (k, l) in combinations(adj_i, 2): # Skip if k and l are adjacent. if _has_any_edge(dag, k, l): continue # Skip if not k->j. if dag.has_edge(j, k) or (not dag.has_edge(k, j)): continue # Skip if not l->j. if dag.has_edge(j, l) or (not dag.has_edge(l, j)): continue # Make i-j into i->j. _logger.debug('R3: remove edge (%s, %s)' % (j, i)) dag.remove_edge(j, i) break # Rule 4: Orient i-j into i->j whenever there are two chains # i-k->l and k->l->j such that k and j are nonadjacent. # # However, this rule is not necessary when the PC-algorithm # is used to estimate a DAG. if nx.is_isomorphic(dag, old_dag): break old_dag = dag.copy() return dag
Estimate a CPDAG from the skeleton graph and separation sets returned by the estimate_skeleton() function. Args: skel_graph: A skeleton graph (an undirected networkx.Graph). sep_set: An 2D-array of separation set. The contents look like something like below. sep_set[i][j] = set([k, l, m]) Returns: An estimated DAG.
https://github.com/keiichishima/pcalg/blob/f270e2bdb76b88c8f80a1ea07317ff4be88e2359/pcalg.py#L125-L252
fedora-infra/fedmsg
fedmsg/utils.py
set_high_water_mark
def set_high_water_mark(socket, config):
    """ Set a high water mark on the zmq socket.  Do so in a way that is
    cross-compatible with zeromq2 and zeromq3.
    """
    hwm = config['high_water_mark']
    if not hwm:
        return

    if hasattr(zmq, 'HWM'):
        # zeromq2 exposes a single combined high-water-mark option.
        socket.setsockopt(zmq.HWM, hwm)
    else:
        # zeromq3 split it into separate send and receive options.
        socket.setsockopt(zmq.SNDHWM, hwm)
        socket.setsockopt(zmq.RCVHWM, hwm)
python
def set_high_water_mark(socket, config): """ Set a high water mark on the zmq socket. Do so in a way that is cross-compatible with zeromq2 and zeromq3. """ if config['high_water_mark']: if hasattr(zmq, 'HWM'): # zeromq2 socket.setsockopt(zmq.HWM, config['high_water_mark']) else: # zeromq3 socket.setsockopt(zmq.SNDHWM, config['high_water_mark']) socket.setsockopt(zmq.RCVHWM, config['high_water_mark'])
Set a high water mark on the zmq socket. Do so in a way that is cross-compatible with zeromq2 and zeromq3.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/utils.py#L32-L44
fedora-infra/fedmsg
fedmsg/utils.py
set_tcp_keepalive
def set_tcp_keepalive(socket, config):
    """ Set a series of TCP keepalive options on the socket if
    and only if
      1) they are specified explicitly in the config and
      2) the version of pyzmq has been compiled with support

    We ran into a problem in FedoraInfrastructure where long-standing
    connections between some hosts would suddenly drop off the
    map silently.  Because PUB/SUB sockets don't communicate
    regularly, nothing in the TCP stack would automatically try and
    fix the connection.  With TCP_KEEPALIVE options (introduced in
    libzmq 3.2 and pyzmq 2.2.0.1) hopefully that will be fixed.

    See the following
      - http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
      - http://api.zeromq.org/3-2:zmq-setsockopt
    """
    # fedmsg config key -> name of the zeromq socket constant.
    keepalive_options = {
        'zmq_tcp_keepalive': 'TCP_KEEPALIVE',
        'zmq_tcp_keepalive_cnt': 'TCP_KEEPALIVE_CNT',
        'zmq_tcp_keepalive_idle': 'TCP_KEEPALIVE_IDLE',
        'zmq_tcp_keepalive_intvl': 'TCP_KEEPALIVE_INTVL',
    }
    for key, const_name in keepalive_options.items():
        if key not in config:
            continue
        # Constant is absent when pyzmq/libzmq lack keepalive support.
        constant = getattr(zmq, const_name, None)
        if constant:
            socket.setsockopt(constant, config[key])
python
def set_tcp_keepalive(socket, config): """ Set a series of TCP keepalive options on the socket if and only if 1) they are specified explicitly in the config and 2) the version of pyzmq has been compiled with support We ran into a problem in FedoraInfrastructure where long-standing connections between some hosts would suddenly drop off the map silently. Because PUB/SUB sockets don't communicate regularly, nothing in the TCP stack would automatically try and fix the connection. With TCP_KEEPALIVE options (introduced in libzmq 3.2 and pyzmq 2.2.0.1) hopefully that will be fixed. See the following - http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html - http://api.zeromq.org/3-2:zmq-setsockopt """ keepalive_options = { # Map fedmsg config keys to zeromq socket constants 'zmq_tcp_keepalive': 'TCP_KEEPALIVE', 'zmq_tcp_keepalive_cnt': 'TCP_KEEPALIVE_CNT', 'zmq_tcp_keepalive_idle': 'TCP_KEEPALIVE_IDLE', 'zmq_tcp_keepalive_intvl': 'TCP_KEEPALIVE_INTVL', } for key, const in keepalive_options.items(): if key in config: attr = getattr(zmq, const, None) if attr: socket.setsockopt(attr, config[key])
Set a series of TCP keepalive options on the socket if and only if 1) they are specified explicitly in the config and 2) the version of pyzmq has been compiled with support We ran into a problem in FedoraInfrastructure where long-standing connections between some hosts would suddenly drop off the map silently. Because PUB/SUB sockets don't communicate regularly, nothing in the TCP stack would automatically try and fix the connection. With TCP_KEEPALIVE options (introduced in libzmq 3.2 and pyzmq 2.2.0.1) hopefully that will be fixed. See the following - http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html - http://api.zeromq.org/3-2:zmq-setsockopt
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/utils.py#L59-L88
fedora-infra/fedmsg
fedmsg/utils.py
set_tcp_reconnect
def set_tcp_reconnect(socket, config):
    """ Set a series of TCP reconnect options on the socket if
    and only if
      1) they are specified explicitly in the config and
      2) the version of pyzmq has been compiled with support

    Once our fedmsg bus grew to include many hundreds of endpoints, we
    started notices a *lot* of SYN-ACKs in the logs.  By default, if an
    endpoint is unavailable, zeromq will attempt to reconnect every 100ms
    until it gets a connection.  With this code, you can reconfigure that
    to back off exponentially to some max delay (like 1000ms) to reduce
    reconnect storm spam.

    See the following
      - http://api.zeromq.org/3-2:zmq-setsockopt
    """
    # fedmsg config key -> name of the zeromq socket constant.
    reconnect_options = {
        'zmq_reconnect_ivl': 'RECONNECT_IVL',
        'zmq_reconnect_ivl_max': 'RECONNECT_IVL_MAX',
    }
    for key, const_name in reconnect_options.items():
        if key not in config:
            continue
        # Constant is absent when pyzmq/libzmq lack reconnect-ivl support.
        constant = getattr(zmq, const_name, None)
        if constant:
            socket.setsockopt(constant, config[key])
python
def set_tcp_reconnect(socket, config): """ Set a series of TCP reconnect options on the socket if and only if 1) they are specified explicitly in the config and 2) the version of pyzmq has been compiled with support Once our fedmsg bus grew to include many hundreds of endpoints, we started notices a *lot* of SYN-ACKs in the logs. By default, if an endpoint is unavailable, zeromq will attempt to reconnect every 100ms until it gets a connection. With this code, you can reconfigure that to back off exponentially to some max delay (like 1000ms) to reduce reconnect storm spam. See the following - http://api.zeromq.org/3-2:zmq-setsockopt """ reconnect_options = { # Map fedmsg config keys to zeromq socket constants 'zmq_reconnect_ivl': 'RECONNECT_IVL', 'zmq_reconnect_ivl_max': 'RECONNECT_IVL_MAX', } for key, const in reconnect_options.items(): if key in config: attr = getattr(zmq, const, None) if attr: socket.setsockopt(attr, config[key])
Set a series of TCP reconnect options on the socket if and only if 1) they are specified explicitly in the config and 2) the version of pyzmq has been compiled with support Once our fedmsg bus grew to include many hundreds of endpoints, we started notices a *lot* of SYN-ACKs in the logs. By default, if an endpoint is unavailable, zeromq will attempt to reconnect every 100ms until it gets a connection. With this code, you can reconfigure that to back off exponentially to some max delay (like 1000ms) to reduce reconnect storm spam. See the following - http://api.zeromq.org/3-2:zmq-setsockopt
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/utils.py#L91-L117
fedora-infra/fedmsg
fedmsg/utils.py
load_class
def load_class(location):
    """ Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer'
    and return the IRCBotConsumer class.

    :param location: a "module.path:ClassName" string.
    :raises ImportError: if the module cannot be imported, or if the named
        attribute is not found in it.
    """
    import importlib

    # The original chained assignment (``mod_name, cls_name = location =
    # ...``) pointlessly rebound ``location`` to the split list; dropped.
    mod_name, cls_name = location.strip().split(':')

    # importlib.import_module always returns the *leaf* module, which is
    # what the old ``__import__(mod_name, fromlist=...)`` call was emulating
    # (it even passed the literal string '[]' as a fromlist).
    module = importlib.import_module(mod_name)

    try:
        return getattr(module, cls_name)
    except AttributeError:
        raise ImportError("%r not found in %r" % (cls_name, mod_name))
python
def load_class(location): """ Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer' and return the IRCBotConsumer class. """ mod_name, cls_name = location = location.strip().split(':') tokens = mod_name.split('.') fromlist = '[]' if len(tokens) > 1: fromlist = '.'.join(tokens[:-1]) module = __import__(mod_name, fromlist=fromlist) try: return getattr(module, cls_name) except AttributeError: raise ImportError("%r not found in %r" % (cls_name, mod_name))
Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer' and return the IRCBotConsumer class.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/utils.py#L120-L136
fedora-infra/fedmsg
fedmsg/utils.py
dict_query
def dict_query(dic, query):
    """ Query a dict with 'dotted notation'.  Returns an OrderedDict.

    A query of "foo.bar.baz" would retrieve 'wat' from this::

        dic = {
            'foo': {
                'bar': {
                    'baz': 'wat',
                }
            }
        }

    Multiple queries can be specified if comma-separated.  For instance,
    the query "foo.bar.baz,foo.bar.something_else" would return this::

        OrderedDict({
            "foo.bar.baz": "wat",
            "foo.bar.something_else": None,
        })

    """
    if not isinstance(query, six.string_types):
        raise ValueError("query must be a string, not %r" % type(query))

    def _lookup(path, node):
        """ Walk ``node`` along ``path``; None when the path dead-ends. """
        head, tail = path[0], path[1:]
        if not tail:
            return node.get(head, None)
        if head not in node:
            return None
        child = node[head]
        if isinstance(child, dict):
            return _lookup(tail, child)
        # Path continues but the value is not a dict: no match.
        return None

    results = OrderedDict()
    for raw_key in query.split(','):
        path = raw_key.strip().split('.')
        results['.'.join(path)] = _lookup(path, dic)
    return results
python
def dict_query(dic, query): """ Query a dict with 'dotted notation'. Returns an OrderedDict. A query of "foo.bar.baz" would retrieve 'wat' from this:: dic = { 'foo': { 'bar': { 'baz': 'wat', } } } Multiple queries can be specified if comma-separated. For instance, the query "foo.bar.baz,foo.bar.something_else" would return this:: OrderedDict({ "foo.bar.baz": "wat", "foo.bar.something_else": None, }) """ if not isinstance(query, six.string_types): raise ValueError("query must be a string, not %r" % type(query)) def _browse(tokens, d): """ Recurse through a dict to retrieve a value. """ current, rest = tokens[0], tokens[1:] if not rest: return d.get(current, None) if current in d: if isinstance(d[current], dict): return _browse(rest, d[current]) elif rest: return None else: return d[current] keys = [key.strip().split('.') for key in query.split(',')] return OrderedDict([ ('.'.join(tokens), _browse(tokens, dic)) for tokens in keys ])
Query a dict with 'dotted notation'. Returns an OrderedDict. A query of "foo.bar.baz" would retrieve 'wat' from this:: dic = { 'foo': { 'bar': { 'baz': 'wat', } } } Multiple queries can be specified if comma-separated. For instance, the query "foo.bar.baz,foo.bar.something_else" would return this:: OrderedDict({ "foo.bar.baz": "wat", "foo.bar.something_else": None, })
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/utils.py#L139-L183
fedora-infra/fedmsg
fedmsg/utils.py
cowsay_output
def cowsay_output(message):
    """ Invoke the ``cowsay`` command to render a message.

    Primary replacement for os.system calls.

    :param message: the text for the cow to say.
    :return: a ``(stdout, stderr)`` tuple of bytes from the process.
    :raises OSError: if the ``cowsay`` executable is not installed.
    """
    # SECURITY: the original built a shell string ('cowsay "%s"' % message)
    # with shell=True, which allowed shell injection via quotes, backticks,
    # or $(...) in the message.  Passing argv as a list with the default
    # shell=False treats the message as a single literal argument.
    ret = subprocess.Popen(
        ['cowsay', message],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)
    output, error = ret.communicate()
    return output, error
python
def cowsay_output(message): """ Invoke a shell command to print cowsay output. Primary replacement for os.system calls. """ command = 'cowsay "%s"' % message ret = subprocess.Popen( command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) output, error = ret.communicate() return output, error
Invoke a shell command to print cowsay output. Primary replacement for os.system calls.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/utils.py#L186-L195
fedora-infra/fedmsg
fedmsg/encoding/sqla.py
to_json
def to_json(obj, seen=None):
    """ Return a dict representation of ``obj``.

    Plain column attributes are copied as-is; relationship attributes are
    serialized recursively via ``expand``.
    """
    seen = seen or []

    props = list(class_mapper(type(obj)).iterate_properties)
    # Keys backed by sqlalchemy relationships, handled separately below.
    rel_keys = [p.key for p in props if type(p) is RelationshipProperty]

    result = {
        p.key: getattr(obj, p.key)
        for p in props if p.key not in rel_keys
    }
    for key in rel_keys:
        result[key] = expand(obj, getattr(obj, key), seen)
    return result
python
def to_json(obj, seen=None): """ Returns a dict representation of the object. Recursively evaluates to_json(...) on its relationships. """ if not seen: seen = [] properties = list(class_mapper(type(obj)).iterate_properties) relationships = [ p.key for p in properties if type(p) is RelationshipProperty ] attrs = [ p.key for p in properties if p.key not in relationships ] d = dict([(attr, getattr(obj, attr)) for attr in attrs]) for attr in relationships: d[attr] = expand(obj, getattr(obj, attr), seen) return d
Returns a dict representation of the object. Recursively evaluates to_json(...) on its relationships.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/encoding/sqla.py#L35-L57
fedora-infra/fedmsg
fedmsg/encoding/sqla.py
expand
def expand(obj, relation, seen):
    """ Return the to_json or id of a sqlalchemy relationship. """
    # Lazy/dynamic relationships expose .all(); materialize them first.
    if hasattr(relation, 'all'):
        relation = relation.all()

    # Collections: expand each member individually.
    if hasattr(relation, '__iter__'):
        return [expand(obj, item, seen) for item in relation]

    # Break cycles: once a type has already been serialized on this path,
    # fall back to just its id.
    if type(relation) in seen:
        return relation.id
    return to_json(relation, seen + [type(obj)])
python
def expand(obj, relation, seen): """ Return the to_json or id of a sqlalchemy relationship. """ if hasattr(relation, 'all'): relation = relation.all() if hasattr(relation, '__iter__'): return [expand(obj, item, seen) for item in relation] if type(relation) not in seen: return to_json(relation, seen + [type(obj)]) else: return relation.id
Return the to_json or id of a sqlalchemy relationship.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/encoding/sqla.py#L60-L72
fedora-infra/fedmsg
fedmsg/consumers/relay.py
SigningRelayConsumer.consume
def consume(self, msg):
    """ Sign the message prior to sending the message.

    Args:
        msg (dict): The message to sign and relay.
    """
    # Replace the body with its signed form, then relay as usual.
    signed_body = crypto.sign(msg['body'], **self.hub.config)
    msg['body'] = signed_body
    super(SigningRelayConsumer, self).consume(msg)
python
def consume(self, msg): """ Sign the message prior to sending the message. Args: msg (dict): The message to sign and relay. """ msg['body'] = crypto.sign(msg['body'], **self.hub.config) super(SigningRelayConsumer, self).consume(msg)
Sign the message prior to sending the message. Args: msg (dict): The message to sign and relay.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/consumers/relay.py#L81-L89
fedora-infra/fedmsg
fedmsg/consumers/__init__.py
FedmsgConsumer._backlog
def _backlog(self, data): """Find all the datagrepper messages between 'then' and 'now'. Put those on our work queue. Should be called in a thread so as not to block the hub at startup. """ try: data = json.loads(data) except ValueError as e: self.log.info("Status contents are %r" % data) self.log.exception(e) self.log.info("Skipping backlog retrieval.") return last = data['message']['body'] if isinstance(last, str): last = json.loads(last) then = last['timestamp'] now = int(time.time()) retrieved = 0 for message in self.get_datagrepper_results(then, now): # Take the messages from datagrepper and remove any keys that were # artificially added to the message. The presence of these would # otherwise cause message crypto validation to fail. message = fedmsg.crypto.utils.fix_datagrepper_message(message) if message['msg_id'] != last['msg_id']: retrieved = retrieved + 1 self.incoming.put(dict(body=message, topic=message['topic'])) else: self.log.warning("Already seen %r; Skipping." % last['msg_id']) self.log.info("Retrieved %i messages from datagrepper." % retrieved)
python
def _backlog(self, data): """Find all the datagrepper messages between 'then' and 'now'. Put those on our work queue. Should be called in a thread so as not to block the hub at startup. """ try: data = json.loads(data) except ValueError as e: self.log.info("Status contents are %r" % data) self.log.exception(e) self.log.info("Skipping backlog retrieval.") return last = data['message']['body'] if isinstance(last, str): last = json.loads(last) then = last['timestamp'] now = int(time.time()) retrieved = 0 for message in self.get_datagrepper_results(then, now): # Take the messages from datagrepper and remove any keys that were # artificially added to the message. The presence of these would # otherwise cause message crypto validation to fail. message = fedmsg.crypto.utils.fix_datagrepper_message(message) if message['msg_id'] != last['msg_id']: retrieved = retrieved + 1 self.incoming.put(dict(body=message, topic=message['topic'])) else: self.log.warning("Already seen %r; Skipping." % last['msg_id']) self.log.info("Retrieved %i messages from datagrepper." % retrieved)
Find all the datagrepper messages between 'then' and 'now'. Put those on our work queue. Should be called in a thread so as not to block the hub at startup.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/consumers/__init__.py#L161-L197
fedora-infra/fedmsg
fedmsg/consumers/__init__.py
FedmsgConsumer.validate
def validate(self, message): """ Validate the message before the consumer processes it. This needs to raise an exception, caught by moksha. Args: message (dict): The message as a dictionary. This must, at a minimum, contain the 'topic' key with a unicode string value and 'body' key with a dictionary value. However, the message might also be an object with a ``__json__`` method that returns a dict with a 'body' key that can be a unicode string that is JSON-encoded. Raises: RuntimeWarning: If the message is not valid. UnicodeDecodeError: If the message body is not unicode or UTF-8 and also happens to contain invalid UTF-8 binary. """ if hasattr(message, '__json__'): message = message.__json__() if isinstance(message['body'], six.text_type): message['body'] = json.loads(message['body']) elif isinstance(message['body'], six.binary_type): # Try to decode the message body as UTF-8 since it's very likely # that that was the encoding used. This API should eventually only # accept unicode strings inside messages. If a UnicodeDecodeError # happens, let that bubble up. warnings.warn('Message body is not unicode', DeprecationWarning) message['body'] = json.loads(message['body'].decode('utf-8')) # Massage STOMP messages into a more compatible format. if 'topic' not in message['body']: message['body'] = { 'topic': message.get('topic'), 'msg': message['body'], } # If we're not validating, then everything is valid. # If this is turned on globally, our child class can override it. if not self.validate_signatures: return # We assume these match inside fedmsg.crypto, so we should enforce it. if not message['topic'] == message['body']['topic']: raise RuntimeWarning("Topic envelope mismatch.") if not fedmsg.crypto.validate(message['body'], **self.hub.config): raise RuntimeWarning("Failed to authn message.")
python
def validate(self, message): """ Validate the message before the consumer processes it. This needs to raise an exception, caught by moksha. Args: message (dict): The message as a dictionary. This must, at a minimum, contain the 'topic' key with a unicode string value and 'body' key with a dictionary value. However, the message might also be an object with a ``__json__`` method that returns a dict with a 'body' key that can be a unicode string that is JSON-encoded. Raises: RuntimeWarning: If the message is not valid. UnicodeDecodeError: If the message body is not unicode or UTF-8 and also happens to contain invalid UTF-8 binary. """ if hasattr(message, '__json__'): message = message.__json__() if isinstance(message['body'], six.text_type): message['body'] = json.loads(message['body']) elif isinstance(message['body'], six.binary_type): # Try to decode the message body as UTF-8 since it's very likely # that that was the encoding used. This API should eventually only # accept unicode strings inside messages. If a UnicodeDecodeError # happens, let that bubble up. warnings.warn('Message body is not unicode', DeprecationWarning) message['body'] = json.loads(message['body'].decode('utf-8')) # Massage STOMP messages into a more compatible format. if 'topic' not in message['body']: message['body'] = { 'topic': message.get('topic'), 'msg': message['body'], } # If we're not validating, then everything is valid. # If this is turned on globally, our child class can override it. if not self.validate_signatures: return # We assume these match inside fedmsg.crypto, so we should enforce it. if not message['topic'] == message['body']['topic']: raise RuntimeWarning("Topic envelope mismatch.") if not fedmsg.crypto.validate(message['body'], **self.hub.config): raise RuntimeWarning("Failed to authn message.")
Validate the message before the consumer processes it. This needs to raise an exception, caught by moksha. Args: message (dict): The message as a dictionary. This must, at a minimum, contain the 'topic' key with a unicode string value and 'body' key with a dictionary value. However, the message might also be an object with a ``__json__`` method that returns a dict with a 'body' key that can be a unicode string that is JSON-encoded. Raises: RuntimeWarning: If the message is not valid. UnicodeDecodeError: If the message body is not unicode or UTF-8 and also happens to contain invalid UTF-8 binary.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/consumers/__init__.py#L224-L271
fedora-infra/fedmsg
fedmsg/consumers/__init__.py
FedmsgConsumer._consume
def _consume(self, message): """ Called when a message is consumed. This private method handles some administrative setup and teardown before calling the public interface `consume` typically implemented by a subclass. When `moksha.blocking_mode` is set to `False` in the config, this method always returns `None`. The argued message is stored in an internal queue where the consumer's worker threads should eventually pick it up. When `moksha.blocking_mode` is set to `True` in the config, this method should return True or False, indicating whether the message was handled or not. Specifically, in the event that the inner `consume` method raises an exception of any kind, this method should return `False` indicating that the message was not successfully handled. Args: message (dict): The message as a dictionary. Returns: bool: Should be interpreted as whether or not the message was handled by the consumer, or `None` if `moksha.blocking_mode` is set to False. """ try: self.validate(message) except RuntimeWarning as e: self.log.warn("Received invalid message {0}".format(e)) return # Pass along headers if present. May be useful to filters or # fedmsg.meta routines. if isinstance(message, dict) and 'headers' in message and 'body' in message: message['body']['headers'] = message['headers'] if hasattr(self, "replay_name"): for m in check_for_replay( self.replay_name, self.name_to_seq_id, message, self.hub.config): try: self.validate(m) return super(FedmsgConsumer, self)._consume(m) except RuntimeWarning as e: self.log.warn("Received invalid message {}".format(e)) else: return super(FedmsgConsumer, self)._consume(message)
python
def _consume(self, message): """ Called when a message is consumed. This private method handles some administrative setup and teardown before calling the public interface `consume` typically implemented by a subclass. When `moksha.blocking_mode` is set to `False` in the config, this method always returns `None`. The argued message is stored in an internal queue where the consumer's worker threads should eventually pick it up. When `moksha.blocking_mode` is set to `True` in the config, this method should return True or False, indicating whether the message was handled or not. Specifically, in the event that the inner `consume` method raises an exception of any kind, this method should return `False` indicating that the message was not successfully handled. Args: message (dict): The message as a dictionary. Returns: bool: Should be interpreted as whether or not the message was handled by the consumer, or `None` if `moksha.blocking_mode` is set to False. """ try: self.validate(message) except RuntimeWarning as e: self.log.warn("Received invalid message {0}".format(e)) return # Pass along headers if present. May be useful to filters or # fedmsg.meta routines. if isinstance(message, dict) and 'headers' in message and 'body' in message: message['body']['headers'] = message['headers'] if hasattr(self, "replay_name"): for m in check_for_replay( self.replay_name, self.name_to_seq_id, message, self.hub.config): try: self.validate(m) return super(FedmsgConsumer, self)._consume(m) except RuntimeWarning as e: self.log.warn("Received invalid message {}".format(e)) else: return super(FedmsgConsumer, self)._consume(message)
Called when a message is consumed. This private method handles some administrative setup and teardown before calling the public interface `consume` typically implemented by a subclass. When `moksha.blocking_mode` is set to `False` in the config, this method always returns `None`. The argued message is stored in an internal queue where the consumer's worker threads should eventually pick it up. When `moksha.blocking_mode` is set to `True` in the config, this method should return True or False, indicating whether the message was handled or not. Specifically, in the event that the inner `consume` method raises an exception of any kind, this method should return `False` indicating that the message was not successfully handled. Args: message (dict): The message as a dictionary. Returns: bool: Should be interpreted as whether or not the message was handled by the consumer, or `None` if `moksha.blocking_mode` is set to False.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/consumers/__init__.py#L273-L323
kenneth-reitz/args
args.py
ArgsList.remove
def remove(self, x): """Removes given arg (or list thereof) from Args object.""" def _remove(x): found = self.first(x) if found is not None: self._args.pop(found) if _is_collection(x): for item in x: _remove(x) else: _remove(x)
python
def remove(self, x): """Removes given arg (or list thereof) from Args object.""" def _remove(x): found = self.first(x) if found is not None: self._args.pop(found) if _is_collection(x): for item in x: _remove(x) else: _remove(x)
Removes given arg (or list thereof) from Args object.
https://github.com/kenneth-reitz/args/blob/9460f1a35eb3055e9e4de1f0a6932e0883c72d65/args.py#L88-L100
kenneth-reitz/args
args.py
ArgsList.first
def first(self, x): """Returns first found index of given value (or list of values).""" def _find(x): try: return self.all.index(str(x)) except ValueError: return None if _is_collection(x): for item in x: found = _find(item) if found is not None: return found return None else: return _find(x)
python
def first(self, x): """Returns first found index of given value (or list of values).""" def _find(x): try: return self.all.index(str(x)) except ValueError: return None if _is_collection(x): for item in x: found = _find(item) if found is not None: return found return None else: return _find(x)
Returns first found index of given value (or list of values).
https://github.com/kenneth-reitz/args/blob/9460f1a35eb3055e9e4de1f0a6932e0883c72d65/args.py#L121-L137
kenneth-reitz/args
args.py
ArgsList.start_with
def start_with(self, x): """Returns all arguments beginning with given string (or list thereof). """ _args = [] for arg in self.all: if _is_collection(x): for _x in x: if arg.startswith(x): _args.append(arg) break else: if arg.startswith(x): _args.append(arg) return ArgsList(_args, no_argv=True)
python
def start_with(self, x): """Returns all arguments beginning with given string (or list thereof). """ _args = [] for arg in self.all: if _is_collection(x): for _x in x: if arg.startswith(x): _args.append(arg) break else: if arg.startswith(x): _args.append(arg) return ArgsList(_args, no_argv=True)
Returns all arguments beginning with given string (or list thereof).
https://github.com/kenneth-reitz/args/blob/9460f1a35eb3055e9e4de1f0a6932e0883c72d65/args.py#L181-L198
kenneth-reitz/args
args.py
ArgsList.all_without
def all_without(self, x): """Returns all arguments not containing given string (or list thereof). """ _args = [] for arg in self.all: if _is_collection(x): for _x in x: if _x not in arg: _args.append(arg) break else: if x not in arg: _args.append(arg) return ArgsList(_args, no_argv=True)
python
def all_without(self, x): """Returns all arguments not containing given string (or list thereof). """ _args = [] for arg in self.all: if _is_collection(x): for _x in x: if _x not in arg: _args.append(arg) break else: if x not in arg: _args.append(arg) return ArgsList(_args, no_argv=True)
Returns all arguments not containing given string (or list thereof).
https://github.com/kenneth-reitz/args/blob/9460f1a35eb3055e9e4de1f0a6932e0883c72d65/args.py#L297-L314
kenneth-reitz/args
args.py
ArgsList.files
def files(self, absolute=False): """Returns an expanded list of all valid paths that were passed in.""" _paths = [] for arg in self.all: for path in _expand_path(arg): if os.path.exists(path): if absolute: _paths.append(os.path.abspath(path)) else: _paths.append(path) return _paths
python
def files(self, absolute=False): """Returns an expanded list of all valid paths that were passed in.""" _paths = [] for arg in self.all: for path in _expand_path(arg): if os.path.exists(path): if absolute: _paths.append(os.path.abspath(path)) else: _paths.append(path) return _paths
Returns an expanded list of all valid paths that were passed in.
https://github.com/kenneth-reitz/args/blob/9460f1a35eb3055e9e4de1f0a6932e0883c72d65/args.py#L329-L342
kenneth-reitz/args
args.py
ArgsList.not_files
def not_files(self): """Returns a list of all arguments that aren't files/globs.""" _args = [] for arg in self.all: if not len(_expand_path(arg)): if not os.path.exists(arg): _args.append(arg) return ArgsList(_args, no_argv=True)
python
def not_files(self): """Returns a list of all arguments that aren't files/globs.""" _args = [] for arg in self.all: if not len(_expand_path(arg)): if not os.path.exists(arg): _args.append(arg) return ArgsList(_args, no_argv=True)
Returns a list of all arguments that aren't files/globs.
https://github.com/kenneth-reitz/args/blob/9460f1a35eb3055e9e4de1f0a6932e0883c72d65/args.py#L345-L355
kenneth-reitz/args
args.py
ArgsList.assignments
def assignments(self): """Extracts assignment values from assignments.""" collection = OrderedDict() for arg in self.all: if '=' in arg: collection.setdefault( arg.split('=', 1)[0], ArgsList(no_argv=True)) collection[arg.split('=', 1)[0]]._args.append( arg.split('=', 1)[1]) return collection
python
def assignments(self): """Extracts assignment values from assignments.""" collection = OrderedDict() for arg in self.all: if '=' in arg: collection.setdefault( arg.split('=', 1)[0], ArgsList(no_argv=True)) collection[arg.split('=', 1)[0]]._args.append( arg.split('=', 1)[1]) return collection
Extracts assignment values from assignments.
https://github.com/kenneth-reitz/args/blob/9460f1a35eb3055e9e4de1f0a6932e0883c72d65/args.py#L364-L376
fedora-infra/fedmsg
fedmsg/crypto/gpg.py
sign
def sign(message, gpg_home=None, gpg_signing_key=None, **config): """ Insert a new field into the message dict and return it. The new field is: - 'signature' - the computed GPG message digest of the JSON repr of the `msg` field. """ if gpg_home is None or gpg_signing_key is None: raise ValueError("You must set the gpg_home \ and gpg_signing_key keyword arguments.") message['crypto'] = 'gpg' signature = _ctx.sign( fedmsg.encoding.dumps(message['msg']), gpg_signing_key, homedir=gpg_home ) return dict(list(message.items()) + [('signature', b64encode(signature))])
python
def sign(message, gpg_home=None, gpg_signing_key=None, **config): """ Insert a new field into the message dict and return it. The new field is: - 'signature' - the computed GPG message digest of the JSON repr of the `msg` field. """ if gpg_home is None or gpg_signing_key is None: raise ValueError("You must set the gpg_home \ and gpg_signing_key keyword arguments.") message['crypto'] = 'gpg' signature = _ctx.sign( fedmsg.encoding.dumps(message['msg']), gpg_signing_key, homedir=gpg_home ) return dict(list(message.items()) + [('signature', b64encode(signature))])
Insert a new field into the message dict and return it. The new field is: - 'signature' - the computed GPG message digest of the JSON repr of the `msg` field.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/gpg.py#L157-L177
fedora-infra/fedmsg
fedmsg/crypto/gpg.py
validate
def validate(message, gpg_home=None, **config): """ Return true or false if the message is signed appropriately. Two things must be true: 1) The signature must be valid (obviously) 2) The signing key must be in the local keyring as defined by the `gpg_home` config value. """ if gpg_home is None: raise ValueError("You must set the gpg_home keyword argument.") try: _ctx.verify( fedmsg.encoding.dumps(message['msg']), b64decode(message['signature']), homedir=gpg_home ) return True except GpgBinaryError: log.warn("Failed validation. {0}".format(six.text_type(message))) return False
python
def validate(message, gpg_home=None, **config): """ Return true or false if the message is signed appropriately. Two things must be true: 1) The signature must be valid (obviously) 2) The signing key must be in the local keyring as defined by the `gpg_home` config value. """ if gpg_home is None: raise ValueError("You must set the gpg_home keyword argument.") try: _ctx.verify( fedmsg.encoding.dumps(message['msg']), b64decode(message['signature']), homedir=gpg_home ) return True except GpgBinaryError: log.warn("Failed validation. {0}".format(six.text_type(message))) return False
Return true or false if the message is signed appropriately. Two things must be true: 1) The signature must be valid (obviously) 2) The signing key must be in the local keyring as defined by the `gpg_home` config value.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/gpg.py#L180-L200
fedora-infra/fedmsg
fedmsg/crypto/gpg.py
Context.verify
def verify(self, data, signature=None, keyrings=None, homedir=None): ''' `data` <string> the data to verify. `signature` <string> The signature, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir. ''' if isinstance(data, six.text_type): data = data.encode('utf-8') tmpdir = tempfile.mkdtemp() data_file, data_path = tempfile.mkstemp(dir=tmpdir) data_file = os.fdopen(data_file, 'wb') data_file.write(data) data_file.close() if signature: sig_file, sig_path = tempfile.mkstemp(dir=tmpdir) sig_file = os.fdopen(sig_file, 'wb') sig_file.write(signature) sig_file.close() else: sig_path = None try: return self.verify_from_file( data_path, sig_path=sig_path, keyrings=keyrings, homedir=homedir ) finally: shutil.rmtree(tmpdir)
python
def verify(self, data, signature=None, keyrings=None, homedir=None): ''' `data` <string> the data to verify. `signature` <string> The signature, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir. ''' if isinstance(data, six.text_type): data = data.encode('utf-8') tmpdir = tempfile.mkdtemp() data_file, data_path = tempfile.mkstemp(dir=tmpdir) data_file = os.fdopen(data_file, 'wb') data_file.write(data) data_file.close() if signature: sig_file, sig_path = tempfile.mkstemp(dir=tmpdir) sig_file = os.fdopen(sig_file, 'wb') sig_file.write(signature) sig_file.close() else: sig_path = None try: return self.verify_from_file( data_path, sig_path=sig_path, keyrings=keyrings, homedir=homedir ) finally: shutil.rmtree(tmpdir)
`data` <string> the data to verify. `signature` <string> The signature, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/gpg.py#L56-L87
fedora-infra/fedmsg
fedmsg/crypto/gpg.py
Context.verify_from_file
def verify_from_file(self, data_path, sig_path=None, keyrings=None, homedir=None): ''' `data_path` <string> The path to the data to verify. `sig_path` <string> The signature file, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir. ''' cmd_line = ['gpg', '--homedir', homedir or self.homedir] cmd_line.extend(self._get_keyrings_cl(keyrings)) cmd_line.append('--verify') if sig_path: cmd_line.extend([sig_path, data_path]) else: cmd_line.append(data_path) p = subprocess.Popen(cmd_line, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode: raise GpgBinaryError(stderr) return True
python
def verify_from_file(self, data_path, sig_path=None, keyrings=None, homedir=None): ''' `data_path` <string> The path to the data to verify. `sig_path` <string> The signature file, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir. ''' cmd_line = ['gpg', '--homedir', homedir or self.homedir] cmd_line.extend(self._get_keyrings_cl(keyrings)) cmd_line.append('--verify') if sig_path: cmd_line.extend([sig_path, data_path]) else: cmd_line.append(data_path) p = subprocess.Popen(cmd_line, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode: raise GpgBinaryError(stderr) return True
`data_path` <string> The path to the data to verify. `sig_path` <string> The signature file, if detached from the data. `keyrings` <list of string> Additional keyrings to search in. `homedir` <string> Override the configured homedir.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/gpg.py#L89-L110
fedora-infra/fedmsg
fedmsg/core.py
FedMsgContext.destroy
def destroy(self): """ Destroy a fedmsg context """ if getattr(self, 'publisher', None): self.log.debug("closing fedmsg publisher") self.log.debug("sent %i messages" % self._i) self.publisher.close() self.publisher = None if getattr(self, 'context', None): self.context.term() self.context = None
python
def destroy(self): """ Destroy a fedmsg context """ if getattr(self, 'publisher', None): self.log.debug("closing fedmsg publisher") self.log.debug("sent %i messages" % self._i) self.publisher.close() self.publisher = None if getattr(self, 'context', None): self.context.term() self.context = None
Destroy a fedmsg context
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/core.py#L173-L184
fedora-infra/fedmsg
fedmsg/core.py
FedMsgContext.publish
def publish(self, topic=None, msg=None, modname=None, pre_fire_hook=None, **kw): """ Send a message over the publishing zeromq socket. >>> import fedmsg >>> fedmsg.publish(topic='testing', modname='test', msg={ ... 'test': "Hello World", ... }) The above snippet will send the message ``'{test: "Hello World"}'`` over the ``<topic_prefix>.dev.test.testing`` topic. The fully qualified topic of a message is constructed out of the following pieces: <:ref:`conf-topic-prefix`>.<:ref:`conf-environment`>.<``modname``>.<``topic``> This function (and other API functions) do a little bit more heavy lifting than they let on. If the "zeromq context" is not yet initialized, :func:`fedmsg.init` is called to construct it and store it as :data:`fedmsg.__local.__context` before anything else is done. **An example from Fedora Tagger -- SQLAlchemy encoding** Here's an example from `fedora-tagger <https://github.com/fedora-infra/fedora-tagger>`_ that sends the information about a new tag over ``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``:: >>> import fedmsg >>> fedmsg.publish(topic='tag.update', msg={ ... 'user': user, ... 'tag': tag, ... }) Note that the `tag` and `user` objects are SQLAlchemy objects defined by tagger. They both have ``.__json__()`` methods which :func:`fedmsg.publish` uses to encode both objects as stringified JSON for you. Under the hood, specifically, ``.publish`` uses :mod:`fedmsg.encoding` to do this. ``fedmsg`` has also guessed the module name (``modname``) of it's caller and inserted it into the topic for you. The code from which we stole the above snippet lives in ``fedoratagger.controllers.root``. ``fedmsg`` figured that out and stripped it down to just ``fedoratagger`` for the final topic of ``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``. **Shell Usage** You could also use the ``fedmsg-logger`` from a shell script like so:: $ echo "Hello, world." 
| fedmsg-logger --topic testing $ echo '{"foo": "bar"}' | fedmsg-logger --json-input :param topic: The message topic suffix. This suffix is joined to the configured topic prefix (e.g. ``org.fedoraproject``), environment (e.g. ``prod``, ``dev``, etc.), and modname. :type topic: unicode :param msg: A message to publish. This message will be JSON-encoded prior to being sent, so the object must be composed of JSON- serializable data types. Please note that if this is already a string JSON serialization will be applied to that string. :type msg: dict :param modname: The module name that is publishing the message. If this is omitted, ``fedmsg`` will try to guess the name of the module that called it and use that to produce an intelligent topic. Specifying ``modname`` explicitly overrides this behavior. :type modname: unicode :param pre_fire_hook: A callable that will be called with a single argument -- the dict of the constructed message -- just before it is handed off to ZeroMQ for publication. :type pre_fire_hook: function """ topic = topic or 'unspecified' msg = msg or dict() # If no modname is supplied, then guess it from the call stack. modname = modname or guess_calling_module(default="fedmsg") topic = '.'.join([modname, topic]) if topic[:len(self.c['topic_prefix'])] != self.c['topic_prefix']: topic = '.'.join([ self.c['topic_prefix'], self.c['environment'], topic, ]) if isinstance(topic, six.text_type): topic = to_bytes(topic, encoding='utf8', nonstring="passthru") year = datetime.datetime.now().year self._i += 1 msg = dict( topic=topic.decode('utf-8'), msg=msg, timestamp=int(time.time()), msg_id=str(year) + '-' + str(uuid.uuid4()), i=self._i, username=getpass.getuser(), ) # Find my message-signing cert if I need one. 
if self.c.get('sign_messages', False): if not self.c.get("crypto_backend") == "gpg": if 'cert_prefix' in self.c: cert_index = "%s.%s" % (self.c['cert_prefix'], self.hostname) else: cert_index = self.c['name'] if cert_index == 'relay_inbound': cert_index = "shell.%s" % self.hostname self.c['certname'] = self.c['certnames'][cert_index] else: if 'gpg_signing_key' not in self.c: self.c['gpg_signing_key'] = self.c['gpg_keys'][self.hostname] if self.c.get('sign_messages', False): msg = fedmsg.crypto.sign(msg, **self.c) store = self.c.get('persistent_store', None) if store: # Add the seq_id field msg = store.add(msg) if pre_fire_hook: pre_fire_hook(msg) # We handle zeromq publishing ourselves. But, if that is disabled, # defer to the moksha' hub's twisted reactor to send messages (if # available). if self.c.get('zmq_enabled', True): self.publisher.send_multipart( [topic, fedmsg.encoding.dumps(msg).encode('utf-8')], flags=zmq.NOBLOCK, ) else: # Perhaps we're using STOMP or AMQP? Let moksha handle it. import moksha.hub # First, a quick sanity check. if not getattr(moksha.hub, '_hub', None): raise AttributeError("Unable to publish non-zeromq msg " "without moksha-hub initialization.") # Let moksha.hub do our work. moksha.hub._hub.send_message( topic=topic, message=fedmsg.encoding.dumps(msg).encode('utf-8'), jsonify=False, )
python
def publish(self, topic=None, msg=None, modname=None, pre_fire_hook=None, **kw): """ Send a message over the publishing zeromq socket. >>> import fedmsg >>> fedmsg.publish(topic='testing', modname='test', msg={ ... 'test': "Hello World", ... }) The above snippet will send the message ``'{test: "Hello World"}'`` over the ``<topic_prefix>.dev.test.testing`` topic. The fully qualified topic of a message is constructed out of the following pieces: <:ref:`conf-topic-prefix`>.<:ref:`conf-environment`>.<``modname``>.<``topic``> This function (and other API functions) do a little bit more heavy lifting than they let on. If the "zeromq context" is not yet initialized, :func:`fedmsg.init` is called to construct it and store it as :data:`fedmsg.__local.__context` before anything else is done. **An example from Fedora Tagger -- SQLAlchemy encoding** Here's an example from `fedora-tagger <https://github.com/fedora-infra/fedora-tagger>`_ that sends the information about a new tag over ``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``:: >>> import fedmsg >>> fedmsg.publish(topic='tag.update', msg={ ... 'user': user, ... 'tag': tag, ... }) Note that the `tag` and `user` objects are SQLAlchemy objects defined by tagger. They both have ``.__json__()`` methods which :func:`fedmsg.publish` uses to encode both objects as stringified JSON for you. Under the hood, specifically, ``.publish`` uses :mod:`fedmsg.encoding` to do this. ``fedmsg`` has also guessed the module name (``modname``) of it's caller and inserted it into the topic for you. The code from which we stole the above snippet lives in ``fedoratagger.controllers.root``. ``fedmsg`` figured that out and stripped it down to just ``fedoratagger`` for the final topic of ``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``. **Shell Usage** You could also use the ``fedmsg-logger`` from a shell script like so:: $ echo "Hello, world." 
| fedmsg-logger --topic testing $ echo '{"foo": "bar"}' | fedmsg-logger --json-input :param topic: The message topic suffix. This suffix is joined to the configured topic prefix (e.g. ``org.fedoraproject``), environment (e.g. ``prod``, ``dev``, etc.), and modname. :type topic: unicode :param msg: A message to publish. This message will be JSON-encoded prior to being sent, so the object must be composed of JSON- serializable data types. Please note that if this is already a string JSON serialization will be applied to that string. :type msg: dict :param modname: The module name that is publishing the message. If this is omitted, ``fedmsg`` will try to guess the name of the module that called it and use that to produce an intelligent topic. Specifying ``modname`` explicitly overrides this behavior. :type modname: unicode :param pre_fire_hook: A callable that will be called with a single argument -- the dict of the constructed message -- just before it is handed off to ZeroMQ for publication. :type pre_fire_hook: function """ topic = topic or 'unspecified' msg = msg or dict() # If no modname is supplied, then guess it from the call stack. modname = modname or guess_calling_module(default="fedmsg") topic = '.'.join([modname, topic]) if topic[:len(self.c['topic_prefix'])] != self.c['topic_prefix']: topic = '.'.join([ self.c['topic_prefix'], self.c['environment'], topic, ]) if isinstance(topic, six.text_type): topic = to_bytes(topic, encoding='utf8', nonstring="passthru") year = datetime.datetime.now().year self._i += 1 msg = dict( topic=topic.decode('utf-8'), msg=msg, timestamp=int(time.time()), msg_id=str(year) + '-' + str(uuid.uuid4()), i=self._i, username=getpass.getuser(), ) # Find my message-signing cert if I need one. 
if self.c.get('sign_messages', False): if not self.c.get("crypto_backend") == "gpg": if 'cert_prefix' in self.c: cert_index = "%s.%s" % (self.c['cert_prefix'], self.hostname) else: cert_index = self.c['name'] if cert_index == 'relay_inbound': cert_index = "shell.%s" % self.hostname self.c['certname'] = self.c['certnames'][cert_index] else: if 'gpg_signing_key' not in self.c: self.c['gpg_signing_key'] = self.c['gpg_keys'][self.hostname] if self.c.get('sign_messages', False): msg = fedmsg.crypto.sign(msg, **self.c) store = self.c.get('persistent_store', None) if store: # Add the seq_id field msg = store.add(msg) if pre_fire_hook: pre_fire_hook(msg) # We handle zeromq publishing ourselves. But, if that is disabled, # defer to the moksha' hub's twisted reactor to send messages (if # available). if self.c.get('zmq_enabled', True): self.publisher.send_multipart( [topic, fedmsg.encoding.dumps(msg).encode('utf-8')], flags=zmq.NOBLOCK, ) else: # Perhaps we're using STOMP or AMQP? Let moksha handle it. import moksha.hub # First, a quick sanity check. if not getattr(moksha.hub, '_hub', None): raise AttributeError("Unable to publish non-zeromq msg " "without moksha-hub initialization.") # Let moksha.hub do our work. moksha.hub._hub.send_message( topic=topic, message=fedmsg.encoding.dumps(msg).encode('utf-8'), jsonify=False, )
Send a message over the publishing zeromq socket. >>> import fedmsg >>> fedmsg.publish(topic='testing', modname='test', msg={ ... 'test': "Hello World", ... }) The above snippet will send the message ``'{test: "Hello World"}'`` over the ``<topic_prefix>.dev.test.testing`` topic. The fully qualified topic of a message is constructed out of the following pieces: <:ref:`conf-topic-prefix`>.<:ref:`conf-environment`>.<``modname``>.<``topic``> This function (and other API functions) do a little bit more heavy lifting than they let on. If the "zeromq context" is not yet initialized, :func:`fedmsg.init` is called to construct it and store it as :data:`fedmsg.__local.__context` before anything else is done. **An example from Fedora Tagger -- SQLAlchemy encoding** Here's an example from `fedora-tagger <https://github.com/fedora-infra/fedora-tagger>`_ that sends the information about a new tag over ``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``:: >>> import fedmsg >>> fedmsg.publish(topic='tag.update', msg={ ... 'user': user, ... 'tag': tag, ... }) Note that the `tag` and `user` objects are SQLAlchemy objects defined by tagger. They both have ``.__json__()`` methods which :func:`fedmsg.publish` uses to encode both objects as stringified JSON for you. Under the hood, specifically, ``.publish`` uses :mod:`fedmsg.encoding` to do this. ``fedmsg`` has also guessed the module name (``modname``) of it's caller and inserted it into the topic for you. The code from which we stole the above snippet lives in ``fedoratagger.controllers.root``. ``fedmsg`` figured that out and stripped it down to just ``fedoratagger`` for the final topic of ``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``. **Shell Usage** You could also use the ``fedmsg-logger`` from a shell script like so:: $ echo "Hello, world." | fedmsg-logger --topic testing $ echo '{"foo": "bar"}' | fedmsg-logger --json-input :param topic: The message topic suffix. 
This suffix is joined to the configured topic prefix (e.g. ``org.fedoraproject``), environment (e.g. ``prod``, ``dev``, etc.), and modname. :type topic: unicode :param msg: A message to publish. This message will be JSON-encoded prior to being sent, so the object must be composed of JSON- serializable data types. Please note that if this is already a string JSON serialization will be applied to that string. :type msg: dict :param modname: The module name that is publishing the message. If this is omitted, ``fedmsg`` will try to guess the name of the module that called it and use that to produce an intelligent topic. Specifying ``modname`` explicitly overrides this behavior. :type modname: unicode :param pre_fire_hook: A callable that will be called with a single argument -- the dict of the constructed message -- just before it is handed off to ZeroMQ for publication. :type pre_fire_hook: function
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/core.py#L192-L343
fedora-infra/fedmsg
fedmsg/core.py
FedMsgContext.tail_messages
def tail_messages(self, topic="", passive=False, **kw): """ Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`. Args: topic (six.text_type): The topic to subscribe to. The default is to subscribe to all topics. passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets instead of connecting to them. Defaults to ``False``. **kw: Additional keyword arguments. Currently none are used. Yields: tuple: A 4-tuple in the form (name, endpoint, topic, message). """ if not self.c.get('zmq_enabled', True): raise ValueError("fedmsg.tail_messages() is only available for " "zeromq. Use the hub-consumer approach for " "STOMP or AMQP support.") poller, subs = self._create_poller(topic=topic, passive=False, **kw) try: for msg in self._poll(poller, subs): yield msg finally: self._close_subs(subs)
python
def tail_messages(self, topic="", passive=False, **kw): """ Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`. Args: topic (six.text_type): The topic to subscribe to. The default is to subscribe to all topics. passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets instead of connecting to them. Defaults to ``False``. **kw: Additional keyword arguments. Currently none are used. Yields: tuple: A 4-tuple in the form (name, endpoint, topic, message). """ if not self.c.get('zmq_enabled', True): raise ValueError("fedmsg.tail_messages() is only available for " "zeromq. Use the hub-consumer approach for " "STOMP or AMQP support.") poller, subs = self._create_poller(topic=topic, passive=False, **kw) try: for msg in self._poll(poller, subs): yield msg finally: self._close_subs(subs)
Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`. Args: topic (six.text_type): The topic to subscribe to. The default is to subscribe to all topics. passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets instead of connecting to them. Defaults to ``False``. **kw: Additional keyword arguments. Currently none are used. Yields: tuple: A 4-tuple in the form (name, endpoint, topic, message).
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/core.py#L345-L370
fedora-infra/fedmsg
fedmsg/crypto/x509_ng.py
sign
def sign(message, ssldir=None, certname=None, **config): """Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed RSA message digest of the JSON repr. - 'certificate' - the base64 X509 certificate of the sending host. Arg: message (dict): An unsigned message to sign. ssldir (str): The absolute path to the directory containing the SSL certificates to use. certname (str): The name of the key pair to sign the message with. This corresponds to the filenames within ``ssldir`` sans prefixes. The key pair must be named ``<certname>.key`` and ``<certname>.crt`` Returns: dict: The signed message. """ if ssldir is None or certname is None: error = "You must set the ssldir and certname keyword arguments." raise ValueError(error) message['crypto'] = 'x509' with open("%s/%s.key" % (ssldir, certname), "rb") as f: rsa_private = serialization.load_pem_private_key( data=f.read(), password=None, backend=default_backend() ) signature = rsa_private.sign( fedmsg.encoding.dumps(message).encode('utf-8'), asymmetric.padding.PKCS1v15(), hashes.SHA1(), ) with open("%s/%s.crt" % (ssldir, certname), "rb") as f: cert = x509.load_pem_x509_certificate(f.read(), default_backend()) cert_pem = cert.public_bytes(serialization.Encoding.PEM) return _prep_crypto_msg(dict(list(message.items()) + [ ('signature', base64.b64encode(signature).decode('ascii')), ('certificate', base64.b64encode(cert_pem).decode('ascii')), ]))
python
def sign(message, ssldir=None, certname=None, **config): """Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed RSA message digest of the JSON repr. - 'certificate' - the base64 X509 certificate of the sending host. Arg: message (dict): An unsigned message to sign. ssldir (str): The absolute path to the directory containing the SSL certificates to use. certname (str): The name of the key pair to sign the message with. This corresponds to the filenames within ``ssldir`` sans prefixes. The key pair must be named ``<certname>.key`` and ``<certname>.crt`` Returns: dict: The signed message. """ if ssldir is None or certname is None: error = "You must set the ssldir and certname keyword arguments." raise ValueError(error) message['crypto'] = 'x509' with open("%s/%s.key" % (ssldir, certname), "rb") as f: rsa_private = serialization.load_pem_private_key( data=f.read(), password=None, backend=default_backend() ) signature = rsa_private.sign( fedmsg.encoding.dumps(message).encode('utf-8'), asymmetric.padding.PKCS1v15(), hashes.SHA1(), ) with open("%s/%s.crt" % (ssldir, certname), "rb") as f: cert = x509.load_pem_x509_certificate(f.read(), default_backend()) cert_pem = cert.public_bytes(serialization.Encoding.PEM) return _prep_crypto_msg(dict(list(message.items()) + [ ('signature', base64.b64encode(signature).decode('ascii')), ('certificate', base64.b64encode(cert_pem).decode('ascii')), ]))
Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed RSA message digest of the JSON repr. - 'certificate' - the base64 X509 certificate of the sending host. Arg: message (dict): An unsigned message to sign. ssldir (str): The absolute path to the directory containing the SSL certificates to use. certname (str): The name of the key pair to sign the message with. This corresponds to the filenames within ``ssldir`` sans prefixes. The key pair must be named ``<certname>.key`` and ``<certname>.crt`` Returns: dict: The signed message.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/x509_ng.py#L53-L97
fedora-infra/fedmsg
fedmsg/crypto/x509_ng.py
_prep_crypto_msg
def _prep_crypto_msg(message): """Split the signature and certificate in the same way M2Crypto does. M2Crypto is dropping newlines into its signature and certificate. This exists purely to maintain backwards compatibility. Args: message (dict): A message with the ``signature`` and ``certificate`` keywords. The values of these two keys must be byte strings. Returns: dict: The same message, but with the values of ``signature`` and ``certificate`` split every 76 characters with a newline and a final newline at the end. """ signature = message['signature'] certificate = message['certificate'] sliced_signature, sliced_certificate = [], [] for x in range(0, len(signature), 76): sliced_signature.append(signature[x:x+76]) for x in range(0, len(certificate), 76): sliced_certificate.append(certificate[x:x+76]) message['signature'] = u'\n'.join(sliced_signature) + u'\n' message['certificate'] = u'\n'.join(sliced_certificate) + u'\n' return message
python
def _prep_crypto_msg(message): """Split the signature and certificate in the same way M2Crypto does. M2Crypto is dropping newlines into its signature and certificate. This exists purely to maintain backwards compatibility. Args: message (dict): A message with the ``signature`` and ``certificate`` keywords. The values of these two keys must be byte strings. Returns: dict: The same message, but with the values of ``signature`` and ``certificate`` split every 76 characters with a newline and a final newline at the end. """ signature = message['signature'] certificate = message['certificate'] sliced_signature, sliced_certificate = [], [] for x in range(0, len(signature), 76): sliced_signature.append(signature[x:x+76]) for x in range(0, len(certificate), 76): sliced_certificate.append(certificate[x:x+76]) message['signature'] = u'\n'.join(sliced_signature) + u'\n' message['certificate'] = u'\n'.join(sliced_certificate) + u'\n' return message
Split the signature and certificate in the same way M2Crypto does. M2Crypto is dropping newlines into its signature and certificate. This exists purely to maintain backwards compatibility. Args: message (dict): A message with the ``signature`` and ``certificate`` keywords. The values of these two keys must be byte strings. Returns: dict: The same message, but with the values of ``signature`` and ``certificate`` split every 76 characters with a newline and a final newline at the end.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/x509_ng.py#L100-L124
fedora-infra/fedmsg
fedmsg/crypto/x509_ng.py
validate
def validate(message, ssldir=None, **config): """ Validate the signature on the given message. Four things must be true for the signature to be valid: 1) The X.509 cert must be signed by our CA 2) The cert must not be in our CRL. 3) We must be able to verify the signature using the RSA public key contained in the X.509 cert. 4) The topic of the message and the CN on the cert must appear in the :ref:`conf-routing-policy` dict. Args: message (dict): A signed message in need of validation. A signed message contains the 'signature' and 'certificate' keys. ssldir (str): The path to the directory containing PEM-encoded X.509 key pairs. Returns: bool: True of the message passes validation, False otherwise. """ for field in ['signature', 'certificate']: if field not in message: _log.warn('No %s field found.', field) return False if not isinstance(message[field], six.text_type): _log.error('msg[%r] is not a unicode string' % field) try: # Make an effort to decode it, it's very likely utf-8 since that's what # is hardcoded throughout fedmsg. Worst case scenario is it'll cause a # validation error when there shouldn't be one. 
message[field] = message[field].decode('utf-8') except UnicodeError as e: _log.error("Unable to decode the message '%s' field: %s", field, str(e)) return False signature = base64.b64decode(message['signature']) certificate = base64.b64decode(message['certificate']) message = fedmsg.crypto.strip_credentials(message) # Unfortunately we can't change this defaulting to Fedora behavior until # fedmsg-2.0 ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt') crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem') try: ca_certificate, crl = utils.load_certificates(ca_location, crl_location) _validate_signing_cert(ca_certificate, certificate, crl) except (IOError, RequestException, X509StoreContextError) as e: # Maybe the CA/CRL is expired or just rotated, so invalidate the cache and try again try: ca_certificate, crl = utils.load_certificates( ca_location, crl_location, invalidate_cache=True) _validate_signing_cert(ca_certificate, certificate, crl) except (IOError, RequestException, X509StoreContextError) as e: _log.error(str(e)) return False # Validate the signature of the message itself try: crypto_certificate = x509.load_pem_x509_certificate(certificate, default_backend()) crypto_certificate.public_key().verify( signature, fedmsg.encoding.dumps(message).encode('utf-8'), asymmetric.padding.PKCS1v15(), hashes.SHA1(), ) except InvalidSignature as e: _log.error('message [{m}] has an invalid signature: {e}'.format( m=message, e=str(e))) return False # Step 4, check that the certificate is permitted to emit messages for the # topic. common_name = crypto_certificate.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME) common_name = common_name[0] routing_policy = config.get('routing_policy', {}) nitpicky = config.get('routing_nitpicky', False) return utils.validate_policy( message.get('topic'), common_name.value, routing_policy, nitpicky=nitpicky)
python
def validate(message, ssldir=None, **config): """ Validate the signature on the given message. Four things must be true for the signature to be valid: 1) The X.509 cert must be signed by our CA 2) The cert must not be in our CRL. 3) We must be able to verify the signature using the RSA public key contained in the X.509 cert. 4) The topic of the message and the CN on the cert must appear in the :ref:`conf-routing-policy` dict. Args: message (dict): A signed message in need of validation. A signed message contains the 'signature' and 'certificate' keys. ssldir (str): The path to the directory containing PEM-encoded X.509 key pairs. Returns: bool: True of the message passes validation, False otherwise. """ for field in ['signature', 'certificate']: if field not in message: _log.warn('No %s field found.', field) return False if not isinstance(message[field], six.text_type): _log.error('msg[%r] is not a unicode string' % field) try: # Make an effort to decode it, it's very likely utf-8 since that's what # is hardcoded throughout fedmsg. Worst case scenario is it'll cause a # validation error when there shouldn't be one. 
message[field] = message[field].decode('utf-8') except UnicodeError as e: _log.error("Unable to decode the message '%s' field: %s", field, str(e)) return False signature = base64.b64decode(message['signature']) certificate = base64.b64decode(message['certificate']) message = fedmsg.crypto.strip_credentials(message) # Unfortunately we can't change this defaulting to Fedora behavior until # fedmsg-2.0 ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt') crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem') try: ca_certificate, crl = utils.load_certificates(ca_location, crl_location) _validate_signing_cert(ca_certificate, certificate, crl) except (IOError, RequestException, X509StoreContextError) as e: # Maybe the CA/CRL is expired or just rotated, so invalidate the cache and try again try: ca_certificate, crl = utils.load_certificates( ca_location, crl_location, invalidate_cache=True) _validate_signing_cert(ca_certificate, certificate, crl) except (IOError, RequestException, X509StoreContextError) as e: _log.error(str(e)) return False # Validate the signature of the message itself try: crypto_certificate = x509.load_pem_x509_certificate(certificate, default_backend()) crypto_certificate.public_key().verify( signature, fedmsg.encoding.dumps(message).encode('utf-8'), asymmetric.padding.PKCS1v15(), hashes.SHA1(), ) except InvalidSignature as e: _log.error('message [{m}] has an invalid signature: {e}'.format( m=message, e=str(e))) return False # Step 4, check that the certificate is permitted to emit messages for the # topic. common_name = crypto_certificate.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME) common_name = common_name[0] routing_policy = config.get('routing_policy', {}) nitpicky = config.get('routing_nitpicky', False) return utils.validate_policy( message.get('topic'), common_name.value, routing_policy, nitpicky=nitpicky)
Validate the signature on the given message. Four things must be true for the signature to be valid: 1) The X.509 cert must be signed by our CA 2) The cert must not be in our CRL. 3) We must be able to verify the signature using the RSA public key contained in the X.509 cert. 4) The topic of the message and the CN on the cert must appear in the :ref:`conf-routing-policy` dict. Args: message (dict): A signed message in need of validation. A signed message contains the 'signature' and 'certificate' keys. ssldir (str): The path to the directory containing PEM-encoded X.509 key pairs. Returns: bool: True of the message passes validation, False otherwise.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/x509_ng.py#L127-L206
fedora-infra/fedmsg
fedmsg/crypto/x509_ng.py
_validate_signing_cert
def _validate_signing_cert(ca_certificate, certificate, crl=None): """ Validate an X509 certificate using pyOpenSSL. .. note:: pyOpenSSL is a short-term solution to certificate validation. pyOpenSSL is basically in maintenance mode and there's a desire in upstream to move all the functionality into cryptography. Args: ca_certificate (str): A PEM-encoded Certificate Authority certificate to validate the ``certificate`` with. certificate (str): A PEM-encoded certificate that is in need of validation. crl (str): A PEM-encoded Certificate Revocation List which, if provided, will be taken into account when validating the certificate. Raises: X509StoreContextError: If the certificate failed validation. The exception contains the details of the error. """ pyopenssl_cert = load_certificate(FILETYPE_PEM, certificate) pyopenssl_ca_cert = load_certificate(FILETYPE_PEM, ca_certificate) cert_store = X509Store() cert_store.add_cert(pyopenssl_ca_cert) if crl: pyopenssl_crl = load_crl(FILETYPE_PEM, crl) cert_store.add_crl(pyopenssl_crl) cert_store.set_flags(X509StoreFlags.CRL_CHECK | X509StoreFlags.CRL_CHECK_ALL) cert_store_context = X509StoreContext(cert_store, pyopenssl_cert) cert_store_context.verify_certificate()
python
def _validate_signing_cert(ca_certificate, certificate, crl=None): """ Validate an X509 certificate using pyOpenSSL. .. note:: pyOpenSSL is a short-term solution to certificate validation. pyOpenSSL is basically in maintenance mode and there's a desire in upstream to move all the functionality into cryptography. Args: ca_certificate (str): A PEM-encoded Certificate Authority certificate to validate the ``certificate`` with. certificate (str): A PEM-encoded certificate that is in need of validation. crl (str): A PEM-encoded Certificate Revocation List which, if provided, will be taken into account when validating the certificate. Raises: X509StoreContextError: If the certificate failed validation. The exception contains the details of the error. """ pyopenssl_cert = load_certificate(FILETYPE_PEM, certificate) pyopenssl_ca_cert = load_certificate(FILETYPE_PEM, ca_certificate) cert_store = X509Store() cert_store.add_cert(pyopenssl_ca_cert) if crl: pyopenssl_crl = load_crl(FILETYPE_PEM, crl) cert_store.add_crl(pyopenssl_crl) cert_store.set_flags(X509StoreFlags.CRL_CHECK | X509StoreFlags.CRL_CHECK_ALL) cert_store_context = X509StoreContext(cert_store, pyopenssl_cert) cert_store_context.verify_certificate()
Validate an X509 certificate using pyOpenSSL. .. note:: pyOpenSSL is a short-term solution to certificate validation. pyOpenSSL is basically in maintenance mode and there's a desire in upstream to move all the functionality into cryptography. Args: ca_certificate (str): A PEM-encoded Certificate Authority certificate to validate the ``certificate`` with. certificate (str): A PEM-encoded certificate that is in need of validation. crl (str): A PEM-encoded Certificate Revocation List which, if provided, will be taken into account when validating the certificate. Raises: X509StoreContextError: If the certificate failed validation. The exception contains the details of the error.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/x509_ng.py#L209-L240
fedora-infra/fedmsg
fedmsg/consumers/ircbot.py
IRCBotConsumer.consume
def consume(self, msg): """ Forward on messages from the bus to all IRC connections. """ log.debug("Got message %r" % msg) topic, body = msg.get('topic'), msg.get('body') for client in self.irc_clients: if not client.factory.filters or ( client.factory.filters and self.apply_filters(client.factory.filters, topic, body) ): raw_msg = self.prettify( topic=topic, msg=body, pretty=client.factory.pretty, terse=client.factory.terse, short=client.factory.short, ) send = getattr(client, self.hub.config['irc_method'], 'notice') send(client.factory.channel, raw_msg.encode('utf-8')) backlog = self.incoming.qsize() if backlog and (backlog % 20) == 0: warning = "* backlogged by %i messages" % backlog log.warning(warning) send(client.factory.channel, warning.encode('utf-8'))
python
def consume(self, msg): """ Forward on messages from the bus to all IRC connections. """ log.debug("Got message %r" % msg) topic, body = msg.get('topic'), msg.get('body') for client in self.irc_clients: if not client.factory.filters or ( client.factory.filters and self.apply_filters(client.factory.filters, topic, body) ): raw_msg = self.prettify( topic=topic, msg=body, pretty=client.factory.pretty, terse=client.factory.terse, short=client.factory.short, ) send = getattr(client, self.hub.config['irc_method'], 'notice') send(client.factory.channel, raw_msg.encode('utf-8')) backlog = self.incoming.qsize() if backlog and (backlog % 20) == 0: warning = "* backlogged by %i messages" % backlog log.warning(warning) send(client.factory.channel, warning.encode('utf-8'))
Forward on messages from the bus to all IRC connections.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/consumers/ircbot.py#L352-L376
fedora-infra/fedmsg
fedmsg/commands/check.py
check
def check(timeout, consumer=None, producer=None): """This command is used to check the status of consumers and producers. If no consumers and producers are provided, the status of all consumers and producers is printed. """ # It's weird to say --consumers, but there are multiple, so rename the variables consumers, producers = consumer, producer config = load_config() endpoint = config.get('moksha.monitoring.socket') if not endpoint: raise click.ClickException('No monitoring endpoint has been configured: ' 'please set "moksha.monitoring.socket"') context = zmq.Context.instance() socket = context.socket(zmq.SUB) # ZMQ takes the timeout in milliseconds socket.set(zmq.RCVTIMEO, timeout * 1000) socket.subscribe(b'') socket.connect(endpoint) try: message = socket.recv_json() except zmq.error.Again: raise click.ClickException( 'Failed to receive message from the monitoring endpoint ({e}) in {t} ' 'seconds.'.format(e=endpoint, t=timeout)) if not consumers and not producers: click.echo('No consumers or producers specified so all will be shown.') else: missing = False uninitialized = False for messager_type, messagers in (('consumers', consumers), ('producers', producers)): active = {} for messager in message[messager_type]: active[messager['name']] = messager for messager in messagers: if messager not in active: click.echo('"{m}" is not active!'.format(m=messager), err=True) missing = True else: if active[messager]['initialized'] is not True: click.echo('"{m}" is not initialized!'.format(m=messager), err=True) uninitialized = True if missing: raise click.ClickException('Some consumers and/or producers are missing!') elif uninitialized: raise click.ClickException('Some consumers and/or producers are uninitialized!') else: click.echo('All consumers and producers are active!') click.echo(json.dumps(message, indent=2, sort_keys=True))
python
def check(timeout, consumer=None, producer=None): """This command is used to check the status of consumers and producers. If no consumers and producers are provided, the status of all consumers and producers is printed. """ # It's weird to say --consumers, but there are multiple, so rename the variables consumers, producers = consumer, producer config = load_config() endpoint = config.get('moksha.monitoring.socket') if not endpoint: raise click.ClickException('No monitoring endpoint has been configured: ' 'please set "moksha.monitoring.socket"') context = zmq.Context.instance() socket = context.socket(zmq.SUB) # ZMQ takes the timeout in milliseconds socket.set(zmq.RCVTIMEO, timeout * 1000) socket.subscribe(b'') socket.connect(endpoint) try: message = socket.recv_json() except zmq.error.Again: raise click.ClickException( 'Failed to receive message from the monitoring endpoint ({e}) in {t} ' 'seconds.'.format(e=endpoint, t=timeout)) if not consumers and not producers: click.echo('No consumers or producers specified so all will be shown.') else: missing = False uninitialized = False for messager_type, messagers in (('consumers', consumers), ('producers', producers)): active = {} for messager in message[messager_type]: active[messager['name']] = messager for messager in messagers: if messager not in active: click.echo('"{m}" is not active!'.format(m=messager), err=True) missing = True else: if active[messager]['initialized'] is not True: click.echo('"{m}" is not initialized!'.format(m=messager), err=True) uninitialized = True if missing: raise click.ClickException('Some consumers and/or producers are missing!') elif uninitialized: raise click.ClickException('Some consumers and/or producers are uninitialized!') else: click.echo('All consumers and producers are active!') click.echo(json.dumps(message, indent=2, sort_keys=True))
This command is used to check the status of consumers and producers. If no consumers and producers are provided, the status of all consumers and producers is printed.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/commands/check.py#L43-L97
fedora-infra/fedmsg
fedmsg/__init__.py
init
def init(**kw): """ Initialize an instance of :class:`fedmsg.core.FedMsgContext`. The config is loaded with :func:`fedmsg.config.load_config` and updated by any keyword arguments. This config is used to initialize the context object. The object is stored in a thread local as :data:`fedmsg.__local.__context`. """ if getattr(__local, '__context', None): raise ValueError("fedmsg already initialized") # Read config from CLI args and a config file config = fedmsg.config.load_config([], None) # Override the defaults with whatever the user explicitly passes in. config.update(kw) __local.__context = fedmsg.core.FedMsgContext(**config) return __local.__context
python
def init(**kw): """ Initialize an instance of :class:`fedmsg.core.FedMsgContext`. The config is loaded with :func:`fedmsg.config.load_config` and updated by any keyword arguments. This config is used to initialize the context object. The object is stored in a thread local as :data:`fedmsg.__local.__context`. """ if getattr(__local, '__context', None): raise ValueError("fedmsg already initialized") # Read config from CLI args and a config file config = fedmsg.config.load_config([], None) # Override the defaults with whatever the user explicitly passes in. config.update(kw) __local.__context = fedmsg.core.FedMsgContext(**config) return __local.__context
Initialize an instance of :class:`fedmsg.core.FedMsgContext`. The config is loaded with :func:`fedmsg.config.load_config` and updated by any keyword arguments. This config is used to initialize the context object. The object is stored in a thread local as :data:`fedmsg.__local.__context`.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/__init__.py#L41-L62
fedora-infra/fedmsg
fedmsg/commands/collectd.py
CollectdConsumer.dump
def dump(self): """ Called by CollectdProducer every `n` seconds. """ # Print out the collectd feedback. # This is sent to stdout while other log messages are sent to stderr. for k, v in sorted(self._dict.items()): print(self.formatter(k, v)) # Reset each entry to zero for k, v in sorted(self._dict.items()): self._dict[k] = 0
python
def dump(self): """ Called by CollectdProducer every `n` seconds. """ # Print out the collectd feedback. # This is sent to stdout while other log messages are sent to stderr. for k, v in sorted(self._dict.items()): print(self.formatter(k, v)) # Reset each entry to zero for k, v in sorted(self._dict.items()): self._dict[k] = 0
Called by CollectdProducer every `n` seconds.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/commands/collectd.py#L51-L61
fedora-infra/fedmsg
fedmsg/commands/collectd.py
CollectdConsumer.formatter
def formatter(self, key, value): """ Format messages for collectd to consume. """ template = "PUTVAL {host}/fedmsg/fedmsg_wallboard-{key} " +\ "interval={interval} {timestamp}:{value}" timestamp = int(time.time()) interval = self.hub.config['collectd_interval'] return template.format( host=self.host, timestamp=timestamp, value=value, interval=interval, key=key, )
python
def formatter(self, key, value): """ Format messages for collectd to consume. """ template = "PUTVAL {host}/fedmsg/fedmsg_wallboard-{key} " +\ "interval={interval} {timestamp}:{value}" timestamp = int(time.time()) interval = self.hub.config['collectd_interval'] return template.format( host=self.host, timestamp=timestamp, value=value, interval=interval, key=key, )
Format messages for collectd to consume.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/commands/collectd.py#L63-L75
fedora-infra/fedmsg
fedmsg/crypto/__init__.py
init
def init(**config): """ Initialize the crypto backend. The backend can be one of two plugins: - 'x509' - Uses x509 certificates. - 'gpg' - Uses GnuPG keys. """ global _implementation global _validate_implementations if config.get('crypto_backend') == 'gpg': _implementation = gpg else: _implementation = x509 _validate_implementations = [] for mod in config.get('crypto_validate_backends', []): if mod == 'gpg': _validate_implementations.append(gpg) elif mod == 'x509': _validate_implementations.append(x509) else: raise ValueError("%r is not a valid crypto backend" % mod) if not _validate_implementations: _validate_implementations.append(_implementation)
python
def init(**config): """ Initialize the crypto backend. The backend can be one of two plugins: - 'x509' - Uses x509 certificates. - 'gpg' - Uses GnuPG keys. """ global _implementation global _validate_implementations if config.get('crypto_backend') == 'gpg': _implementation = gpg else: _implementation = x509 _validate_implementations = [] for mod in config.get('crypto_validate_backends', []): if mod == 'gpg': _validate_implementations.append(gpg) elif mod == 'x509': _validate_implementations.append(x509) else: raise ValueError("%r is not a valid crypto backend" % mod) if not _validate_implementations: _validate_implementations.append(_implementation)
Initialize the crypto backend. The backend can be one of two plugins: - 'x509' - Uses x509 certificates. - 'gpg' - Uses GnuPG keys.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/__init__.py#L166-L192
fedora-infra/fedmsg
fedmsg/crypto/__init__.py
sign
def sign(message, **config): """ Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed message digest of the JSON repr. - 'certificate' - the base64 certificate or gpg key of the signator. """ if not _implementation: init(**config) return _implementation.sign(message, **config)
python
def sign(message, **config): """ Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed message digest of the JSON repr. - 'certificate' - the base64 certificate or gpg key of the signator. """ if not _implementation: init(**config) return _implementation.sign(message, **config)
Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed message digest of the JSON repr. - 'certificate' - the base64 certificate or gpg key of the signator.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/__init__.py#L195-L207
fedora-infra/fedmsg
fedmsg/crypto/__init__.py
validate
def validate(message, **config): """ Return true or false if the message is signed appropriately. """ if not _validate_implementations: init(**config) cfg = copy.deepcopy(config) if 'gpg_home' not in cfg: cfg['gpg_home'] = os.path.expanduser('~/.gnupg/') if 'ssldir' not in cfg: cfg['ssldir'] = '/etc/pki/fedmsg' if 'crypto' in message: if not message['crypto'] in _possible_backends: log.warn("Message specified an impossible crypto backend") return False try: backend = _possible_backends[message['crypto']] except Exception as e: log.warn("Failed to load %r %r" % (message['crypto'], e)) return False # fedmsg 0.7.2 and earlier did not specify which crypto backend a message # was signed with. As long as we care about interoperability with those # versions, attempt to guess the backend to use elif 'certificate' in message: backend = x509 elif 'signature' in message: backend = gpg else: log.warn('Could not determine crypto backend. Message unsigned?') return False if backend in _validate_implementations: return backend.validate(message, **cfg) else: log.warn("Crypto backend %r is disallowed" % backend) return False
python
def validate(message, **config): """ Return true or false if the message is signed appropriately. """ if not _validate_implementations: init(**config) cfg = copy.deepcopy(config) if 'gpg_home' not in cfg: cfg['gpg_home'] = os.path.expanduser('~/.gnupg/') if 'ssldir' not in cfg: cfg['ssldir'] = '/etc/pki/fedmsg' if 'crypto' in message: if not message['crypto'] in _possible_backends: log.warn("Message specified an impossible crypto backend") return False try: backend = _possible_backends[message['crypto']] except Exception as e: log.warn("Failed to load %r %r" % (message['crypto'], e)) return False # fedmsg 0.7.2 and earlier did not specify which crypto backend a message # was signed with. As long as we care about interoperability with those # versions, attempt to guess the backend to use elif 'certificate' in message: backend = x509 elif 'signature' in message: backend = gpg else: log.warn('Could not determine crypto backend. Message unsigned?') return False if backend in _validate_implementations: return backend.validate(message, **cfg) else: log.warn("Crypto backend %r is disallowed" % backend) return False
Return true or false if the message is signed appropriately.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/__init__.py#L210-L247
fedora-infra/fedmsg
fedmsg/crypto/__init__.py
validate_signed_by
def validate_signed_by(message, signer, **config): """ Validate that a message was signed by a particular certificate. This works much like ``validate(...)``, but additionally accepts a ``signer`` argument. It will reject a message for any of the regular circumstances, but will also reject it if its not signed by a cert with the argued name. """ config = copy.deepcopy(config) config['routing_nitpicky'] = True config['routing_policy'] = {message['topic']: [signer]} return validate(message, **config)
python
def validate_signed_by(message, signer, **config): """ Validate that a message was signed by a particular certificate. This works much like ``validate(...)``, but additionally accepts a ``signer`` argument. It will reject a message for any of the regular circumstances, but will also reject it if its not signed by a cert with the argued name. """ config = copy.deepcopy(config) config['routing_nitpicky'] = True config['routing_policy'] = {message['topic']: [signer]} return validate(message, **config)
Validate that a message was signed by a particular certificate. This works much like ``validate(...)``, but additionally accepts a ``signer`` argument. It will reject a message for any of the regular circumstances, but will also reject it if its not signed by a cert with the argued name.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/__init__.py#L250-L262
fedora-infra/fedmsg
fedmsg/crypto/__init__.py
strip_credentials
def strip_credentials(message): """ Strip credentials from a message dict. A new dict is returned without either `signature` or `certificate` keys. This method can be called safely; the original dict is not modified. This function is applicable using either using the x509 or gpg backends. """ message = copy.deepcopy(message) for field in ['signature', 'certificate']: if field in message: del message[field] return message
python
def strip_credentials(message): """ Strip credentials from a message dict. A new dict is returned without either `signature` or `certificate` keys. This method can be called safely; the original dict is not modified. This function is applicable using either using the x509 or gpg backends. """ message = copy.deepcopy(message) for field in ['signature', 'certificate']: if field in message: del message[field] return message
Strip credentials from a message dict. A new dict is returned without either `signature` or `certificate` keys. This method can be called safely; the original dict is not modified. This function is applicable using either using the x509 or gpg backends.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/__init__.py#L265-L277
fedora-infra/fedmsg
fedmsg/replay/__init__.py
get_replay
def get_replay(name, query, config, context=None): """ Query the replay endpoint for missed messages. Args: name (str): The replay endpoint name. query (dict): A dictionary used to query the replay endpoint for messages. Queries are dictionaries with the following any of the following keys: * 'seq_ids': A ``list`` of ``int``, matching the seq_id attributes of the messages. It should return at most as many messages as the length of the list, assuming no duplicate. * 'seq_id': A single ``int`` matching the seq_id attribute of the message. Should return a single message. It is intended as a shorthand for singleton ``seq_ids`` queries. * 'seq_id_range': A two-tuple of ``int`` defining a range of seq_id to check. * 'msg_ids': A ``list`` of UUIDs matching the msg_id attribute of the messages. * 'msg_id': A single UUID for the msg_id attribute. * 'time': A tuple of two timestamps. It will return all messages emitted in between. config (dict): A configuration dictionary. This dictionary should contain, at a minimum, two keys. The first key, 'replay_endpoints', should be a dictionary that maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an integer used to initialize the ZeroMQ context. context (zmq.Context): The ZeroMQ context to use. If a context is not provided, one will be created. Returns: generator: A generator that yields message dictionaries. 
""" endpoint = config.get('replay_endpoints', {}).get(name, None) if not endpoint: raise IOError("No appropriate replay endpoint " "found for {0}".format(name)) if not context: context = zmq.Context(config['io_threads']) # A replay endpoint isn't PUB/SUB but REQ/REP, as it allows # for bidirectional communication socket = context.socket(zmq.REQ) try: socket.connect(endpoint) except zmq.ZMQError as e: raise IOError("Error when connecting to the " "replay endpoint: '{0}'".format(str(e))) # REQ/REP dance socket.send(fedmsg.encoding.dumps(query).encode('utf-8')) msgs = socket.recv_multipart() socket.close() for m in msgs: try: yield fedmsg.encoding.loads(m.decode('utf-8')) except ValueError: # We assume that if it isn't JSON then it's an error message raise ValueError(m)
python
def get_replay(name, query, config, context=None): """ Query the replay endpoint for missed messages. Args: name (str): The replay endpoint name. query (dict): A dictionary used to query the replay endpoint for messages. Queries are dictionaries with the following any of the following keys: * 'seq_ids': A ``list`` of ``int``, matching the seq_id attributes of the messages. It should return at most as many messages as the length of the list, assuming no duplicate. * 'seq_id': A single ``int`` matching the seq_id attribute of the message. Should return a single message. It is intended as a shorthand for singleton ``seq_ids`` queries. * 'seq_id_range': A two-tuple of ``int`` defining a range of seq_id to check. * 'msg_ids': A ``list`` of UUIDs matching the msg_id attribute of the messages. * 'msg_id': A single UUID for the msg_id attribute. * 'time': A tuple of two timestamps. It will return all messages emitted in between. config (dict): A configuration dictionary. This dictionary should contain, at a minimum, two keys. The first key, 'replay_endpoints', should be a dictionary that maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an integer used to initialize the ZeroMQ context. context (zmq.Context): The ZeroMQ context to use. If a context is not provided, one will be created. Returns: generator: A generator that yields message dictionaries. 
""" endpoint = config.get('replay_endpoints', {}).get(name, None) if not endpoint: raise IOError("No appropriate replay endpoint " "found for {0}".format(name)) if not context: context = zmq.Context(config['io_threads']) # A replay endpoint isn't PUB/SUB but REQ/REP, as it allows # for bidirectional communication socket = context.socket(zmq.REQ) try: socket.connect(endpoint) except zmq.ZMQError as e: raise IOError("Error when connecting to the " "replay endpoint: '{0}'".format(str(e))) # REQ/REP dance socket.send(fedmsg.encoding.dumps(query).encode('utf-8')) msgs = socket.recv_multipart() socket.close() for m in msgs: try: yield fedmsg.encoding.loads(m.decode('utf-8')) except ValueError: # We assume that if it isn't JSON then it's an error message raise ValueError(m)
Query the replay endpoint for missed messages. Args: name (str): The replay endpoint name. query (dict): A dictionary used to query the replay endpoint for messages. Queries are dictionaries with the following any of the following keys: * 'seq_ids': A ``list`` of ``int``, matching the seq_id attributes of the messages. It should return at most as many messages as the length of the list, assuming no duplicate. * 'seq_id': A single ``int`` matching the seq_id attribute of the message. Should return a single message. It is intended as a shorthand for singleton ``seq_ids`` queries. * 'seq_id_range': A two-tuple of ``int`` defining a range of seq_id to check. * 'msg_ids': A ``list`` of UUIDs matching the msg_id attribute of the messages. * 'msg_id': A single UUID for the msg_id attribute. * 'time': A tuple of two timestamps. It will return all messages emitted in between. config (dict): A configuration dictionary. This dictionary should contain, at a minimum, two keys. The first key, 'replay_endpoints', should be a dictionary that maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an integer used to initialize the ZeroMQ context. context (zmq.Context): The ZeroMQ context to use. If a context is not provided, one will be created. Returns: generator: A generator that yields message dictionaries.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/replay/__init__.py#L89-L150
fedora-infra/fedmsg
fedmsg/replay/__init__.py
check_for_replay
def check_for_replay(name, names_to_seq_id, msg, config, context=None): """ Check to see if messages need to be replayed. Args: name (str): The consumer's name. names_to_seq_id (dict): A dictionary that maps names to the last seen sequence ID. msg (dict): The latest message that has arrived. config (dict): A configuration dictionary. This dictionary should contain, at a minimum, two keys. The first key, 'replay_endpoints', should be a dictionary that maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an integer used to initialize the ZeroMQ context. context (zmq.Context): The ZeroMQ context to use. If a context is not provided, one will be created. Returns: list: A list of message dictionaries. """ prev_seq_id = names_to_seq_id.get(name, None) cur_seq_id = msg.get("seq_id", None) if prev_seq_id is None or cur_seq_id is None: return [msg] if cur_seq_id <= prev_seq_id: # Might have been delayed by network lag or something, in which case # we assume the replay has already been asked for and we dismiss it return [] if cur_seq_id == prev_seq_id + 1 or prev_seq_id < 0: ret = [msg] else: ret = list(get_replay(name, { "seq_id_range": (prev_seq_id, cur_seq_id) }, config, context)) if len(ret) == 0 or ret[-1]['seq_id'] < msg['seq_id']: ret.append(msg) names_to_seq_id[name] = cur_seq_id return ret
python
def check_for_replay(name, names_to_seq_id, msg, config, context=None): """ Check to see if messages need to be replayed. Args: name (str): The consumer's name. names_to_seq_id (dict): A dictionary that maps names to the last seen sequence ID. msg (dict): The latest message that has arrived. config (dict): A configuration dictionary. This dictionary should contain, at a minimum, two keys. The first key, 'replay_endpoints', should be a dictionary that maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an integer used to initialize the ZeroMQ context. context (zmq.Context): The ZeroMQ context to use. If a context is not provided, one will be created. Returns: list: A list of message dictionaries. """ prev_seq_id = names_to_seq_id.get(name, None) cur_seq_id = msg.get("seq_id", None) if prev_seq_id is None or cur_seq_id is None: return [msg] if cur_seq_id <= prev_seq_id: # Might have been delayed by network lag or something, in which case # we assume the replay has already been asked for and we dismiss it return [] if cur_seq_id == prev_seq_id + 1 or prev_seq_id < 0: ret = [msg] else: ret = list(get_replay(name, { "seq_id_range": (prev_seq_id, cur_seq_id) }, config, context)) if len(ret) == 0 or ret[-1]['seq_id'] < msg['seq_id']: ret.append(msg) names_to_seq_id[name] = cur_seq_id return ret
Check to see if messages need to be replayed. Args: name (str): The consumer's name. names_to_seq_id (dict): A dictionary that maps names to the last seen sequence ID. msg (dict): The latest message that has arrived. config (dict): A configuration dictionary. This dictionary should contain, at a minimum, two keys. The first key, 'replay_endpoints', should be a dictionary that maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an integer used to initialize the ZeroMQ context. context (zmq.Context): The ZeroMQ context to use. If a context is not provided, one will be created. Returns: list: A list of message dictionaries.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/replay/__init__.py#L153-L194
fedora-infra/fedmsg
fedmsg/crypto/utils.py
fix_datagrepper_message
def fix_datagrepper_message(message): """ See if a message is (probably) a datagrepper message and attempt to mutate it to pass signature validation. Datagrepper adds the 'source_name' and 'source_version' keys. If messages happen to use those keys, they will fail message validation. Additionally, a 'headers' dictionary is present on all responses, regardless of whether it was in the original message or not. This is deleted if it's null, which won't be correct in all cases. Finally, datagrepper turns the 'timestamp' field into a float, but it might have been an integer when the message was signed. A copy of the dictionary is made and returned if altering the message is necessary. I'm so sorry. Args: message (dict): A message to clean up. Returns: dict: A copy of the provided message, with the datagrepper-related keys removed if they were present. """ if not ('source_name' in message and 'source_version' in message): return message # Don't mutate the original message message = message.copy() del message['source_name'] del message['source_version'] # datanommer adds the headers field to the message in all cases. # This is a huge problem because if the signature was generated with a 'headers' # key set and we delete it here, messages will fail validation, but if we don't # messages will fail validation if they didn't have a 'headers' key set. # # There's no way to know whether or not the headers field was part of the signed # message or not. Generally, the problem is datanommer is mutating messages. if 'headers' in message and not message['headers']: del message['headers'] if 'timestamp' in message: message['timestamp'] = int(message['timestamp']) return message
python
def fix_datagrepper_message(message): """ See if a message is (probably) a datagrepper message and attempt to mutate it to pass signature validation. Datagrepper adds the 'source_name' and 'source_version' keys. If messages happen to use those keys, they will fail message validation. Additionally, a 'headers' dictionary is present on all responses, regardless of whether it was in the original message or not. This is deleted if it's null, which won't be correct in all cases. Finally, datagrepper turns the 'timestamp' field into a float, but it might have been an integer when the message was signed. A copy of the dictionary is made and returned if altering the message is necessary. I'm so sorry. Args: message (dict): A message to clean up. Returns: dict: A copy of the provided message, with the datagrepper-related keys removed if they were present. """ if not ('source_name' in message and 'source_version' in message): return message # Don't mutate the original message message = message.copy() del message['source_name'] del message['source_version'] # datanommer adds the headers field to the message in all cases. # This is a huge problem because if the signature was generated with a 'headers' # key set and we delete it here, messages will fail validation, but if we don't # messages will fail validation if they didn't have a 'headers' key set. # # There's no way to know whether or not the headers field was part of the signed # message or not. Generally, the problem is datanommer is mutating messages. if 'headers' in message and not message['headers']: del message['headers'] if 'timestamp' in message: message['timestamp'] = int(message['timestamp']) return message
See if a message is (probably) a datagrepper message and attempt to mutate it to pass signature validation. Datagrepper adds the 'source_name' and 'source_version' keys. If messages happen to use those keys, they will fail message validation. Additionally, a 'headers' dictionary is present on all responses, regardless of whether it was in the original message or not. This is deleted if it's null, which won't be correct in all cases. Finally, datagrepper turns the 'timestamp' field into a float, but it might have been an integer when the message was signed. A copy of the dictionary is made and returned if altering the message is necessary. I'm so sorry. Args: message (dict): A message to clean up. Returns: dict: A copy of the provided message, with the datagrepper-related keys removed if they were present.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/utils.py#L11-L54
fedora-infra/fedmsg
fedmsg/crypto/utils.py
validate_policy
def validate_policy(topic, signer, routing_policy, nitpicky=False): """ Checks that the sender is allowed to emit messages for the given topic. Args: topic (str): The message topic the ``signer`` used when sending the message. signer (str): The Common Name of the certificate used to sign the message. Returns: bool: True if the policy defined in the settings allows the signer to send messages on ``topic``. """ if topic in routing_policy: # If so.. is the signer one of those permitted senders? if signer in routing_policy[topic]: # We are good. The signer of this message is explicitly # whitelisted to send on this topic in our config policy. return True else: # We have a policy for this topic and $homeboy isn't on the list. _log.error("Authorization/routing_policy error. " "Topic %r. Signer %r." % (topic, signer)) return False else: # We don't have a policy for this topic. How we react next for an # underspecified routing_policy is based on a configuration option. # Ideally, we are in nitpicky mode. We leave it disabled while # standing up fedmsg across our environment so that we can build our # policy without having the whole thing come crashing down. if nitpicky: # We *are* in nitpicky mode. We don't have an entry in the # routing_policy for the topic of this message.. and *nobody* # gets in without a pass. That means that we fail the message. _log.error("Authorization/routing_policy underspecified.") return False else: # We are *not* in nitpicky mode. We don't have an entry in the # routing_policy for the topic of this message.. but we don't # really care. _log.warning('No routing policy defined for "{t}" but routing_nitpicky is ' 'False so the message is being treated as authorized.'.format(t=topic)) return True
python
def validate_policy(topic, signer, routing_policy, nitpicky=False): """ Checks that the sender is allowed to emit messages for the given topic. Args: topic (str): The message topic the ``signer`` used when sending the message. signer (str): The Common Name of the certificate used to sign the message. Returns: bool: True if the policy defined in the settings allows the signer to send messages on ``topic``. """ if topic in routing_policy: # If so.. is the signer one of those permitted senders? if signer in routing_policy[topic]: # We are good. The signer of this message is explicitly # whitelisted to send on this topic in our config policy. return True else: # We have a policy for this topic and $homeboy isn't on the list. _log.error("Authorization/routing_policy error. " "Topic %r. Signer %r." % (topic, signer)) return False else: # We don't have a policy for this topic. How we react next for an # underspecified routing_policy is based on a configuration option. # Ideally, we are in nitpicky mode. We leave it disabled while # standing up fedmsg across our environment so that we can build our # policy without having the whole thing come crashing down. if nitpicky: # We *are* in nitpicky mode. We don't have an entry in the # routing_policy for the topic of this message.. and *nobody* # gets in without a pass. That means that we fail the message. _log.error("Authorization/routing_policy underspecified.") return False else: # We are *not* in nitpicky mode. We don't have an entry in the # routing_policy for the topic of this message.. but we don't # really care. _log.warning('No routing policy defined for "{t}" but routing_nitpicky is ' 'False so the message is being treated as authorized.'.format(t=topic)) return True
Checks that the sender is allowed to emit messages for the given topic. Args: topic (str): The message topic the ``signer`` used when sending the message. signer (str): The Common Name of the certificate used to sign the message. Returns: bool: True if the policy defined in the settings allows the signer to send messages on ``topic``.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/utils.py#L57-L99
fedora-infra/fedmsg
fedmsg/crypto/utils.py
load_certificates
def load_certificates(ca_location, crl_location=None, invalidate_cache=False): """ Load the CA certificate and CRL, caching it for future use. .. note:: Providing the location of the CA and CRL as an HTTPS URL is deprecated and will be removed in a future release. Args: ca_location (str): The location of the Certificate Authority certificate. This should be the absolute path to a PEM-encoded file. It can also be an HTTPS url, but this is deprecated and will be removed in a future release. crl_location (str): The location of the Certificate Revocation List. This should be the absolute path to a PEM-encoded file. It can also be an HTTPS url, but this is deprecated and will be removed in a future release. invalidate_cache (bool): Whether or not to invalidate the certificate cache. Returns: tuple: A tuple of the (CA certificate, CRL) as unicode strings. Raises: requests.exception.RequestException: Any exception requests could raise. IOError: If the location provided could not be opened and read. """ if crl_location is None: crl_location = '' try: if invalidate_cache: del _cached_certificates[ca_location + crl_location] else: return _cached_certificates[ca_location + crl_location] except KeyError: pass ca, crl = None, None if ca_location: ca = _load_certificate(ca_location) if crl_location: crl = _load_certificate(crl_location) _cached_certificates[ca_location + crl_location] = ca, crl return ca, crl
python
def load_certificates(ca_location, crl_location=None, invalidate_cache=False): """ Load the CA certificate and CRL, caching it for future use. .. note:: Providing the location of the CA and CRL as an HTTPS URL is deprecated and will be removed in a future release. Args: ca_location (str): The location of the Certificate Authority certificate. This should be the absolute path to a PEM-encoded file. It can also be an HTTPS url, but this is deprecated and will be removed in a future release. crl_location (str): The location of the Certificate Revocation List. This should be the absolute path to a PEM-encoded file. It can also be an HTTPS url, but this is deprecated and will be removed in a future release. invalidate_cache (bool): Whether or not to invalidate the certificate cache. Returns: tuple: A tuple of the (CA certificate, CRL) as unicode strings. Raises: requests.exception.RequestException: Any exception requests could raise. IOError: If the location provided could not be opened and read. """ if crl_location is None: crl_location = '' try: if invalidate_cache: del _cached_certificates[ca_location + crl_location] else: return _cached_certificates[ca_location + crl_location] except KeyError: pass ca, crl = None, None if ca_location: ca = _load_certificate(ca_location) if crl_location: crl = _load_certificate(crl_location) _cached_certificates[ca_location + crl_location] = ca, crl return ca, crl
Load the CA certificate and CRL, caching it for future use. .. note:: Providing the location of the CA and CRL as an HTTPS URL is deprecated and will be removed in a future release. Args: ca_location (str): The location of the Certificate Authority certificate. This should be the absolute path to a PEM-encoded file. It can also be an HTTPS url, but this is deprecated and will be removed in a future release. crl_location (str): The location of the Certificate Revocation List. This should be the absolute path to a PEM-encoded file. It can also be an HTTPS url, but this is deprecated and will be removed in a future release. invalidate_cache (bool): Whether or not to invalidate the certificate cache. Returns: tuple: A tuple of the (CA certificate, CRL) as unicode strings. Raises: requests.exception.RequestException: Any exception requests could raise. IOError: If the location provided could not be opened and read.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/utils.py#L102-L144
fedora-infra/fedmsg
fedmsg/crypto/utils.py
_load_certificate
def _load_certificate(location): """ Load a certificate from the given location. Args: location (str): The location to load. This can either be an HTTPS URL or an absolute file path. This is intended to be used with PEM-encoded certificates and therefore assumes ASCII encoding. Returns: str: The PEM-encoded certificate as a unicode string. Raises: requests.exception.RequestException: Any exception requests could raise. IOError: If the location provided could not be opened and read. """ if location.startswith('https://'): _log.info('Downloading x509 certificate from %s', location) with requests.Session() as session: session.mount('https://', requests.adapters.HTTPAdapter(max_retries=3)) response = session.get(location, timeout=30) response.raise_for_status() return response.text else: _log.info('Loading local x509 certificate from %s', location) with open(location, 'rb') as fd: return fd.read().decode('ascii')
python
def _load_certificate(location): """ Load a certificate from the given location. Args: location (str): The location to load. This can either be an HTTPS URL or an absolute file path. This is intended to be used with PEM-encoded certificates and therefore assumes ASCII encoding. Returns: str: The PEM-encoded certificate as a unicode string. Raises: requests.exception.RequestException: Any exception requests could raise. IOError: If the location provided could not be opened and read. """ if location.startswith('https://'): _log.info('Downloading x509 certificate from %s', location) with requests.Session() as session: session.mount('https://', requests.adapters.HTTPAdapter(max_retries=3)) response = session.get(location, timeout=30) response.raise_for_status() return response.text else: _log.info('Loading local x509 certificate from %s', location) with open(location, 'rb') as fd: return fd.read().decode('ascii')
Load a certificate from the given location. Args: location (str): The location to load. This can either be an HTTPS URL or an absolute file path. This is intended to be used with PEM-encoded certificates and therefore assumes ASCII encoding. Returns: str: The PEM-encoded certificate as a unicode string. Raises: requests.exception.RequestException: Any exception requests could raise. IOError: If the location provided could not be opened and read.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/utils.py#L147-L173
fedora-infra/fedmsg
fedmsg/config.py
_get_config_files
def _get_config_files(): """ Load the list of file paths for fedmsg configuration files. Returns: list: List of files containing fedmsg configuration. """ config_paths = [] if os.environ.get('FEDMSG_CONFIG'): config_location = os.environ['FEDMSG_CONFIG'] else: config_location = '/etc/fedmsg.d' if os.path.isfile(config_location): config_paths.append(config_location) elif os.path.isdir(config_location): # list dir and add valid files possible_config_files = [os.path.join(config_location, p) for p in os.listdir(config_location) if p.endswith('.py')] for p in possible_config_files: if os.path.isfile(p): config_paths.append(p) if not config_paths: _log.info('No configuration files found in %s', config_location) return config_paths
python
def _get_config_files(): """ Load the list of file paths for fedmsg configuration files. Returns: list: List of files containing fedmsg configuration. """ config_paths = [] if os.environ.get('FEDMSG_CONFIG'): config_location = os.environ['FEDMSG_CONFIG'] else: config_location = '/etc/fedmsg.d' if os.path.isfile(config_location): config_paths.append(config_location) elif os.path.isdir(config_location): # list dir and add valid files possible_config_files = [os.path.join(config_location, p) for p in os.listdir(config_location) if p.endswith('.py')] for p in possible_config_files: if os.path.isfile(p): config_paths.append(p) if not config_paths: _log.info('No configuration files found in %s', config_location) return config_paths
Load the list of file paths for fedmsg configuration files. Returns: list: List of files containing fedmsg configuration.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L741-L765
fedora-infra/fedmsg
fedmsg/config.py
_validate_none_or_type
def _validate_none_or_type(t): """ Create a validator that checks if a setting is either None or a given type. Args: t: The type to assert. Returns: callable: A callable that will validate a setting for that type. """ def _validate(setting): """ Check the setting to make sure it's the right type. Args: setting (object): The setting to check. Returns: object: The unmodified object if it's the proper type. Raises: ValueError: If the setting is the wrong type. """ if setting is not None and not isinstance(setting, t): raise ValueError('"{}" is not "{}"'.format(setting, t)) return setting return _validate
python
def _validate_none_or_type(t): """ Create a validator that checks if a setting is either None or a given type. Args: t: The type to assert. Returns: callable: A callable that will validate a setting for that type. """ def _validate(setting): """ Check the setting to make sure it's the right type. Args: setting (object): The setting to check. Returns: object: The unmodified object if it's the proper type. Raises: ValueError: If the setting is the wrong type. """ if setting is not None and not isinstance(setting, t): raise ValueError('"{}" is not "{}"'.format(setting, t)) return setting return _validate
Create a validator that checks if a setting is either None or a given type. Args: t: The type to assert. Returns: callable: A callable that will validate a setting for that type.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L800-L826
fedora-infra/fedmsg
fedmsg/config.py
_validate_bool
def _validate_bool(value): """ Validate a setting is a bool. Returns: bool: The value as a boolean. Raises: ValueError: If the value can't be parsed as a bool string or isn't already bool. """ if isinstance(value, six.text_type): if value.strip().lower() == 'true': value = True elif value.strip().lower() == 'false': value = False else: raise ValueError('"{}" must be a boolean ("True" or "False")'.format(value)) if not isinstance(value, bool): raise ValueError('"{}" is not a boolean value.'.format(value)) return value
python
def _validate_bool(value): """ Validate a setting is a bool. Returns: bool: The value as a boolean. Raises: ValueError: If the value can't be parsed as a bool string or isn't already bool. """ if isinstance(value, six.text_type): if value.strip().lower() == 'true': value = True elif value.strip().lower() == 'false': value = False else: raise ValueError('"{}" must be a boolean ("True" or "False")'.format(value)) if not isinstance(value, bool): raise ValueError('"{}" is not a boolean value.'.format(value)) return value
Validate a setting is a bool. Returns: bool: The value as a boolean. Raises: ValueError: If the value can't be parsed as a bool string or isn't already bool.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L829-L850
fedora-infra/fedmsg
fedmsg/config.py
load_config
def load_config(extra_args=None, doc=None, filenames=None, invalidate_cache=False, fedmsg_command=False, disable_defaults=False): """ Setup a runtime config dict by integrating the following sources (ordered by precedence): - defaults (unless disable_defaults = True) - config file - command line arguments If the ``fedmsg_command`` argument is False, no command line arguments are checked. """ warnings.warn('Using "load_config" is deprecated and will be removed in a future release;' ' use the "fedmsg.config.conf" dictionary instead.', DeprecationWarning) global __cache if invalidate_cache: __cache = {} if __cache: return __cache # Coerce defaults if arguments are not supplied. extra_args = extra_args or [] doc = doc or "" if not disable_defaults: config = copy.deepcopy(defaults) else: config = {} config.update(_process_config_file(filenames=filenames)) # This is optional (and defaults to false) so that only 'fedmsg-*' commands # are required to provide these arguments. # For instance, the moksha-hub command takes a '-v' argument and internally # makes calls to fedmsg. We don't want to impose all of fedmsg's CLI # option constraints on programs that use fedmsg, so we make it optional. if fedmsg_command: config.update(_process_arguments(extra_args, doc, config)) # If the user specified a config file on the command line, then start over # but read in that file instead. if not filenames and config.get('config_filename', None): return load_config(extra_args, doc, filenames=[config['config_filename']], fedmsg_command=fedmsg_command, disable_defaults=disable_defaults) # Just a little debug option. 
:) if config.get('print_config'): print(pretty_dumps(config)) sys.exit(0) if not disable_defaults and 'endpoints' not in config: raise ValueError("No config value 'endpoints' found.") if not isinstance(config.get('endpoints', {}), dict): raise ValueError("The 'endpoints' config value must be a dict.") if 'endpoints' in config: config['endpoints'] = dict([ (k, list(iterate(v))) for k, v in config['endpoints'].items() ]) if 'srv_endpoints' in config and len(config['srv_endpoints']) > 0: from dns.resolver import query, NXDOMAIN, Timeout, NoNameservers for e in config['srv_endpoints']: urls = [] try: records = query('_fedmsg._tcp.{0}'.format(e), 'SRV') except NXDOMAIN: warnings.warn("There is no appropriate SRV records " + "for {0}".format(e)) continue except Timeout: warnings.warn("The DNS query for the SRV records of" + " {0} timed out.".format(e)) continue except NoNameservers: warnings.warn("No name server is available, please " + "check the configuration") break for rec in records: urls.append('tcp://{hostname}:{port}'.format( hostname=rec.target.to_text(), port=rec.port )) config['endpoints'][e] = list(iterate(urls)) if 'topic_prefix_re' not in config and 'topic_prefix' in config: # Turn "org.fedoraproject" into "org\.fedoraproject\.[^\W\d_]+" config['topic_prefix_re'] = config['topic_prefix'].replace('.', '\.')\ + '\.[^\W\d_]+' __cache = config return config
python
def load_config(extra_args=None, doc=None, filenames=None, invalidate_cache=False, fedmsg_command=False, disable_defaults=False): """ Setup a runtime config dict by integrating the following sources (ordered by precedence): - defaults (unless disable_defaults = True) - config file - command line arguments If the ``fedmsg_command`` argument is False, no command line arguments are checked. """ warnings.warn('Using "load_config" is deprecated and will be removed in a future release;' ' use the "fedmsg.config.conf" dictionary instead.', DeprecationWarning) global __cache if invalidate_cache: __cache = {} if __cache: return __cache # Coerce defaults if arguments are not supplied. extra_args = extra_args or [] doc = doc or "" if not disable_defaults: config = copy.deepcopy(defaults) else: config = {} config.update(_process_config_file(filenames=filenames)) # This is optional (and defaults to false) so that only 'fedmsg-*' commands # are required to provide these arguments. # For instance, the moksha-hub command takes a '-v' argument and internally # makes calls to fedmsg. We don't want to impose all of fedmsg's CLI # option constraints on programs that use fedmsg, so we make it optional. if fedmsg_command: config.update(_process_arguments(extra_args, doc, config)) # If the user specified a config file on the command line, then start over # but read in that file instead. if not filenames and config.get('config_filename', None): return load_config(extra_args, doc, filenames=[config['config_filename']], fedmsg_command=fedmsg_command, disable_defaults=disable_defaults) # Just a little debug option. 
:) if config.get('print_config'): print(pretty_dumps(config)) sys.exit(0) if not disable_defaults and 'endpoints' not in config: raise ValueError("No config value 'endpoints' found.") if not isinstance(config.get('endpoints', {}), dict): raise ValueError("The 'endpoints' config value must be a dict.") if 'endpoints' in config: config['endpoints'] = dict([ (k, list(iterate(v))) for k, v in config['endpoints'].items() ]) if 'srv_endpoints' in config and len(config['srv_endpoints']) > 0: from dns.resolver import query, NXDOMAIN, Timeout, NoNameservers for e in config['srv_endpoints']: urls = [] try: records = query('_fedmsg._tcp.{0}'.format(e), 'SRV') except NXDOMAIN: warnings.warn("There is no appropriate SRV records " + "for {0}".format(e)) continue except Timeout: warnings.warn("The DNS query for the SRV records of" + " {0} timed out.".format(e)) continue except NoNameservers: warnings.warn("No name server is available, please " + "check the configuration") break for rec in records: urls.append('tcp://{hostname}:{port}'.format( hostname=rec.target.to_text(), port=rec.port )) config['endpoints'][e] = list(iterate(urls)) if 'topic_prefix_re' not in config and 'topic_prefix' in config: # Turn "org.fedoraproject" into "org\.fedoraproject\.[^\W\d_]+" config['topic_prefix_re'] = config['topic_prefix'].replace('.', '\.')\ + '\.[^\W\d_]+' __cache = config return config
Setup a runtime config dict by integrating the following sources (ordered by precedence): - defaults (unless disable_defaults = True) - config file - command line arguments If the ``fedmsg_command`` argument is False, no command line arguments are checked.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1228-L1330
fedora-infra/fedmsg
fedmsg/config.py
build_parser
def build_parser(declared_args, doc, config=None, prog=None): """ Return the global :class:`argparse.ArgumentParser` used by all fedmsg commands. Extra arguments can be supplied with the `declared_args` argument. """ config = config or copy.deepcopy(defaults) prog = prog or sys.argv[0] parser = argparse.ArgumentParser( description=textwrap.dedent(doc), formatter_class=argparse.RawDescriptionHelpFormatter, prog=prog, ) parser.add_argument( '--io-threads', dest='io_threads', type=int, help="Number of io threads for 0mq to use", default=config['io_threads'], ) parser.add_argument( '--topic-prefix', dest='topic_prefix', type=str, help="Prefix for the topic of each message sent.", default=config['topic_prefix'], ) parser.add_argument( '--post-init-sleep', dest='post_init_sleep', type=float, help="Number of seconds to sleep after initializing.", default=config['post_init_sleep'], ) parser.add_argument( '--config-filename', dest='config_filename', help="Config file to use.", default=None, ) parser.add_argument( '--print-config', dest='print_config', help='Simply print out the configuration and exit. No action taken.', default=False, action='store_true', ) parser.add_argument( '--timeout', dest='timeout', help="Timeout in seconds for any blocking zmq operations.", type=float, default=config['timeout'], ) parser.add_argument( '--high-water-mark', dest='high_water_mark', help="Limit on the number of messages in the queue before blocking.", type=int, default=config['high_water_mark'], ) parser.add_argument( '--linger', dest='zmq_linger', help="Number of milliseconds to wait before timing out connections.", type=int, default=config['zmq_linger'], ) for args, kwargs in declared_args: # Replace the hard-coded extra_args default with the config file value # (if it exists) if all([k in kwargs for k in ['dest', 'default']]): kwargs['default'] = config.get( kwargs['dest'], kwargs['default']) # Having slurped smart defaults from the config file, add the CLI arg. 
parser.add_argument(*args, **kwargs) return parser
python
def build_parser(declared_args, doc, config=None, prog=None): """ Return the global :class:`argparse.ArgumentParser` used by all fedmsg commands. Extra arguments can be supplied with the `declared_args` argument. """ config = config or copy.deepcopy(defaults) prog = prog or sys.argv[0] parser = argparse.ArgumentParser( description=textwrap.dedent(doc), formatter_class=argparse.RawDescriptionHelpFormatter, prog=prog, ) parser.add_argument( '--io-threads', dest='io_threads', type=int, help="Number of io threads for 0mq to use", default=config['io_threads'], ) parser.add_argument( '--topic-prefix', dest='topic_prefix', type=str, help="Prefix for the topic of each message sent.", default=config['topic_prefix'], ) parser.add_argument( '--post-init-sleep', dest='post_init_sleep', type=float, help="Number of seconds to sleep after initializing.", default=config['post_init_sleep'], ) parser.add_argument( '--config-filename', dest='config_filename', help="Config file to use.", default=None, ) parser.add_argument( '--print-config', dest='print_config', help='Simply print out the configuration and exit. No action taken.', default=False, action='store_true', ) parser.add_argument( '--timeout', dest='timeout', help="Timeout in seconds for any blocking zmq operations.", type=float, default=config['timeout'], ) parser.add_argument( '--high-water-mark', dest='high_water_mark', help="Limit on the number of messages in the queue before blocking.", type=int, default=config['high_water_mark'], ) parser.add_argument( '--linger', dest='zmq_linger', help="Number of milliseconds to wait before timing out connections.", type=int, default=config['zmq_linger'], ) for args, kwargs in declared_args: # Replace the hard-coded extra_args default with the config file value # (if it exists) if all([k in kwargs for k in ['dest', 'default']]): kwargs['default'] = config.get( kwargs['dest'], kwargs['default']) # Having slurped smart defaults from the config file, add the CLI arg. 
parser.add_argument(*args, **kwargs) return parser
Return the global :class:`argparse.ArgumentParser` used by all fedmsg commands. Extra arguments can be supplied with the `declared_args` argument.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1333-L1415
fedora-infra/fedmsg
fedmsg/config.py
_gather_configs_in
def _gather_configs_in(directory): """ Return list of fully qualified python filenames in the given dir """ try: return sorted([ os.path.join(directory, fname) for fname in os.listdir(directory) if fname.endswith('.py') ]) except OSError: return []
python
def _gather_configs_in(directory): """ Return list of fully qualified python filenames in the given dir """ try: return sorted([ os.path.join(directory, fname) for fname in os.listdir(directory) if fname.endswith('.py') ]) except OSError: return []
Return list of fully qualified python filenames in the given dir
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1424-L1433
fedora-infra/fedmsg
fedmsg/config.py
_recursive_update
def _recursive_update(d1, d2): """ Little helper function that does what d1.update(d2) does, but works nice and recursively with dicts of dicts of dicts. It's not necessarily very efficient. """ for k in set(d1).intersection(d2): if isinstance(d1[k], dict) and isinstance(d2[k], dict): d1[k] = _recursive_update(d1[k], d2[k]) else: d1[k] = d2[k] for k in set(d2).difference(d1): d1[k] = d2[k] return d1
python
def _recursive_update(d1, d2): """ Little helper function that does what d1.update(d2) does, but works nice and recursively with dicts of dicts of dicts. It's not necessarily very efficient. """ for k in set(d1).intersection(d2): if isinstance(d1[k], dict) and isinstance(d2[k], dict): d1[k] = _recursive_update(d1[k], d2[k]) else: d1[k] = d2[k] for k in set(d2).difference(d1): d1[k] = d2[k] return d1
Little helper function that does what d1.update(d2) does, but works nice and recursively with dicts of dicts of dicts. It's not necessarily very efficient.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1436-L1452
fedora-infra/fedmsg
fedmsg/config.py
execfile
def execfile(fname, variables): """ This is builtin in python2, but we have to roll our own on py3. """ with open(fname) as f: code = compile(f.read(), fname, 'exec') exec(code, variables)
python
def execfile(fname, variables): """ This is builtin in python2, but we have to roll our own on py3. """ with open(fname) as f: code = compile(f.read(), fname, 'exec') exec(code, variables)
This is builtin in python2, but we have to roll our own on py3.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1455-L1459
fedora-infra/fedmsg
fedmsg/config.py
FedmsgConfig.get
def get(self, *args, **kw): """Load the configuration if necessary and forward the call to the parent.""" if not self._loaded: self.load_config() return super(FedmsgConfig, self).get(*args, **kw)
python
def get(self, *args, **kw): """Load the configuration if necessary and forward the call to the parent.""" if not self._loaded: self.load_config() return super(FedmsgConfig, self).get(*args, **kw)
Load the configuration if necessary and forward the call to the parent.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1118-L1122
fedora-infra/fedmsg
fedmsg/config.py
FedmsgConfig.copy
def copy(self, *args, **kw): """Load the configuration if necessary and forward the call to the parent.""" if not self._loaded: self.load_config() return super(FedmsgConfig, self).copy(*args, **kw)
python
def copy(self, *args, **kw): """Load the configuration if necessary and forward the call to the parent.""" if not self._loaded: self.load_config() return super(FedmsgConfig, self).copy(*args, **kw)
Load the configuration if necessary and forward the call to the parent.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1124-L1128
fedora-infra/fedmsg
fedmsg/config.py
FedmsgConfig.load_config
def load_config(self, settings=None): """ Load the configuration either from the config file, or from the given settings. Args: settings (dict): If given, the settings are pulled from this dictionary. Otherwise, the config file is used. """ self._load_defaults() if settings: self.update(settings) else: config_paths = _get_config_files() for p in config_paths: conf = _process_config_file([p]) self.update(conf) self._loaded = True self._validate()
python
def load_config(self, settings=None): """ Load the configuration either from the config file, or from the given settings. Args: settings (dict): If given, the settings are pulled from this dictionary. Otherwise, the config file is used. """ self._load_defaults() if settings: self.update(settings) else: config_paths = _get_config_files() for p in config_paths: conf = _process_config_file([p]) self.update(conf) self._loaded = True self._validate()
Load the configuration either from the config file, or from the given settings. Args: settings (dict): If given, the settings are pulled from this dictionary. Otherwise, the config file is used.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1130-L1147
fedora-infra/fedmsg
fedmsg/config.py
FedmsgConfig._load_defaults
def _load_defaults(self): """Iterate over self._defaults and set all default values on self.""" for k, v in self._defaults.items(): self[k] = v['default']
python
def _load_defaults(self): """Iterate over self._defaults and set all default values on self.""" for k, v in self._defaults.items(): self[k] = v['default']
Iterate over self._defaults and set all default values on self.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1149-L1152
fedora-infra/fedmsg
fedmsg/config.py
FedmsgConfig._validate
def _validate(self): """ Run the validators found in self._defaults on all the corresponding values. Raises: ValueError: If the configuration contains an invalid configuration value. """ errors = [] for k in self._defaults.keys(): try: validator = self._defaults[k]['validator'] if validator is not None: self[k] = validator(self[k]) except ValueError as e: errors.append('\t{}: {}'.format(k, six.text_type(e))) if errors: raise ValueError( 'Invalid configuration values were set: \n{}'.format('\n'.join(errors)))
python
def _validate(self): """ Run the validators found in self._defaults on all the corresponding values. Raises: ValueError: If the configuration contains an invalid configuration value. """ errors = [] for k in self._defaults.keys(): try: validator = self._defaults[k]['validator'] if validator is not None: self[k] = validator(self[k]) except ValueError as e: errors.append('\t{}: {}'.format(k, six.text_type(e))) if errors: raise ValueError( 'Invalid configuration values were set: \n{}'.format('\n'.join(errors)))
Run the validators found in self._defaults on all the corresponding values. Raises: ValueError: If the configuration contains an invalid configuration value.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/config.py#L1154-L1172
fedora-infra/fedmsg
fedmsg/meta/__init__.py
make_processors
def make_processors(**config): """ Initialize all of the text processors. You'll need to call this once before using any of the other functions in this module. >>> import fedmsg.config >>> import fedmsg.meta >>> config = fedmsg.config.load_config([], None) >>> fedmsg.meta.make_processors(**config) >>> text = fedmsg.meta.msg2repr(some_message_dict, **config) """ global processors # If they're already initialized, then fine. if processors: return import pkg_resources processors = [] for processor in pkg_resources.iter_entry_points('fedmsg.meta'): try: processors.append(processor.load()(_, **config)) except Exception as e: log.warn("Failed to load %r processor." % processor.name) log.exception(e) # This should always be last processors.append(DefaultProcessor(_, **config)) # By default we have three builtin processors: Default, Logger, and # Announce. If these are the only three, then we didn't find any # externally provided ones. calls to msg2subtitle and msg2link likely will # not work the way the user is expecting. if len(processors) == 3: log.warn("No fedmsg.meta plugins found. fedmsg.meta.msg2* crippled")
python
def make_processors(**config): """ Initialize all of the text processors. You'll need to call this once before using any of the other functions in this module. >>> import fedmsg.config >>> import fedmsg.meta >>> config = fedmsg.config.load_config([], None) >>> fedmsg.meta.make_processors(**config) >>> text = fedmsg.meta.msg2repr(some_message_dict, **config) """ global processors # If they're already initialized, then fine. if processors: return import pkg_resources processors = [] for processor in pkg_resources.iter_entry_points('fedmsg.meta'): try: processors.append(processor.load()(_, **config)) except Exception as e: log.warn("Failed to load %r processor." % processor.name) log.exception(e) # This should always be last processors.append(DefaultProcessor(_, **config)) # By default we have three builtin processors: Default, Logger, and # Announce. If these are the only three, then we didn't find any # externally provided ones. calls to msg2subtitle and msg2link likely will # not work the way the user is expecting. if len(processors) == 3: log.warn("No fedmsg.meta plugins found. fedmsg.meta.msg2* crippled")
Initialize all of the text processors. You'll need to call this once before using any of the other functions in this module. >>> import fedmsg.config >>> import fedmsg.meta >>> config = fedmsg.config.load_config([], None) >>> fedmsg.meta.make_processors(**config) >>> text = fedmsg.meta.msg2repr(some_message_dict, **config)
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L88-L124
fedora-infra/fedmsg
fedmsg/meta/__init__.py
msg2processor
def msg2processor(msg, **config): """ For a given message return the text processor that can handle it. This will raise a :class:`fedmsg.meta.ProcessorsNotInitialized` exception if :func:`fedmsg.meta.make_processors` hasn't been called yet. """ for processor in processors: if processor.handle_msg(msg, **config) is not None: return processor else: return processors[-1]
python
def msg2processor(msg, **config): """ For a given message return the text processor that can handle it. This will raise a :class:`fedmsg.meta.ProcessorsNotInitialized` exception if :func:`fedmsg.meta.make_processors` hasn't been called yet. """ for processor in processors: if processor.handle_msg(msg, **config) is not None: return processor else: return processors[-1]
For a given message return the text processor that can handle it. This will raise a :class:`fedmsg.meta.ProcessorsNotInitialized` exception if :func:`fedmsg.meta.make_processors` hasn't been called yet.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L127-L137
fedora-infra/fedmsg
fedmsg/meta/__init__.py
graceful
def graceful(cls): """ A decorator to protect against message structure changes. Many of our processors expect messages to be in a certain format. If the format changes, they may start to fail and raise exceptions. This decorator is in place to catch and log those exceptions and to gracefully return default values. """ def _wrapper(f): @functools.wraps(f) def __wrapper(msg, **config): try: return f(msg, **config) except KeyError: log.exception("%r failed on %r" % (f, msg.get('msg_id'))) return cls() return __wrapper return _wrapper
python
def graceful(cls): """ A decorator to protect against message structure changes. Many of our processors expect messages to be in a certain format. If the format changes, they may start to fail and raise exceptions. This decorator is in place to catch and log those exceptions and to gracefully return default values. """ def _wrapper(f): @functools.wraps(f) def __wrapper(msg, **config): try: return f(msg, **config) except KeyError: log.exception("%r failed on %r" % (f, msg.get('msg_id'))) return cls() return __wrapper return _wrapper
A decorator to protect against message structure changes. Many of our processors expect messages to be in a certain format. If the format changes, they may start to fail and raise exceptions. This decorator is in place to catch and log those exceptions and to gracefully return default values.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L140-L158
fedora-infra/fedmsg
fedmsg/meta/__init__.py
conglomerate
def conglomerate(messages, subject=None, lexers=False, **config): """ Return a list of messages with some of them grouped into conglomerate messages. Conglomerate messages represent several other messages. For example, you might pass this function a list of 40 messages. 38 of those are git.commit messages, 1 is a bodhi.update message, and 1 is a badge.award message. This function could return a list of three messages, one representing the 38 git commit messages, one representing the bodhi.update message, and one representing the badge.award message. The ``subject`` argument is optional and will return "subjective" representations if possible (see msg2subjective(...)). Functionality is provided by fedmsg.meta plugins on a "best effort" basis. """ # First, give every registered processor a chance to do its work for processor in processors: messages = processor.conglomerate(messages, subject=subject, **config) # Then, just fake it for every other ungrouped message. for i, message in enumerate(messages): # If these were successfully grouped, then skip if 'msg_ids' in message: continue # For ungrouped ones, replace them with a fake conglomerate messages[i] = BaseConglomerator.produce_template( [message], subject=subject, lexers=lexers, **config) # And fill out the fields that fully-implemented conglomerators would # normally fill out. messages[i].update({ 'link': msg2link(message, **config), 'subtitle': msg2subtitle(message, **config), 'subjective': msg2subjective(message, subject=subject, **config), 'secondary_icon': msg2secondary_icon(message, **config), }) return messages
python
def conglomerate(messages, subject=None, lexers=False, **config): """ Return a list of messages with some of them grouped into conglomerate messages. Conglomerate messages represent several other messages. For example, you might pass this function a list of 40 messages. 38 of those are git.commit messages, 1 is a bodhi.update message, and 1 is a badge.award message. This function could return a list of three messages, one representing the 38 git commit messages, one representing the bodhi.update message, and one representing the badge.award message. The ``subject`` argument is optional and will return "subjective" representations if possible (see msg2subjective(...)). Functionality is provided by fedmsg.meta plugins on a "best effort" basis. """ # First, give every registered processor a chance to do its work for processor in processors: messages = processor.conglomerate(messages, subject=subject, **config) # Then, just fake it for every other ungrouped message. for i, message in enumerate(messages): # If these were successfully grouped, then skip if 'msg_ids' in message: continue # For ungrouped ones, replace them with a fake conglomerate messages[i] = BaseConglomerator.produce_template( [message], subject=subject, lexers=lexers, **config) # And fill out the fields that fully-implemented conglomerators would # normally fill out. messages[i].update({ 'link': msg2link(message, **config), 'subtitle': msg2subtitle(message, **config), 'subjective': msg2subjective(message, subject=subject, **config), 'secondary_icon': msg2secondary_icon(message, **config), }) return messages
Return a list of messages with some of them grouped into conglomerate messages. Conglomerate messages represent several other messages. For example, you might pass this function a list of 40 messages. 38 of those are git.commit messages, 1 is a bodhi.update message, and 1 is a badge.award message. This function could return a list of three messages, one representing the 38 git commit messages, one representing the bodhi.update message, and one representing the badge.award message. The ``subject`` argument is optional and will return "subjective" representations if possible (see msg2subjective(...)). Functionality is provided by fedmsg.meta plugins on a "best effort" basis.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L179-L217
fedora-infra/fedmsg
fedmsg/meta/__init__.py
msg2repr
def msg2repr(msg, processor, **config): """ Return a human-readable or "natural language" representation of a dict-like fedmsg message. Think of this as the 'top-most level' function in this module. """ fmt = u"{title} -- {subtitle} {link}" title = msg2title(msg, **config) subtitle = processor.subtitle(msg, **config) link = processor.link(msg, **config) or '' return fmt.format(**locals())
python
def msg2repr(msg, processor, **config): """ Return a human-readable or "natural language" representation of a dict-like fedmsg message. Think of this as the 'top-most level' function in this module. """ fmt = u"{title} -- {subtitle} {link}" title = msg2title(msg, **config) subtitle = processor.subtitle(msg, **config) link = processor.link(msg, **config) or '' return fmt.format(**locals())
Return a human-readable or "natural language" representation of a dict-like fedmsg message. Think of this as the 'top-most level' function in this module.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L222-L232
fedora-infra/fedmsg
fedmsg/meta/__init__.py
msg2long_form
def msg2long_form(msg, processor, **config): """ Return a 'long form' text representation of a message. For most message, this will just default to the terse subtitle, but for some messages a long paragraph-structured block of text may be returned. """ result = processor.long_form(msg, **config) if not result: result = processor.subtitle(msg, **config) return result
python
def msg2long_form(msg, processor, **config): """ Return a 'long form' text representation of a message. For most message, this will just default to the terse subtitle, but for some messages a long paragraph-structured block of text may be returned. """ result = processor.long_form(msg, **config) if not result: result = processor.subtitle(msg, **config) return result
Return a 'long form' text representation of a message. For most message, this will just default to the terse subtitle, but for some messages a long paragraph-structured block of text may be returned.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L251-L260
fedora-infra/fedmsg
fedmsg/meta/__init__.py
msg2usernames
def msg2usernames(msg, processor=None, legacy=False, **config): """ Return a set of FAS usernames associated with a message. """ return processor.usernames(msg, **config)
python
def msg2usernames(msg, processor=None, legacy=False, **config): """ Return a set of FAS usernames associated with a message. """ return processor.usernames(msg, **config)
Return a set of FAS usernames associated with a message.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L293-L295
fedora-infra/fedmsg
fedmsg/meta/__init__.py
msg2agent
def msg2agent(msg, processor=None, legacy=False, **config): """ Return the single username who is the "agent" for an event. An "agent" is the one responsible for the event taking place, for example, if one person gives karma to another, then both usernames are returned by msg2usernames, but only the one who gave the karma is returned by msg2agent. If the processor registered to handle the message does not provide an agent method, then the *first* user returned by msg2usernames is returned (whether that is correct or not). Here we assume that if a processor implements `agent`, then it knows what it is doing and we should trust that. But if it does not implement it, we'll try our best guess. If there are no users returned by msg2usernames, then None is returned. """ if processor.agent is not NotImplemented: return processor.agent(msg, **config) else: usernames = processor.usernames(msg, **config) # usernames is a set(), which doesn't support indexing. if usernames: return usernames.pop() # default to None if we can't find anything return None
python
def msg2agent(msg, processor=None, legacy=False, **config): """ Return the single username who is the "agent" for an event. An "agent" is the one responsible for the event taking place, for example, if one person gives karma to another, then both usernames are returned by msg2usernames, but only the one who gave the karma is returned by msg2agent. If the processor registered to handle the message does not provide an agent method, then the *first* user returned by msg2usernames is returned (whether that is correct or not). Here we assume that if a processor implements `agent`, then it knows what it is doing and we should trust that. But if it does not implement it, we'll try our best guess. If there are no users returned by msg2usernames, then None is returned. """ if processor.agent is not NotImplemented: return processor.agent(msg, **config) else: usernames = processor.usernames(msg, **config) # usernames is a set(), which doesn't support indexing. if usernames: return usernames.pop() # default to None if we can't find anything return None
Return the single username who is the "agent" for an event. An "agent" is the one responsible for the event taking place, for example, if one person gives karma to another, then both usernames are returned by msg2usernames, but only the one who gave the karma is returned by msg2agent. If the processor registered to handle the message does not provide an agent method, then the *first* user returned by msg2usernames is returned (whether that is correct or not). Here we assume that if a processor implements `agent`, then it knows what it is doing and we should trust that. But if it does not implement it, we'll try our best guess. If there are no users returned by msg2usernames, then None is returned.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L299-L325
fedora-infra/fedmsg
fedmsg/meta/__init__.py
msg2subjective
def msg2subjective(msg, processor, subject, **config): """ Return a human-readable text representation of a dict-like fedmsg message from the subjective perspective of a user. For example, if the subject viewing the message is "oddshocks" and the message would normally translate into "oddshocks commented on ticket #174", it would instead translate into "you commented on ticket #174". """ text = processor.subjective(msg, subject, **config) if not text: text = processor.subtitle(msg, **config) return text
python
def msg2subjective(msg, processor, subject, **config): """ Return a human-readable text representation of a dict-like fedmsg message from the subjective perspective of a user. For example, if the subject viewing the message is "oddshocks" and the message would normally translate into "oddshocks commented on ticket #174", it would instead translate into "you commented on ticket #174". """ text = processor.subjective(msg, subject, **config) if not text: text = processor.subtitle(msg, **config) return text
Return a human-readable text representation of a dict-like fedmsg message from the subjective perspective of a user. For example, if the subject viewing the message is "oddshocks" and the message would normally translate into "oddshocks commented on ticket #174", it would instead translate into "you commented on ticket #174".
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L366-L377
fedora-infra/fedmsg
fedmsg/commands/trigger.py
TriggerCommand.run_command
def run_command(self, command, message): """ Use subprocess; feed the message to our command over stdin """ proc = subprocess.Popen([ 'echo \'%s\' | %s' % (fedmsg.encoding.dumps(message), command) ], shell=True, executable='/bin/bash') return proc.wait()
python
def run_command(self, command, message): """ Use subprocess; feed the message to our command over stdin """ proc = subprocess.Popen([ 'echo \'%s\' | %s' % (fedmsg.encoding.dumps(message), command) ], shell=True, executable='/bin/bash') return proc.wait()
Use subprocess; feed the message to our command over stdin
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/commands/trigger.py#L76-L81
fedora-infra/fedmsg
fedmsg/crypto/x509.py
_m2crypto_sign
def _m2crypto_sign(message, ssldir=None, certname=None, **config): """ Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed RSA message digest of the JSON repr. - 'certificate' - the base64 X509 certificate of the sending host. """ if ssldir is None or certname is None: error = "You must set the ssldir and certname keyword arguments." raise ValueError(error) message['crypto'] = 'x509' certificate = M2Crypto.X509.load_cert( "%s/%s.crt" % (ssldir, certname)).as_pem() # Opening this file requires elevated privileges in stg/prod. rsa_private = M2Crypto.RSA.load_key( "%s/%s.key" % (ssldir, certname)) digest = M2Crypto.EVP.MessageDigest('sha1') digest.update(fedmsg.encoding.dumps(message)) signature = rsa_private.sign(digest.digest()) # Return a new dict containing the pairs in the original message as well # as the new authn fields. return dict(message.items() + [ ('signature', signature.encode('base64').decode('ascii')), ('certificate', certificate.encode('base64').decode('ascii')), ])
python
def _m2crypto_sign(message, ssldir=None, certname=None, **config): """ Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed RSA message digest of the JSON repr. - 'certificate' - the base64 X509 certificate of the sending host. """ if ssldir is None or certname is None: error = "You must set the ssldir and certname keyword arguments." raise ValueError(error) message['crypto'] = 'x509' certificate = M2Crypto.X509.load_cert( "%s/%s.crt" % (ssldir, certname)).as_pem() # Opening this file requires elevated privileges in stg/prod. rsa_private = M2Crypto.RSA.load_key( "%s/%s.key" % (ssldir, certname)) digest = M2Crypto.EVP.MessageDigest('sha1') digest.update(fedmsg.encoding.dumps(message)) signature = rsa_private.sign(digest.digest()) # Return a new dict containing the pairs in the original message as well # as the new authn fields. return dict(message.items() + [ ('signature', signature.encode('base64').decode('ascii')), ('certificate', certificate.encode('base64').decode('ascii')), ])
Insert two new fields into the message dict and return it. Those fields are: - 'signature' - the computed RSA message digest of the JSON repr. - 'certificate' - the base64 X509 certificate of the sending host.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/x509.py#L61-L91
fedora-infra/fedmsg
fedmsg/crypto/x509.py
_m2crypto_validate
def _m2crypto_validate(message, ssldir=None, **config): """ Return true or false if the message is signed appropriately. Four things must be true: 1) The X509 cert must be signed by our CA 2) The cert must not be in our CRL. 3) We must be able to verify the signature using the RSA public key contained in the X509 cert. 4) The topic of the message and the CN on the cert must appear in the :ref:`conf-routing-policy` dict. """ if ssldir is None: raise ValueError("You must set the ssldir keyword argument.") def fail(reason): _log.warn("Failed validation. %s" % reason) return False # Some sanity checking for field in ['signature', 'certificate']: if field not in message: return fail("No %r field found." % field) if not isinstance(message[field], six.text_type): _log.error('msg[%r] is not a unicode string' % field) try: # Make an effort to decode it, it's very likely utf-8 since that's what # is hardcoded throughout fedmsg. Worst case scenario is it'll cause a # validation error when there shouldn't be one. message[field] = message[field].decode('utf-8') except UnicodeError as e: _log.error("Unable to decode the message '%s' field: %s", field, str(e)) return False # Peal off the auth datums signature = message['signature'].decode('base64') certificate = message['certificate'].decode('base64') message = fedmsg.crypto.strip_credentials(message) # Build an X509 object cert = M2Crypto.X509.load_cert_string(certificate) # Validate the cert. Make sure it is signed by our CA. 
# validate_certificate will one day be a part of M2Crypto.SSL.Context # https://bugzilla.osafoundation.org/show_bug.cgi?id=11690 ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt') crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem') fd, cafile = tempfile.mkstemp() try: ca_certificate, crl = utils.load_certificates(ca_location, crl_location) os.write(fd, ca_certificate.encode('ascii')) os.fsync(fd) ctx = m2ext.SSL.Context() ctx.load_verify_locations(cafile=cafile) if not ctx.validate_certificate(cert): ca_certificate, crl = utils.load_certificates( ca_location, crl_location, invalidate_cache=True) with open(cafile, 'w') as f: f.write(ca_certificate) ctx = m2ext.SSL.Context() ctx.load_verify_locations(cafile=cafile) if not ctx.validate_certificate(cert): return fail("X509 certificate is not valid.") except (IOError, RequestException) as e: _log.error(str(e)) return False finally: os.close(fd) os.remove(cafile) if crl: try: fd, crlfile = tempfile.mkstemp(text=True) os.write(fd, crl.encode('ascii')) os.fsync(fd) crl = M2Crypto.X509.load_crl(crlfile) finally: os.close(fd) os.remove(crlfile) # FIXME -- We need to check that the CRL is signed by our own CA. # See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2 # if not ctx.validate_certificate(crl): # return fail("X509 CRL is not valid.") # FIXME -- we check the CRL, but by doing string comparison ourselves. # This is not what we want to be doing. # There is a patch into M2Crypto to handle this for us. We should use it # once its integrated upstream. 
# See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2 revoked_serials = [long(line.split(': ')[1].strip(), base=16) for line in crl.as_text().split('\n') if 'Serial Number:' in line] if cert.get_serial_number() in revoked_serials: subject = cert.get_subject() signer = '(no CN)' if subject.nid.get('CN'): entry = subject.get_entries_by_nid(subject.nid['CN'])[0] if entry: signer = entry.get_data().as_text() return fail("X509 cert %r, %r is in the Revocation List (CRL)" % ( signer, cert.get_serial_number())) # If the cert is good, then test to see if the signature in the messages # matches up with the provided cert. rsa_public = cert.get_pubkey().get_rsa() digest = M2Crypto.EVP.MessageDigest('sha1') digest.update(fedmsg.encoding.dumps(message)) try: if not rsa_public.verify(digest.digest(), signature): raise M2Crypto.RSA.RSAError("RSA signature failed to validate.") except M2Crypto.RSA.RSAError as e: return fail(str(e)) # Now we know that the cert is valid. The message is *authenticated*. # * Next step: Authorization * # Load our policy from the config dict. routing_policy = config.get('routing_policy', {}) # Determine the name of the signer of the message. # This will be something like "shell-pkgs01.stg.phx2.fedoraproject.org" subject = cert.get_subject() signer = subject.get_entries_by_nid(subject.nid['CN'])[0]\ .get_data().as_text() return utils.validate_policy( message.get('topic'), signer, routing_policy, config.get('routing_nitpicky', False))
python
def _m2crypto_validate(message, ssldir=None, **config): """ Return true or false if the message is signed appropriately. Four things must be true: 1) The X509 cert must be signed by our CA 2) The cert must not be in our CRL. 3) We must be able to verify the signature using the RSA public key contained in the X509 cert. 4) The topic of the message and the CN on the cert must appear in the :ref:`conf-routing-policy` dict. """ if ssldir is None: raise ValueError("You must set the ssldir keyword argument.") def fail(reason): _log.warn("Failed validation. %s" % reason) return False # Some sanity checking for field in ['signature', 'certificate']: if field not in message: return fail("No %r field found." % field) if not isinstance(message[field], six.text_type): _log.error('msg[%r] is not a unicode string' % field) try: # Make an effort to decode it, it's very likely utf-8 since that's what # is hardcoded throughout fedmsg. Worst case scenario is it'll cause a # validation error when there shouldn't be one. message[field] = message[field].decode('utf-8') except UnicodeError as e: _log.error("Unable to decode the message '%s' field: %s", field, str(e)) return False # Peal off the auth datums signature = message['signature'].decode('base64') certificate = message['certificate'].decode('base64') message = fedmsg.crypto.strip_credentials(message) # Build an X509 object cert = M2Crypto.X509.load_cert_string(certificate) # Validate the cert. Make sure it is signed by our CA. 
# validate_certificate will one day be a part of M2Crypto.SSL.Context # https://bugzilla.osafoundation.org/show_bug.cgi?id=11690 ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt') crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem') fd, cafile = tempfile.mkstemp() try: ca_certificate, crl = utils.load_certificates(ca_location, crl_location) os.write(fd, ca_certificate.encode('ascii')) os.fsync(fd) ctx = m2ext.SSL.Context() ctx.load_verify_locations(cafile=cafile) if not ctx.validate_certificate(cert): ca_certificate, crl = utils.load_certificates( ca_location, crl_location, invalidate_cache=True) with open(cafile, 'w') as f: f.write(ca_certificate) ctx = m2ext.SSL.Context() ctx.load_verify_locations(cafile=cafile) if not ctx.validate_certificate(cert): return fail("X509 certificate is not valid.") except (IOError, RequestException) as e: _log.error(str(e)) return False finally: os.close(fd) os.remove(cafile) if crl: try: fd, crlfile = tempfile.mkstemp(text=True) os.write(fd, crl.encode('ascii')) os.fsync(fd) crl = M2Crypto.X509.load_crl(crlfile) finally: os.close(fd) os.remove(crlfile) # FIXME -- We need to check that the CRL is signed by our own CA. # See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2 # if not ctx.validate_certificate(crl): # return fail("X509 CRL is not valid.") # FIXME -- we check the CRL, but by doing string comparison ourselves. # This is not what we want to be doing. # There is a patch into M2Crypto to handle this for us. We should use it # once its integrated upstream. 
# See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2 revoked_serials = [long(line.split(': ')[1].strip(), base=16) for line in crl.as_text().split('\n') if 'Serial Number:' in line] if cert.get_serial_number() in revoked_serials: subject = cert.get_subject() signer = '(no CN)' if subject.nid.get('CN'): entry = subject.get_entries_by_nid(subject.nid['CN'])[0] if entry: signer = entry.get_data().as_text() return fail("X509 cert %r, %r is in the Revocation List (CRL)" % ( signer, cert.get_serial_number())) # If the cert is good, then test to see if the signature in the messages # matches up with the provided cert. rsa_public = cert.get_pubkey().get_rsa() digest = M2Crypto.EVP.MessageDigest('sha1') digest.update(fedmsg.encoding.dumps(message)) try: if not rsa_public.verify(digest.digest(), signature): raise M2Crypto.RSA.RSAError("RSA signature failed to validate.") except M2Crypto.RSA.RSAError as e: return fail(str(e)) # Now we know that the cert is valid. The message is *authenticated*. # * Next step: Authorization * # Load our policy from the config dict. routing_policy = config.get('routing_policy', {}) # Determine the name of the signer of the message. # This will be something like "shell-pkgs01.stg.phx2.fedoraproject.org" subject = cert.get_subject() signer = subject.get_entries_by_nid(subject.nid['CN'])[0]\ .get_data().as_text() return utils.validate_policy( message.get('topic'), signer, routing_policy, config.get('routing_nitpicky', False))
Return true or false if the message is signed appropriately. Four things must be true: 1) The X509 cert must be signed by our CA 2) The cert must not be in our CRL. 3) We must be able to verify the signature using the RSA public key contained in the X509 cert. 4) The topic of the message and the CN on the cert must appear in the :ref:`conf-routing-policy` dict.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/x509.py#L94-L225
fedora-infra/fedmsg
fedmsg/meta/base.py
BaseProcessor.conglomerate
def conglomerate(self, messages, **config): """ Given N messages, return another list that has some of them grouped together into a common 'item'. A conglomeration of messages should be of the following form:: { 'subtitle': 'relrod pushed commits to ghc and 487 other packages', 'link': None, # This could be something. 'icon': 'https://that-git-logo', 'secondary_icon': 'https://that-relrod-avatar', 'start_time': some_timestamp, 'end_time': some_other_timestamp, 'human_time': '5 minutes ago', 'usernames': ['relrod'], 'packages': ['ghc', 'nethack', ... ], 'topics': ['org.fedoraproject.prod.git.receive'], 'categories': ['git'], 'msg_ids': { '2014-abcde': { 'subtitle': 'relrod pushed some commits to ghc', 'title': 'git.receive', 'link': 'http://...', 'icon': 'http://...', }, '2014-bcdef': { 'subtitle': 'relrod pushed some commits to nethack', 'title': 'git.receive', 'link': 'http://...', 'icon': 'http://...', }, }, } The telltale sign that an entry in a list of messages represents a conglomerate message is the presence of the plural ``msg_ids`` field. In contrast, ungrouped singular messages should bear a singular ``msg_id`` field. """ for conglomerator in self.conglomerator_objects: messages = conglomerator.conglomerate(messages, **config) return messages
python
def conglomerate(self, messages, **config): """ Given N messages, return another list that has some of them grouped together into a common 'item'. A conglomeration of messages should be of the following form:: { 'subtitle': 'relrod pushed commits to ghc and 487 other packages', 'link': None, # This could be something. 'icon': 'https://that-git-logo', 'secondary_icon': 'https://that-relrod-avatar', 'start_time': some_timestamp, 'end_time': some_other_timestamp, 'human_time': '5 minutes ago', 'usernames': ['relrod'], 'packages': ['ghc', 'nethack', ... ], 'topics': ['org.fedoraproject.prod.git.receive'], 'categories': ['git'], 'msg_ids': { '2014-abcde': { 'subtitle': 'relrod pushed some commits to ghc', 'title': 'git.receive', 'link': 'http://...', 'icon': 'http://...', }, '2014-bcdef': { 'subtitle': 'relrod pushed some commits to nethack', 'title': 'git.receive', 'link': 'http://...', 'icon': 'http://...', }, }, } The telltale sign that an entry in a list of messages represents a conglomerate message is the presence of the plural ``msg_ids`` field. In contrast, ungrouped singular messages should bear a singular ``msg_id`` field. """ for conglomerator in self.conglomerator_objects: messages = conglomerator.conglomerate(messages, **config) return messages
Given N messages, return another list that has some of them grouped together into a common 'item'. A conglomeration of messages should be of the following form:: { 'subtitle': 'relrod pushed commits to ghc and 487 other packages', 'link': None, # This could be something. 'icon': 'https://that-git-logo', 'secondary_icon': 'https://that-relrod-avatar', 'start_time': some_timestamp, 'end_time': some_other_timestamp, 'human_time': '5 minutes ago', 'usernames': ['relrod'], 'packages': ['ghc', 'nethack', ... ], 'topics': ['org.fedoraproject.prod.git.receive'], 'categories': ['git'], 'msg_ids': { '2014-abcde': { 'subtitle': 'relrod pushed some commits to ghc', 'title': 'git.receive', 'link': 'http://...', 'icon': 'http://...', }, '2014-bcdef': { 'subtitle': 'relrod pushed some commits to nethack', 'title': 'git.receive', 'link': 'http://...', 'icon': 'http://...', }, }, } The telltale sign that an entry in a list of messages represents a conglomerate message is the presence of the plural ``msg_ids`` field. In contrast, ungrouped singular messages should bear a singular ``msg_id`` field.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/base.py#L103-L144
fedora-infra/fedmsg
fedmsg/meta/base.py
BaseProcessor.handle_msg
def handle_msg(self, msg, **config): """ If we can handle the given message, return the remainder of the topic. Returns None if we can't handle the message. """ match = self.__prefix__.match(msg['topic']) if match: return match.groups()[-1] or ""
python
def handle_msg(self, msg, **config): """ If we can handle the given message, return the remainder of the topic. Returns None if we can't handle the message. """ match = self.__prefix__.match(msg['topic']) if match: return match.groups()[-1] or ""
If we can handle the given message, return the remainder of the topic. Returns None if we can't handle the message.
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/base.py#L146-L154
necaris/python3-openid
openid/extensions/draft/pape2.py
Request.parseExtensionArgs
def parseExtensionArgs(self, args): """Set the state of this request to be that expressed in these PAPE arguments @param args: The PAPE arguments without a namespace @rtype: None @raises ValueError: When the max_auth_age is not parseable as an integer """ # preferred_auth_policies is a space-separated list of policy URIs self.preferred_auth_policies = [] policies_str = args.get('preferred_auth_policies') if policies_str: if isinstance(policies_str, bytes): policies_str = str(policies_str, encoding="utf-8") for uri in policies_str.split(' '): if uri not in self.preferred_auth_policies: self.preferred_auth_policies.append(uri) # max_auth_age is base-10 integer number of seconds max_auth_age_str = args.get('max_auth_age') self.max_auth_age = None if max_auth_age_str: try: self.max_auth_age = int(max_auth_age_str) except ValueError: pass
python
def parseExtensionArgs(self, args): """Set the state of this request to be that expressed in these PAPE arguments @param args: The PAPE arguments without a namespace @rtype: None @raises ValueError: When the max_auth_age is not parseable as an integer """ # preferred_auth_policies is a space-separated list of policy URIs self.preferred_auth_policies = [] policies_str = args.get('preferred_auth_policies') if policies_str: if isinstance(policies_str, bytes): policies_str = str(policies_str, encoding="utf-8") for uri in policies_str.split(' '): if uri not in self.preferred_auth_policies: self.preferred_auth_policies.append(uri) # max_auth_age is base-10 integer number of seconds max_auth_age_str = args.get('max_auth_age') self.max_auth_age = None if max_auth_age_str: try: self.max_auth_age = int(max_auth_age_str) except ValueError: pass
Set the state of this request to be that expressed in these PAPE arguments @param args: The PAPE arguments without a namespace @rtype: None @raises ValueError: When the max_auth_age is not parseable as an integer
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/extensions/draft/pape2.py#L101-L132
necaris/python3-openid
openid/consumer/consumer.py
Consumer.begin
def begin(self, user_url, anonymous=False): """Start the OpenID authentication process. See steps 1-2 in the overview at the top of this file. @param user_url: Identity URL given by the user. This method performs a textual transformation of the URL to try and make sure it is normalized. For example, a user_url of example.com will be normalized to http://example.com/ normalizing and resolving any redirects the server might issue. @type user_url: unicode @param anonymous: Whether to make an anonymous request of the OpenID provider. Such a request does not ask for an authorization assertion for an OpenID identifier, but may be used with extensions to pass other data. e.g. "I don't care who you are, but I'd like to know your time zone." @type anonymous: bool @returns: An object containing the discovered information will be returned, with a method for building a redirect URL to the server, as described in step 3 of the overview. This object may also be used to add extension arguments to the request, using its L{addExtensionArg<openid.consumer.consumer.AuthRequest.addExtensionArg>} method. @returntype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @raises openid.consumer.discover.DiscoveryFailure: when I fail to find an OpenID server for this URL. If the C{yadis} package is available, L{openid.consumer.discover.DiscoveryFailure} is an alias for C{yadis.discover.DiscoveryFailure}. """ disco = Discovery(self.session, user_url, self.session_key_prefix) try: service = disco.getNextService(self._discover) except fetchers.HTTPFetchingError as why: raise DiscoveryFailure('Error fetching XRDS document: %s' % (why.why, ), None) if service is None: raise DiscoveryFailure('No usable OpenID services found for %s' % (user_url, ), None) else: return self.beginWithoutDiscovery(service, anonymous)
python
def begin(self, user_url, anonymous=False): """Start the OpenID authentication process. See steps 1-2 in the overview at the top of this file. @param user_url: Identity URL given by the user. This method performs a textual transformation of the URL to try and make sure it is normalized. For example, a user_url of example.com will be normalized to http://example.com/ normalizing and resolving any redirects the server might issue. @type user_url: unicode @param anonymous: Whether to make an anonymous request of the OpenID provider. Such a request does not ask for an authorization assertion for an OpenID identifier, but may be used with extensions to pass other data. e.g. "I don't care who you are, but I'd like to know your time zone." @type anonymous: bool @returns: An object containing the discovered information will be returned, with a method for building a redirect URL to the server, as described in step 3 of the overview. This object may also be used to add extension arguments to the request, using its L{addExtensionArg<openid.consumer.consumer.AuthRequest.addExtensionArg>} method. @returntype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @raises openid.consumer.discover.DiscoveryFailure: when I fail to find an OpenID server for this URL. If the C{yadis} package is available, L{openid.consumer.discover.DiscoveryFailure} is an alias for C{yadis.discover.DiscoveryFailure}. """ disco = Discovery(self.session, user_url, self.session_key_prefix) try: service = disco.getNextService(self._discover) except fetchers.HTTPFetchingError as why: raise DiscoveryFailure('Error fetching XRDS document: %s' % (why.why, ), None) if service is None: raise DiscoveryFailure('No usable OpenID services found for %s' % (user_url, ), None) else: return self.beginWithoutDiscovery(service, anonymous)
Start the OpenID authentication process. See steps 1-2 in the overview at the top of this file. @param user_url: Identity URL given by the user. This method performs a textual transformation of the URL to try and make sure it is normalized. For example, a user_url of example.com will be normalized to http://example.com/ normalizing and resolving any redirects the server might issue. @type user_url: unicode @param anonymous: Whether to make an anonymous request of the OpenID provider. Such a request does not ask for an authorization assertion for an OpenID identifier, but may be used with extensions to pass other data. e.g. "I don't care who you are, but I'd like to know your time zone." @type anonymous: bool @returns: An object containing the discovered information will be returned, with a method for building a redirect URL to the server, as described in step 3 of the overview. This object may also be used to add extension arguments to the request, using its L{addExtensionArg<openid.consumer.consumer.AuthRequest.addExtensionArg>} method. @returntype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @raises openid.consumer.discover.DiscoveryFailure: when I fail to find an OpenID server for this URL. If the C{yadis} package is available, L{openid.consumer.discover.DiscoveryFailure} is an alias for C{yadis.discover.DiscoveryFailure}.
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L312-L359
necaris/python3-openid
openid/consumer/consumer.py
Consumer.beginWithoutDiscovery
def beginWithoutDiscovery(self, service, anonymous=False): """Start OpenID verification without doing OpenID server discovery. This method is used internally by Consumer.begin after discovery is performed, and exists to provide an interface for library users needing to perform their own discovery. @param service: an OpenID service endpoint descriptor. This object and factories for it are found in the L{openid.consumer.discover} module. @type service: L{OpenIDServiceEndpoint<openid.consumer.discover.OpenIDServiceEndpoint>} @returns: an OpenID authentication request object. @rtype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @See: Openid.consumer.consumer.Consumer.begin @see: openid.consumer.discover """ auth_req = self.consumer.begin(service) self.session[self._token_key] = auth_req.endpoint try: auth_req.setAnonymous(anonymous) except ValueError as why: raise ProtocolError(str(why)) return auth_req
python
def beginWithoutDiscovery(self, service, anonymous=False): """Start OpenID verification without doing OpenID server discovery. This method is used internally by Consumer.begin after discovery is performed, and exists to provide an interface for library users needing to perform their own discovery. @param service: an OpenID service endpoint descriptor. This object and factories for it are found in the L{openid.consumer.discover} module. @type service: L{OpenIDServiceEndpoint<openid.consumer.discover.OpenIDServiceEndpoint>} @returns: an OpenID authentication request object. @rtype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @See: Openid.consumer.consumer.Consumer.begin @see: openid.consumer.discover """ auth_req = self.consumer.begin(service) self.session[self._token_key] = auth_req.endpoint try: auth_req.setAnonymous(anonymous) except ValueError as why: raise ProtocolError(str(why)) return auth_req
Start OpenID verification without doing OpenID server discovery. This method is used internally by Consumer.begin after discovery is performed, and exists to provide an interface for library users needing to perform their own discovery. @param service: an OpenID service endpoint descriptor. This object and factories for it are found in the L{openid.consumer.discover} module. @type service: L{OpenIDServiceEndpoint<openid.consumer.discover.OpenIDServiceEndpoint>} @returns: an OpenID authentication request object. @rtype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @See: Openid.consumer.consumer.Consumer.begin @see: openid.consumer.discover
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L361-L390
necaris/python3-openid
openid/consumer/consumer.py
GenericConsumer._checkReturnTo
def _checkReturnTo(self, message, return_to): """Check an OpenID message and its openid.return_to value against a return_to URL from an application. Return True on success, False on failure. """ # Check the openid.return_to args against args in the original # message. try: self._verifyReturnToArgs(message.toPostArgs()) except ProtocolError as why: logging.exception("Verifying return_to arguments: %s" % (why, )) return False # Check the return_to base URL against the one in the message. msg_return_to = message.getArg(OPENID_NS, 'return_to') # The URL scheme, authority, and path MUST be the same between # the two URLs. app_parts = urlparse(urinorm.urinorm(return_to)) msg_parts = urlparse(urinorm.urinorm(msg_return_to)) # (addressing scheme, network location, path) must be equal in # both URLs. for part in range(0, 3): if app_parts[part] != msg_parts[part]: return False return True
python
def _checkReturnTo(self, message, return_to): """Check an OpenID message and its openid.return_to value against a return_to URL from an application. Return True on success, False on failure. """ # Check the openid.return_to args against args in the original # message. try: self._verifyReturnToArgs(message.toPostArgs()) except ProtocolError as why: logging.exception("Verifying return_to arguments: %s" % (why, )) return False # Check the return_to base URL against the one in the message. msg_return_to = message.getArg(OPENID_NS, 'return_to') # The URL scheme, authority, and path MUST be the same between # the two URLs. app_parts = urlparse(urinorm.urinorm(return_to)) msg_parts = urlparse(urinorm.urinorm(msg_return_to)) # (addressing scheme, network location, path) must be equal in # both URLs. for part in range(0, 3): if app_parts[part] != msg_parts[part]: return False return True
Check an OpenID message and its openid.return_to value against a return_to URL from an application. Return True on success, False on failure.
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L665-L692
necaris/python3-openid
openid/consumer/consumer.py
GenericConsumer._verifyDiscoveredServices
def _verifyDiscoveredServices(self, claimed_id, services, to_match_endpoints): """See @L{_discoverAndVerify}""" # Search the services resulting from discovery to find one # that matches the information from the assertion failure_messages = [] for endpoint in services: for to_match_endpoint in to_match_endpoints: try: self._verifyDiscoverySingle(endpoint, to_match_endpoint) except ProtocolError as why: failure_messages.append(str(why)) else: # It matches, so discover verification has # succeeded. Return this endpoint. return endpoint else: logging.error('Discovery verification failure for %s' % (claimed_id, )) for failure_message in failure_messages: logging.error(' * Endpoint mismatch: ' + failure_message) raise DiscoveryFailure( 'No matching endpoint found after discovering %s' % (claimed_id, ), None)
python
def _verifyDiscoveredServices(self, claimed_id, services, to_match_endpoints): """See @L{_discoverAndVerify}""" # Search the services resulting from discovery to find one # that matches the information from the assertion failure_messages = [] for endpoint in services: for to_match_endpoint in to_match_endpoints: try: self._verifyDiscoverySingle(endpoint, to_match_endpoint) except ProtocolError as why: failure_messages.append(str(why)) else: # It matches, so discover verification has # succeeded. Return this endpoint. return endpoint else: logging.error('Discovery verification failure for %s' % (claimed_id, )) for failure_message in failure_messages: logging.error(' * Endpoint mismatch: ' + failure_message) raise DiscoveryFailure( 'No matching endpoint found after discovering %s' % (claimed_id, ), None)
See @L{_discoverAndVerify}
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L1077-L1102
necaris/python3-openid
openid/consumer/consumer.py
GenericConsumer._checkAuth
def _checkAuth(self, message, server_url): """Make a check_authentication request to verify this message. @returns: True if the request is valid. @rtype: bool """ logging.info('Using OpenID check_authentication') request = self._createCheckAuthRequest(message) if request is None: return False try: response = self._makeKVPost(request, server_url) except (fetchers.HTTPFetchingError, ServerError) as e: e0 = e.args[0] logging.exception('check_authentication failed: %s' % e0) return False else: return self._processCheckAuthResponse(response, server_url)
python
def _checkAuth(self, message, server_url): """Make a check_authentication request to verify this message. @returns: True if the request is valid. @rtype: bool """ logging.info('Using OpenID check_authentication') request = self._createCheckAuthRequest(message) if request is None: return False try: response = self._makeKVPost(request, server_url) except (fetchers.HTTPFetchingError, ServerError) as e: e0 = e.args[0] logging.exception('check_authentication failed: %s' % e0) return False else: return self._processCheckAuthResponse(response, server_url)
Make a check_authentication request to verify this message. @returns: True if the request is valid. @rtype: bool
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L1104-L1121
necaris/python3-openid
openid/consumer/consumer.py
GenericConsumer._createCheckAuthRequest
def _createCheckAuthRequest(self, message): """Generate a check_authentication request message given an id_res message. """ signed = message.getArg(OPENID_NS, 'signed') if signed: if isinstance(signed, bytes): signed = str(signed, encoding="utf-8") for k in signed.split(','): logging.info(k) val = message.getAliasedArg(k) # Signed value is missing if val is None: logging.info('Missing signed field %r' % (k, )) return None check_auth_message = message.copy() check_auth_message.setArg(OPENID_NS, 'mode', 'check_authentication') return check_auth_message
python
def _createCheckAuthRequest(self, message): """Generate a check_authentication request message given an id_res message. """ signed = message.getArg(OPENID_NS, 'signed') if signed: if isinstance(signed, bytes): signed = str(signed, encoding="utf-8") for k in signed.split(','): logging.info(k) val = message.getAliasedArg(k) # Signed value is missing if val is None: logging.info('Missing signed field %r' % (k, )) return None check_auth_message = message.copy() check_auth_message.setArg(OPENID_NS, 'mode', 'check_authentication') return check_auth_message
Generate a check_authentication request message given an id_res message.
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L1123-L1142
necaris/python3-openid
openid/consumer/consumer.py
GenericConsumer._negotiateAssociation
def _negotiateAssociation(self, endpoint): """Make association requests to the server, attempting to create a new association. @returns: a new association object @rtype: L{openid.association.Association} """ # Get our preferred session/association type from the negotiatior. assoc_type, session_type = self.negotiator.getAllowedType() try: assoc = self._requestAssociation(endpoint, assoc_type, session_type) except ServerError as why: supportedTypes = self._extractSupportedAssociationType( why, endpoint, assoc_type) if supportedTypes is not None: assoc_type, session_type = supportedTypes # Attempt to create an association from the assoc_type # and session_type that the server told us it # supported. try: assoc = self._requestAssociation(endpoint, assoc_type, session_type) except ServerError as why: # Do not keep trying, since it rejected the # association type that it told us to use. logging.error( 'Server %s refused its suggested association ' 'type: session_type=%s, assoc_type=%s' % ( endpoint.server_url, session_type, assoc_type)) return None else: return assoc else: return assoc
python
def _negotiateAssociation(self, endpoint): """Make association requests to the server, attempting to create a new association. @returns: a new association object @rtype: L{openid.association.Association} """ # Get our preferred session/association type from the negotiatior. assoc_type, session_type = self.negotiator.getAllowedType() try: assoc = self._requestAssociation(endpoint, assoc_type, session_type) except ServerError as why: supportedTypes = self._extractSupportedAssociationType( why, endpoint, assoc_type) if supportedTypes is not None: assoc_type, session_type = supportedTypes # Attempt to create an association from the assoc_type # and session_type that the server told us it # supported. try: assoc = self._requestAssociation(endpoint, assoc_type, session_type) except ServerError as why: # Do not keep trying, since it rejected the # association type that it told us to use. logging.error( 'Server %s refused its suggested association ' 'type: session_type=%s, assoc_type=%s' % ( endpoint.server_url, session_type, assoc_type)) return None else: return assoc else: return assoc
Make association requests to the server, attempting to create a new association. @returns: a new association object @rtype: L{openid.association.Association}
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L1187-L1223
necaris/python3-openid
openid/consumer/consumer.py
GenericConsumer._getOpenID1SessionType
def _getOpenID1SessionType(self, assoc_response): """Given an association response message, extract the OpenID 1.X session type. This function mostly takes care of the 'no-encryption' default behavior in OpenID 1. If the association type is plain-text, this function will return 'no-encryption' @returns: The association type for this message @rtype: str @raises KeyError: when the session_type field is absent. """ # If it's an OpenID 1 message, allow session_type to default # to None (which signifies "no-encryption") session_type = assoc_response.getArg(OPENID1_NS, 'session_type') # Handle the differences between no-encryption association # respones in OpenID 1 and 2: # no-encryption is not really a valid session type for # OpenID 1, but we'll accept it anyway, while issuing a # warning. if session_type == 'no-encryption': logging.warning('OpenID server sent "no-encryption"' 'for OpenID 1.X') # Missing or empty session type is the way to flag a # 'no-encryption' response. Change the session type to # 'no-encryption' so that it can be handled in the same # way as OpenID 2 'no-encryption' respones. elif session_type == '' or session_type is None: session_type = 'no-encryption' return session_type
python
def _getOpenID1SessionType(self, assoc_response): """Given an association response message, extract the OpenID 1.X session type. This function mostly takes care of the 'no-encryption' default behavior in OpenID 1. If the association type is plain-text, this function will return 'no-encryption' @returns: The association type for this message @rtype: str @raises KeyError: when the session_type field is absent. """ # If it's an OpenID 1 message, allow session_type to default # to None (which signifies "no-encryption") session_type = assoc_response.getArg(OPENID1_NS, 'session_type') # Handle the differences between no-encryption association # respones in OpenID 1 and 2: # no-encryption is not really a valid session type for # OpenID 1, but we'll accept it anyway, while issuing a # warning. if session_type == 'no-encryption': logging.warning('OpenID server sent "no-encryption"' 'for OpenID 1.X') # Missing or empty session type is the way to flag a # 'no-encryption' response. Change the session type to # 'no-encryption' so that it can be handled in the same # way as OpenID 2 'no-encryption' respones. elif session_type == '' or session_type is None: session_type = 'no-encryption' return session_type
Given an association response message, extract the OpenID 1.X session type. This function mostly takes care of the 'no-encryption' default behavior in OpenID 1. If the association type is plain-text, this function will return 'no-encryption' @returns: The association type for this message @rtype: str @raises KeyError: when the session_type field is absent.
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L1343-L1379
necaris/python3-openid
openid/consumer/discover.py
normalizeURL
def normalizeURL(url): """Normalize a URL, converting normalization failures to DiscoveryFailure""" try: normalized = urinorm.urinorm(url) except ValueError as why: raise DiscoveryFailure('Normalizing identifier: %s' % (why, ), None) else: return urllib.parse.urldefrag(normalized)[0]
python
def normalizeURL(url): """Normalize a URL, converting normalization failures to DiscoveryFailure""" try: normalized = urinorm.urinorm(url) except ValueError as why: raise DiscoveryFailure('Normalizing identifier: %s' % (why, ), None) else: return urllib.parse.urldefrag(normalized)[0]
Normalize a URL, converting normalization failures to DiscoveryFailure
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/discover.py#L290-L298
necaris/python3-openid
openid/consumer/discover.py
OpenIDServiceEndpoint.getDisplayIdentifier
def getDisplayIdentifier(self): """Return the display_identifier if set, else return the claimed_id. """ if self.display_identifier is not None: return self.display_identifier if self.claimed_id is None: return None else: return urllib.parse.urldefrag(self.claimed_id)[0]
python
def getDisplayIdentifier(self): """Return the display_identifier if set, else return the claimed_id. """ if self.display_identifier is not None: return self.display_identifier if self.claimed_id is None: return None else: return urllib.parse.urldefrag(self.claimed_id)[0]
Return the display_identifier if set, else return the claimed_id.
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/discover.py#L84-L92
necaris/python3-openid
openid/yadis/etxrd.py
parseXRDS
def parseXRDS(text): """Parse the given text as an XRDS document. @return: ElementTree containing an XRDS document @raises XRDSError: When there is a parse error or the document does not contain an XRDS. """ try: # lxml prefers to parse bytestrings, and occasionally chokes on a # combination of text strings and declared XML encodings -- see # https://github.com/necaris/python3-openid/issues/19 # To avoid this, we ensure that the 'text' we're parsing is actually # a bytestring bytestring = text.encode('utf8') if isinstance(text, str) else text element = SafeElementTree.XML(bytestring) except (SystemExit, MemoryError, AssertionError, ImportError): raise except Exception as why: exc = XRDSError('Error parsing document as XML') exc.reason = why raise exc else: tree = ElementTree.ElementTree(element) if not isXRDS(tree): raise XRDSError('Not an XRDS document') return tree
python
def parseXRDS(text): """Parse the given text as an XRDS document. @return: ElementTree containing an XRDS document @raises XRDSError: When there is a parse error or the document does not contain an XRDS. """ try: # lxml prefers to parse bytestrings, and occasionally chokes on a # combination of text strings and declared XML encodings -- see # https://github.com/necaris/python3-openid/issues/19 # To avoid this, we ensure that the 'text' we're parsing is actually # a bytestring bytestring = text.encode('utf8') if isinstance(text, str) else text element = SafeElementTree.XML(bytestring) except (SystemExit, MemoryError, AssertionError, ImportError): raise except Exception as why: exc = XRDSError('Error parsing document as XML') exc.reason = why raise exc else: tree = ElementTree.ElementTree(element) if not isXRDS(tree): raise XRDSError('Not an XRDS document') return tree
Parse the given text as an XRDS document. @return: ElementTree containing an XRDS document @raises XRDSError: When there is a parse error or the document does not contain an XRDS.
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/yadis/etxrd.py#L49-L76
necaris/python3-openid
openid/yadis/etxrd.py
prioSort
def prioSort(elements): """Sort a list of elements that have priority attributes""" # Randomize the services before sorting so that equal priority # elements are load-balanced. random.shuffle(elements) sorted_elems = sorted(elements, key=getPriority) return sorted_elems
python
def prioSort(elements): """Sort a list of elements that have priority attributes""" # Randomize the services before sorting so that equal priority # elements are load-balanced. random.shuffle(elements) sorted_elems = sorted(elements, key=getPriority) return sorted_elems
Sort a list of elements that have priority attributes
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/yadis/etxrd.py#L251-L258
necaris/python3-openid
openid/oidutil.py
importSafeElementTree
def importSafeElementTree(module_names=None): """Find a working ElementTree implementation that is not vulnerable to XXE, using `defusedxml`. >>> XXESafeElementTree = importSafeElementTree() @param module_names: The names of modules to try to use as a safe ElementTree. Defaults to C{L{xxe_safe_elementtree_modules}} @returns: An ElementTree module that is not vulnerable to XXE. """ if module_names is None: module_names = xxe_safe_elementtree_modules try: return importElementTree(module_names) except ImportError: raise ImportError('Unable to find a ElementTree module ' 'that is not vulnerable to XXE. ' 'Tried importing %r' % (module_names, ))
python
def importSafeElementTree(module_names=None): """Find a working ElementTree implementation that is not vulnerable to XXE, using `defusedxml`. >>> XXESafeElementTree = importSafeElementTree() @param module_names: The names of modules to try to use as a safe ElementTree. Defaults to C{L{xxe_safe_elementtree_modules}} @returns: An ElementTree module that is not vulnerable to XXE. """ if module_names is None: module_names = xxe_safe_elementtree_modules try: return importElementTree(module_names) except ImportError: raise ImportError('Unable to find a ElementTree module ' 'that is not vulnerable to XXE. ' 'Tried importing %r' % (module_names, ))
Find a working ElementTree implementation that is not vulnerable to XXE, using `defusedxml`. >>> XXESafeElementTree = importSafeElementTree() @param module_names: The names of modules to try to use as a safe ElementTree. Defaults to C{L{xxe_safe_elementtree_modules}} @returns: An ElementTree module that is not vulnerable to XXE.
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/oidutil.py#L69-L87
necaris/python3-openid
openid/oidutil.py
appendArgs
def appendArgs(url, args): """Append query arguments to a HTTP(s) URL. If the URL already has query arguemtns, these arguments will be added, and the existing arguments will be preserved. Duplicate arguments will not be detected or collapsed (both will appear in the output). @param url: The url to which the arguments will be appended @type url: str @param args: The query arguments to add to the URL. If a dictionary is passed, the items will be sorted before appending them to the URL. If a sequence of pairs is passed, the order of the sequence will be preserved. @type args: A dictionary from string to string, or a sequence of pairs of strings. @returns: The URL with the parameters added @rtype: str """ if hasattr(args, 'items'): args = sorted(args.items()) else: args = list(args) if not isinstance(url, str): url = str(url, encoding="utf-8") if not args: return url if '?' in url: sep = '&' else: sep = '?' # Map unicode to UTF-8 if present. Do not make any assumptions # about the encodings of plain bytes (str). i = 0 for k, v in args: if not isinstance(k, bytes): k = k.encode('utf-8') if not isinstance(v, bytes): v = v.encode('utf-8') args[i] = (k, v) i += 1 return '%s%s%s' % (url, sep, urlencode(args))
python
def appendArgs(url, args): """Append query arguments to a HTTP(s) URL. If the URL already has query arguemtns, these arguments will be added, and the existing arguments will be preserved. Duplicate arguments will not be detected or collapsed (both will appear in the output). @param url: The url to which the arguments will be appended @type url: str @param args: The query arguments to add to the URL. If a dictionary is passed, the items will be sorted before appending them to the URL. If a sequence of pairs is passed, the order of the sequence will be preserved. @type args: A dictionary from string to string, or a sequence of pairs of strings. @returns: The URL with the parameters added @rtype: str """ if hasattr(args, 'items'): args = sorted(args.items()) else: args = list(args) if not isinstance(url, str): url = str(url, encoding="utf-8") if not args: return url if '?' in url: sep = '&' else: sep = '?' # Map unicode to UTF-8 if present. Do not make any assumptions # about the encodings of plain bytes (str). i = 0 for k, v in args: if not isinstance(k, bytes): k = k.encode('utf-8') if not isinstance(v, bytes): v = v.encode('utf-8') args[i] = (k, v) i += 1 return '%s%s%s' % (url, sep, urlencode(args))
Append query arguments to a HTTP(s) URL. If the URL already has query arguemtns, these arguments will be added, and the existing arguments will be preserved. Duplicate arguments will not be detected or collapsed (both will appear in the output). @param url: The url to which the arguments will be appended @type url: str @param args: The query arguments to add to the URL. If a dictionary is passed, the items will be sorted before appending them to the URL. If a sequence of pairs is passed, the order of the sequence will be preserved. @type args: A dictionary from string to string, or a sequence of pairs of strings. @returns: The URL with the parameters added @rtype: str
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/oidutil.py#L149-L197
necaris/python3-openid
openid/oidutil.py
toBase64
def toBase64(s): """Represent string / bytes s as base64, omitting newlines""" if isinstance(s, str): s = s.encode("utf-8") return binascii.b2a_base64(s)[:-1]
python
def toBase64(s): """Represent string / bytes s as base64, omitting newlines""" if isinstance(s, str): s = s.encode("utf-8") return binascii.b2a_base64(s)[:-1]
Represent string / bytes s as base64, omitting newlines
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/oidutil.py#L200-L204
necaris/python3-openid
openid/store/memstore.py
ServerAssocs.cleanup
def cleanup(self): """Remove expired associations. @return: tuple of (removed associations, remaining associations) """ remove = [] for handle, assoc in self.assocs.items(): if assoc.expiresIn == 0: remove.append(handle) for handle in remove: del self.assocs[handle] return len(remove), len(self.assocs)
python
def cleanup(self): """Remove expired associations. @return: tuple of (removed associations, remaining associations) """ remove = [] for handle, assoc in self.assocs.items(): if assoc.expiresIn == 0: remove.append(handle) for handle in remove: del self.assocs[handle] return len(remove), len(self.assocs)
Remove expired associations. @return: tuple of (removed associations, remaining associations)
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/store/memstore.py#L38-L49
necaris/python3-openid
openid/extensions/ax.py
AXMessage._checkMode
def _checkMode(self, ax_args): """Raise an exception if the mode in the attribute exchange arguments does not match what is expected for this class. @raises NotAXMessage: When there is no mode value in ax_args at all. @raises AXError: When mode does not match. """ mode = ax_args.get('mode') if isinstance(mode, bytes): mode = str(mode, encoding="utf-8") if mode != self.mode: if not mode: raise NotAXMessage() else: raise AXError('Expected mode %r; got %r' % (self.mode, mode))
python
def _checkMode(self, ax_args): """Raise an exception if the mode in the attribute exchange arguments does not match what is expected for this class. @raises NotAXMessage: When there is no mode value in ax_args at all. @raises AXError: When mode does not match. """ mode = ax_args.get('mode') if isinstance(mode, bytes): mode = str(mode, encoding="utf-8") if mode != self.mode: if not mode: raise NotAXMessage() else: raise AXError('Expected mode %r; got %r' % (self.mode, mode))
Raise an exception if the mode in the attribute exchange arguments does not match what is expected for this class. @raises NotAXMessage: When there is no mode value in ax_args at all. @raises AXError: When mode does not match.
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/extensions/ax.py#L73-L88
necaris/python3-openid
openid/extensions/ax.py
FetchRequest.getExtensionArgs
def getExtensionArgs(self): """Get the serialized form of this attribute fetch request. @returns: The fetch request message parameters @rtype: {unicode:unicode} """ aliases = NamespaceMap() required = [] if_available = [] ax_args = self._newArgs() for type_uri, attribute in self.requested_attributes.items(): if attribute.alias is None: alias = aliases.add(type_uri) else: # This will raise an exception when the second # attribute with the same alias is added. I think it # would be better to complain at the time that the # attribute is added to this object so that the code # that is adding it is identified in the stack trace, # but it's more work to do so, and it won't be 100% # accurate anyway, since the attributes are # mutable. So for now, just live with the fact that # we'll learn about the error later. # # The other possible approach is to hide the error and # generate a new alias on the fly. I think that would # probably be bad. alias = aliases.addAlias(type_uri, attribute.alias) if attribute.required: required.append(alias) else: if_available.append(alias) if attribute.count != 1: ax_args['count.' + alias] = str(attribute.count) ax_args['type.' + alias] = type_uri if required: ax_args['required'] = ','.join(required) if if_available: ax_args['if_available'] = ','.join(if_available) return ax_args
python
def getExtensionArgs(self): """Get the serialized form of this attribute fetch request. @returns: The fetch request message parameters @rtype: {unicode:unicode} """ aliases = NamespaceMap() required = [] if_available = [] ax_args = self._newArgs() for type_uri, attribute in self.requested_attributes.items(): if attribute.alias is None: alias = aliases.add(type_uri) else: # This will raise an exception when the second # attribute with the same alias is added. I think it # would be better to complain at the time that the # attribute is added to this object so that the code # that is adding it is identified in the stack trace, # but it's more work to do so, and it won't be 100% # accurate anyway, since the attributes are # mutable. So for now, just live with the fact that # we'll learn about the error later. # # The other possible approach is to hide the error and # generate a new alias on the fly. I think that would # probably be bad. alias = aliases.addAlias(type_uri, attribute.alias) if attribute.required: required.append(alias) else: if_available.append(alias) if attribute.count != 1: ax_args['count.' + alias] = str(attribute.count) ax_args['type.' + alias] = type_uri if required: ax_args['required'] = ','.join(required) if if_available: ax_args['if_available'] = ','.join(if_available) return ax_args
Get the serialized form of this attribute fetch request. @returns: The fetch request message parameters @rtype: {unicode:unicode}
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/extensions/ax.py#L222-L270
necaris/python3-openid
openid/extensions/ax.py
FetchRequest.getRequiredAttrs
def getRequiredAttrs(self): """Get the type URIs for all attributes that have been marked as required. @returns: A list of the type URIs for attributes that have been marked as required. @rtype: [str] """ required = [] for type_uri, attribute in self.requested_attributes.items(): if attribute.required: required.append(type_uri) return required
python
def getRequiredAttrs(self): """Get the type URIs for all attributes that have been marked as required. @returns: A list of the type URIs for attributes that have been marked as required. @rtype: [str] """ required = [] for type_uri, attribute in self.requested_attributes.items(): if attribute.required: required.append(type_uri) return required
Get the type URIs for all attributes that have been marked as required. @returns: A list of the type URIs for attributes that have been marked as required. @rtype: [str]
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/extensions/ax.py#L272-L285
necaris/python3-openid
openid/extensions/ax.py
FetchRequest.fromOpenIDRequest
def fromOpenIDRequest(cls, openid_request): """Extract a FetchRequest from an OpenID message @param openid_request: The OpenID authentication request containing the attribute fetch request @type openid_request: C{L{openid.server.server.CheckIDRequest}} @rtype: C{L{FetchRequest}} or C{None} @returns: The FetchRequest extracted from the message or None, if the message contained no AX extension. @raises KeyError: if the AuthRequest is not consistent in its use of namespace aliases. @raises AXError: When parseExtensionArgs would raise same. @see: L{parseExtensionArgs} """ message = openid_request.message ax_args = message.getArgs(cls.ns_uri) self = cls() try: self.parseExtensionArgs(ax_args) except NotAXMessage as err: return None if self.update_url: # Update URL must match the openid.realm of the underlying # OpenID 2 message. realm = message.getArg(OPENID_NS, 'realm', message.getArg(OPENID_NS, 'return_to')) if not realm: raise AXError( ("Cannot validate update_url %r " + "against absent realm") % (self.update_url, )) tr = TrustRoot.parse(realm) if not tr.validateURL(self.update_url): raise AXError( "Update URL %r failed validation against realm %r" % (self.update_url, realm, )) return self
python
def fromOpenIDRequest(cls, openid_request): """Extract a FetchRequest from an OpenID message @param openid_request: The OpenID authentication request containing the attribute fetch request @type openid_request: C{L{openid.server.server.CheckIDRequest}} @rtype: C{L{FetchRequest}} or C{None} @returns: The FetchRequest extracted from the message or None, if the message contained no AX extension. @raises KeyError: if the AuthRequest is not consistent in its use of namespace aliases. @raises AXError: When parseExtensionArgs would raise same. @see: L{parseExtensionArgs} """ message = openid_request.message ax_args = message.getArgs(cls.ns_uri) self = cls() try: self.parseExtensionArgs(ax_args) except NotAXMessage as err: return None if self.update_url: # Update URL must match the openid.realm of the underlying # OpenID 2 message. realm = message.getArg(OPENID_NS, 'realm', message.getArg(OPENID_NS, 'return_to')) if not realm: raise AXError( ("Cannot validate update_url %r " + "against absent realm") % (self.update_url, )) tr = TrustRoot.parse(realm) if not tr.validateURL(self.update_url): raise AXError( "Update URL %r failed validation against realm %r" % (self.update_url, realm, )) return self
Extract a FetchRequest from an OpenID message @param openid_request: The OpenID authentication request containing the attribute fetch request @type openid_request: C{L{openid.server.server.CheckIDRequest}} @rtype: C{L{FetchRequest}} or C{None} @returns: The FetchRequest extracted from the message or None, if the message contained no AX extension. @raises KeyError: if the AuthRequest is not consistent in its use of namespace aliases. @raises AXError: When parseExtensionArgs would raise same. @see: L{parseExtensionArgs}
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/extensions/ax.py#L287-L330