Code | Summary
---|---|
Please provide a description of the function:def embed_ising(source_h, source_J, embedding, target_adjacency, chain_strength=1.0):
source_bqm = dimod.BinaryQuadraticModel.from_ising(source_h, source_J)
target_bqm = embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=chain_strength)
target_h, target_J, __ = target_bqm.to_ising()
return target_h, target_J | [
"Embed an Ising problem onto a target graph.\n\n Args:\n source_h (dict[variable, bias]/list[bias]):\n Linear biases of the Ising problem. If a list, the list's indices are used as\n variable labels.\n\n source_J (dict[(variable, variable), bias]):\n Quadratic biases of the Ising problem.\n\n embedding (dict):\n Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...},\n where s is a source-model variable and t is a target-model variable.\n\n target_adjacency (dict/:class:`networkx.Graph`):\n Adjacency of the target graph as a dict of form {t: Nt, ...},\n where t is a target-graph variable and Nt is its set of neighbours.\n\n chain_strength (float, optional):\n Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note\n that the energy penalty of chain breaks is 2 * `chain_strength`.\n\n Returns:\n tuple: A 2-tuple:\n\n dict[variable, bias]: Linear biases of the target Ising problem.\n\n dict[(variable, variable), bias]: Quadratic biases of the target Ising problem.\n\n Examples:\n This example embeds a fully connected :math:`K_3` graph onto a square target graph.\n Embedding is accomplished by an edge contraction operation on the target graph: target-nodes\n 2 and 3 are chained to represent source-node c.\n\n >>> import dimod\n >>> import networkx as nx\n >>> # Ising problem for a triangular source graph\n >>> h = {}\n >>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}\n >>> # Target graph is a square graph\n >>> target = nx.cycle_graph(4)\n >>> # Embedding from source to target graph\n >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}\n >>> # Embed the Ising problem\n >>> target_h, target_J = dimod.embed_ising(h, J, embedding, target)\n >>> target_J[(0, 1)] == J[('a', 'b')]\n True\n >>> target_J # doctest: +SKIP\n {(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0}\n\n This example embeds a fully connected :math:`K_3` graph onto the target graph\n of a dimod reference structured sampler, `StructureComposite`, using the dimod reference\n `ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to\n represent source-node c.\n\n >>> import dimod\n >>> # Ising problem for a triangular source graph\n >>> h = {}\n >>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}\n >>> # Structured dimod sampler with a structure defined by a square graph\n >>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)])\n >>> # Embedding from source to target graph\n >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}\n >>> # Embed the Ising problem\n >>> target_h, target_J = dimod.embed_ising(h, J, embedding, sampler.adjacency)\n >>> # Sample\n >>> samples = sampler.sample_ising(target_h, target_J)\n >>> for sample in samples.samples(n=3, sorted_by='energy'): # doctest: +SKIP\n ... print(sample)\n ...\n {0: 1, 1: -1, 2: -1, 3: -1}\n {0: 1, 1: 1, 2: -1, 3: -1}\n {0: -1, 1: 1, 2: -1, 3: -1}\n\n "
]
|
Please provide a description of the function:def embed_qubo(source_Q, embedding, target_adjacency, chain_strength=1.0):
source_bqm = dimod.BinaryQuadraticModel.from_qubo(source_Q)
target_bqm = embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=chain_strength)
target_Q, __ = target_bqm.to_qubo()
return target_Q | [
"Embed a QUBO onto a target graph.\n\n Args:\n source_Q (dict[(variable, variable), bias]):\n Coefficients of a quadratic unconstrained binary optimization (QUBO) model.\n\n embedding (dict):\n Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...},\n where s is a source-model variable and t is a target-model variable.\n\n target_adjacency (dict/:class:`networkx.Graph`):\n Adjacency of the target graph as a dict of form {t: Nt, ...},\n where t is a target-graph variable and Nt is its set of neighbours.\n\n chain_strength (float, optional):\n Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note\n that the energy penalty of chain breaks is 2 * `chain_strength`.\n\n Returns:\n dict[(variable, variable), bias]: Quadratic biases of the target QUBO.\n\n Examples:\n This example embeds a square source graph onto fully connected :math:`K_5` graph.\n Embedding is accomplished by an edge deletion operation on the target graph: target-node\n 0 is not used.\n\n >>> import dimod\n >>> import networkx as nx\n >>> # QUBO problem for a square graph\n >>> Q = {(1, 1): -4.0, (1, 2): 4.0, (2, 2): -4.0, (2, 3): 4.0,\n ... (3, 3): -4.0, (3, 4): 4.0, (4, 1): 4.0, (4, 4): -4.0}\n >>> # Target graph is a fully connected k5 graph\n >>> K_5 = nx.complete_graph(5)\n >>> 0 in K_5\n True\n >>> # Embedding from source to target graph\n >>> embedding = {1: {4}, 2: {3}, 3: {1}, 4: {2}}\n >>> # Embed the QUBO\n >>> target_Q = dimod.embed_qubo(Q, embedding, K_5)\n >>> (0, 0) in target_Q\n False\n >>> target_Q # doctest: +SKIP\n {(1, 1): -4.0,\n (1, 2): 4.0,\n (2, 2): -4.0,\n (2, 4): 4.0,\n (3, 1): 4.0,\n (3, 3): -4.0,\n (4, 3): 4.0,\n (4, 4): -4.0}\n\n This example embeds a square graph onto the target graph of a dimod reference structured\n sampler, `StructureComposite`, using the dimod reference `ExactSolver` sampler with a\n fully connected :math:`K_5` graph specified.\n\n >>> import dimod\n >>> import networkx as nx\n >>> # QUBO problem for a square graph\n >>> Q = {(1, 1): -4.0, (1, 2): 4.0, (2, 2): -4.0, (2, 3): 4.0,\n ... (3, 3): -4.0, (3, 4): 4.0, (4, 1): 4.0, (4, 4): -4.0}\n >>> # Structured dimod sampler with a structure defined by a K5 graph\n >>> sampler = dimod.StructureComposite(dimod.ExactSolver(), list(K_5.nodes), list(K_5.edges))\n >>> sampler.adjacency # doctest: +SKIP\n {0: {1, 2, 3, 4},\n 1: {0, 2, 3, 4},\n 2: {0, 1, 3, 4},\n 3: {0, 1, 2, 4},\n 4: {0, 1, 2, 3}}\n >>> # Embedding from source to target graph\n >>> embedding = {0: [4], 1: [3], 2: [1], 3: [2], 4: [0]}\n >>> # Embed the QUBO\n >>> target_Q = dimod.embed_qubo(Q, embedding, sampler.adjacency)\n >>> # Sample\n >>> samples = sampler.sample_qubo(target_Q)\n >>> for datum in samples.data(): # doctest: +SKIP\n ... print(datum)\n ...\n Sample(sample={1: 0, 2: 1, 3: 1, 4: 0}, energy=-8.0)\n Sample(sample={1: 1, 2: 0, 3: 0, 4: 1}, energy=-8.0)\n Sample(sample={1: 1, 2: 0, 3: 0, 4: 0}, energy=-4.0)\n Sample(sample={1: 1, 2: 1, 3: 0, 4: 0}, energy=-4.0)\n Sample(sample={1: 0, 2: 1, 3: 0, 4: 0}, energy=-4.0)\n Sample(sample={1: 1, 2: 1, 3: 1, 4: 0}, energy=-4.0)\n >>> # Snipped above samples for brevity\n\n "
]
|
Please provide a description of the function:def unembed_sampleset(target_sampleset, embedding, source_bqm,
chain_break_method=None, chain_break_fraction=False):
if chain_break_method is None:
chain_break_method = majority_vote
variables = list(source_bqm)
try:
chains = [embedding[v] for v in variables]
except KeyError:
raise ValueError("given bqm does not match the embedding")
chain_idxs = [[target_sampleset.variables.index[v] for v in chain] for chain in chains]
record = target_sampleset.record
unembedded, idxs = chain_break_method(record.sample, chain_idxs)
# dev note: dimod does not currently handle an empty unembedded array;
# once that is fixed upstream, this try-except can be removed
try:
energies = source_bqm.energies((unembedded, variables))
except ValueError:
datatypes = [('sample', np.dtype(np.int8), (len(variables),)), ('energy', np.float64)]
datatypes.extend((name, record[name].dtype, record[name].shape[1:])
for name in record.dtype.names
if name not in {'sample', 'energy'})
if chain_break_fraction:
datatypes.append(('chain_break_fraction', np.float64))
# there are no samples so everything is empty
data = np.rec.array(np.empty(0, dtype=datatypes))
return dimod.SampleSet(data, variables, target_sampleset.info.copy(), target_sampleset.vartype)
reserved = {'sample', 'energy'}
vectors = {name: record[name][idxs]
for name in record.dtype.names if name not in reserved}
if chain_break_fraction:
vectors['chain_break_fraction'] = broken_chains(record.sample, chain_idxs).mean(axis=1)[idxs]
return dimod.SampleSet.from_samples((unembedded, variables),
target_sampleset.vartype,
energy=energies,
info=target_sampleset.info.copy(),
**vectors) | [
"Unembed the samples set.\n\n Construct a sample set for the source binary quadratic model (BQM) by\n unembedding the given samples from the target BQM.\n\n Args:\n target_sampleset (:obj:`dimod.SampleSet`):\n SampleSet from the target BQM.\n\n embedding (dict):\n Mapping from source graph to target graph as a dict of form\n {s: {t, ...}, ...}, where s is a source variable and t is a target\n variable.\n\n source_bqm (:obj:`dimod.BinaryQuadraticModel`):\n Source binary quadratic model.\n\n chain_break_method (function, optional):\n Method used to resolve chain breaks.\n See :mod:`dwave.embedding.chain_breaks`.\n\n chain_break_fraction (bool, optional, default=False):\n If True, a 'chain_break_fraction' field is added to the unembedded\n samples which report what fraction of the chains were broken before\n unembedding.\n\n Returns:\n :obj:`.SampleSet`:\n\n Examples:\n\n >>> import dimod\n ...\n >>> # say we have a bqm on a triangle and an embedding\n >>> J = {('a', 'b'): -1, ('b', 'c'): -1, ('a', 'c'): -1}\n >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, J)\n >>> embedding = {'a': [0, 1], 'b': [2], 'c': [3]}\n ...\n >>> # and some samples from the embedding\n >>> samples = [{0: -1, 1: -1, 2: -1, 3: -1}, # [0, 1] is unbroken\n {0: -1, 1: +1, 2: +1, 3: +1}] # [0, 1] is broken\n >>> energies = [-3, 1]\n >>> embedded = dimod.SampleSet.from_samples(samples, dimod.SPIN, energies)\n ...\n >>> # unembed\n >>> samples = dwave.embedding.unembed_sampleset(embedded, embedding, bqm)\n >>> samples.record.sample # doctest: +SKIP\n array([[-1, -1, -1],\n [ 1, 1, 1]], dtype=int8)\n\n "
]
|
Please provide a description of the function:def _adjacency_to_edges(adjacency):
edges = set()
for u in adjacency:
for v in adjacency[u]:
try:
edge = (u, v) if u <= v else (v, u)
except TypeError:
# Py3 does not allow sorting of unlike types
if (v, u) in edges:
continue
edge = (u, v)
edges.add(edge)
return edges | [
"determine from an adjacency the list of edges\n if (u, v) in edges, then (v, u) should not be"
]
|
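A minimal illustration of the helper above (values chosen for this sketch): each undirected edge of the adjacency appears exactly once in the returned set.

>>> sorted(_adjacency_to_edges({0: {1, 2}, 1: {0}, 2: {0}}))
[(0, 1), (0, 2)]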
Please provide a description of the function:def _embed_state(embedding, state):
return {u: state[v] for v, chain in embedding.items() for u in chain} | [
"Embed a single state/sample by spreading it's values over the chains in the embedding"
]
|
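As a small illustrative sketch (the embedding and state here are hypothetical): the value of each source variable is copied onto every target qubit in its chain.

>>> _embed_state({'a': [0, 1], 'b': [2]}, {'a': -1, 'b': +1})
{0: -1, 1: -1, 2: 1}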
Please provide a description of the function:def parameters(self):
param = self.child.parameters.copy()
param['chain_strength'] = []
param['chain_break_fraction'] = []
return param | [
"dict[str, list]: Parameters in the form of a dict.\n\n For an instantiated composed sampler, keys are the keyword parameters accepted by the child sampler\n and parameters added by the composite such as those related to chains.\n\n Examples:\n This example views parameters of a composed sampler using a D-Wave system selected by\n the user's default\n :std:doc:`D-Wave Cloud Client configuration file. <cloud-client:intro>`\n\n >>> from dwave.system.samplers import DWaveSampler\n >>> from dwave.system.composites import EmbeddingComposite\n ...\n >>> sampler = EmbeddingComposite(DWaveSampler())\n >>> sampler.parameters # doctest: +SKIP\n {'anneal_offsets': ['parameters'],\n 'anneal_schedule': ['parameters'],\n 'annealing_time': ['parameters'],\n 'answer_mode': ['parameters'],\n 'auto_scale': ['parameters'],\n >>> # Snipped above response for brevity\n\n See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_ for explanations of technical\n terms in descriptions of Ocean tools.\n "
]
|
Please provide a description of the function:def sample(self, bqm, chain_strength=1.0, chain_break_fraction=True, **parameters):
# solve the problem on the child system
child = self.child
# apply the embedding to the given problem to map it to the child sampler
__, target_edgelist, target_adjacency = child.structure
# add self-loops to edgelist to handle singleton variables
source_edgelist = list(bqm.quadratic) + [(v, v) for v in bqm.linear]
# get the embedding
embedding = minorminer.find_embedding(source_edgelist, target_edgelist)
if bqm and not embedding:
raise ValueError("no embedding found")
bqm_embedded = embed_bqm(bqm, embedding, target_adjacency,
chain_strength=chain_strength,
smear_vartype=dimod.SPIN)
if 'initial_state' in parameters:
parameters['initial_state'] = _embed_state(embedding, parameters['initial_state'])
response = child.sample(bqm_embedded, **parameters)
return unembed_sampleset(response, embedding, source_bqm=bqm,
chain_break_fraction=chain_break_fraction) | [
"Sample from the provided binary quadratic model.\n\n Also set parameters for handling a chain, the set of vertices in a target graph that\n represents a source-graph vertex; when a D-Wave system is the sampler, it is a set\n of qubits that together represent a variable of the binary quadratic model being\n minor-embedded.\n\n Args:\n bqm (:obj:`dimod.BinaryQuadraticModel`):\n Binary quadratic model to be sampled from.\n\n chain_strength (float, optional, default=1.0):\n Magnitude of the quadratic bias (in SPIN-space) applied between variables to create\n chains. The energy penalty of chain breaks is 2 * `chain_strength`.\n\n chain_break_fraction (bool, optional, default=True):\n If True, the unembedded response contains a ‘chain_break_fraction’ field that\n reports the fraction of chains broken before unembedding.\n\n **parameters:\n Parameters for the sampling method, specified by the child sampler.\n\n Returns:\n :class:`dimod.SampleSet`: A `dimod` :obj:`~dimod.SampleSet` object.\n\n Examples:\n This example submits an triangle-structured Ising problem to a D-Wave solver, selected\n by the user's default\n :std:doc:`D-Wave Cloud Client configuration file <cloud-client:intro>`,\n by minor-embedding the problem's variables to physical qubits.\n\n >>> from dwave.system.samplers import DWaveSampler\n >>> from dwave.system.composites import EmbeddingComposite\n >>> import dimod\n ...\n >>> sampler = EmbeddingComposite(DWaveSampler())\n >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': 0.5, 'bc': 0.5, 'ca': 0.5})\n >>> response = sampler.sample(bqm, chain_strength=2)\n >>> response.first: # doctest: +SKIP\n Sample(sample={'a': -1, 'b': 1, 'c': 1}, energy=-0.5,\n num_occurrences=1, chain_break_fraction=0.0)\n\n See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_\n for explanations of technical terms in descriptions of Ocean tools.\n "
]
|
Please provide a description of the function:def sample(self, bqm, chain_strength=1.0, chain_break_fraction=True, **parameters):
# solve the problem on the child system
child = self.child
# apply the embedding to the given problem to map it to the child sampler
__, __, target_adjacency = child.structure
# get the embedding
embedding = self.embedding
bqm_embedded = embed_bqm(bqm, embedding, target_adjacency,
chain_strength=chain_strength,
smear_vartype=dimod.SPIN)
if 'initial_state' in parameters:
parameters['initial_state'] = _embed_state(embedding, parameters['initial_state'])
response = child.sample(bqm_embedded, **parameters)
return unembed_sampleset(response, embedding, source_bqm=bqm,
chain_break_fraction=chain_break_fraction) | [
"Sample from the provided binary quadratic model.\n\n Also set parameters for handling a chain, the set of vertices in a target graph that\n represents a source-graph vertex; when a D-Wave system is the sampler, it is a set\n of qubits that together represent a variable of the binary quadratic model being\n minor-embedded.\n\n Args:\n bqm (:obj:`dimod.BinaryQuadraticModel`):\n Binary quadratic model to be sampled from.\n\n chain_strength (float, optional, default=1.0):\n Magnitude of the quadratic bias (in SPIN-space) applied between variables to create\n chains. The energy penalty of chain breaks is 2 * `chain_strength`.\n\n chain_break_fraction (bool, optional, default=True):\n If True, the unembedded response contains a ‘chain_break_fraction’ field\n that reports the fraction of chains broken before unembedding.\n\n **parameters:\n Parameters for the sampling method, specified by the child sampler.\n\n Returns:\n :class:`dimod.SampleSet`: A `dimod` :obj:`~dimod.SampleSet` object.\n\n Examples:\n This example submits an triangle-structured problem to a D-Wave solver, selected\n by the user's default\n :std:doc:`D-Wave Cloud Client configuration file <cloud-client:intro>`,\n using a specified minor-embedding of the problem’s variables to physical qubits.\n\n >>> from dwave.system.samplers import DWaveSampler\n >>> from dwave.system.composites import FixedEmbeddingComposite\n >>> import dimod\n ...\n >>> sampler = FixedEmbeddingComposite(DWaveSampler(), {'a': [0, 4], 'b': [1, 5], 'c': [2, 6]})\n >>> response = sampler.sample_ising({}, {'ab': 0.5, 'bc': 0.5, 'ca': 0.5}, chain_strength=2)\n >>> response.first # doctest: +SKIP\n Sample(sample={'a': 1, 'b': -1, 'c': 1}, energy=-0.5, num_occurrences=1, chain_break_fraction=0.0)\n\n See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_\n for explanations of technical terms in descriptions of Ocean tools.\n\n "
]
|
Please provide a description of the function:def sample(self, bqm, chain_strength=1.0, chain_break_fraction=True, **parameters):
if self.embedding is None:
# Find embedding
child = self.child # Solve the problem on the child system
__, target_edgelist, target_adjacency = child.structure
source_edgelist = list(bqm.quadratic) + [(v, v) for v in bqm.linear] # Add self-loops for single variables
embedding = minorminer.find_embedding(source_edgelist, target_edgelist)
# Initialize properties that need embedding
super(LazyFixedEmbeddingComposite, self)._set_graph_related_init(embedding=embedding)
return super(LazyFixedEmbeddingComposite, self).sample(bqm, chain_strength=chain_strength,
chain_break_fraction=chain_break_fraction, **parameters) | [
"Sample the binary quadratic model.\n\n Note: At the initial sample(..) call, it will find a suitable embedding and initialize the remaining attributes\n before sampling the bqm. All following sample(..) calls will reuse that initial embedding.\n\n Args:\n bqm (:obj:`dimod.BinaryQuadraticModel`):\n Binary quadratic model to be sampled from.\n\n chain_strength (float, optional, default=1.0):\n Magnitude of the quadratic bias (in SPIN-space) applied between variables to create\n chains. Note that the energy penalty of chain breaks is 2 * `chain_strength`.\n\n chain_break_fraction (bool, optional, default=True):\n If True, a ‘chain_break_fraction’ field is added to the unembedded response which report\n what fraction of the chains were broken before unembedding.\n\n **parameters:\n Parameters for the sampling method, specified by the child sampler.\n Returns:\n :class:`dimod.SampleSet`\n "
]
|
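A hedged usage sketch, assuming LazyFixedEmbeddingComposite is importable from dwave.system.composites like the other composites shown in this file:

>>> from dwave.system.samplers import DWaveSampler
>>> from dwave.system.composites import LazyFixedEmbeddingComposite
>>> sampler = LazyFixedEmbeddingComposite(DWaveSampler())   # doctest: +SKIP
>>> sampleset = sampler.sample_ising({}, {('a', 'b'): 0.5}, chain_strength=2)   # embedding is found on this first call  # doctest: +SKIP
>>> sampler.embedding   # reused by subsequent calls  # doctest: +SKIP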
Please provide a description of the function:def _accumulate_random(count, found, oldthing, newthing):
if randint(1, count + found) <= found:
return count + found, newthing
else:
return count + found, oldthing | [
"This performs on-line random selection.\n\n We have a stream of objects\n\n o_1,c_1; o_2,c_2; ...\n\n where there are c_i equivalent objects like o_1. We'd like to pick\n a random object o uniformly at random from the list\n\n [o_1]*c_1 + [o_2]*c_2 + ...\n\n (actually, this algorithm allows arbitrary positive weights, not\n necessarily integers) without spending the time&space to actually\n create that list. Luckily, the following works:\n\n thing = None\n c_tot\n for o_n, c_n in things:\n c_tot += c_n\n if randint(1,c_tot) <= c_n:\n thing = o_n\n\n This function is written in an accumulator format, so it can be\n used one call at a time:\n\n EXAMPLE:\n > thing = None\n > count = 0\n > for i in range(10):\n > c = 10-i\n > count, thing = accumulate_random(count,c,thing,i)\n\n\n INPUTS:\n count: integer, sum of weights found before newthing\n found: integer, weight for newthing\n oldthing: previously selected object (will never be selected\n if count == 0)\n newthing: incoming object\n\n OUTPUT:\n (newcount, pick): newcount is count+found, pick is the newly\n selected object.\n "
]
|
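Restating the docstring's accumulator example with the underscored name defined above (a sketch; the weights 10, 9, ..., 1 are illustrative):

>>> thing, count = None, 0
>>> for i in range(10):
...     count, thing = _accumulate_random(count, 10 - i, thing, i)
...
>>> # thing now equals i with probability (10 - i) / 55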
Please provide a description of the function:def _bulk_to_linear(M, N, L, qubits):
"Converts a list of chimera coordinates to linear indices."
return [2 * L * N * x + 2 * L * y + L * u + k for x, y, u, k in qubits] | []
|
Please provide a description of the function:def _to_linear(M, N, L, q):
"Converts a qubit in chimera coordinates to its linear index."
(x, y, u, k) = q
return 2 * L * N * x + 2 * L * y + L * u + k | []
|
Please provide a description of the function:def _bulk_to_chimera(M, N, L, qubits):
"Converts a list of linear indices to chimera coordinates."
return [(q // N // L // 2, (q // L // 2) % N, (q // L) % 2, q % L) for q in qubits] | []
|
Please provide a description of the function:def _to_chimera(M, N, L, q):
"Converts a qubit's linear index to chimera coordinates."
return (q // N // L // 2, (q // L // 2) % N, (q // L) % 2, q % L) | []
|
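A quick round-trip check of the two single-qubit converters above, using hypothetical Chimera dimensions M = N = 2 and L = 4:

>>> _to_linear(2, 2, 4, (1, 0, 1, 3))
23
>>> _to_chimera(2, 2, 4, 23)
(1, 0, 1, 3)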
Please provide a description of the function:def _chimera_neighbors(M, N, L, q):
"Returns a list of neighbors of (x,y,u,k) in a perfect :math:`C_{M,N,L}`"
(x, y, u, k) = q
n = [(x, y, 1 - u, l) for l in range(L)]
if u == 0:
if x:
n.append((x - 1, y, u, k))
if x < M - 1:
n.append((x + 1, y, u, k))
else:
if y:
n.append((x, y - 1, u, k))
if y < N - 1:
n.append((x, y + 1, u, k))
return n | []
|
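As a worked illustration (not from the original), in a perfect C_{2,2,2} graph the horizontal qubit (0, 0, 0, 1) couples to both vertical qubits in its cell plus its horizontal partner in the cell to the right:

>>> _chimera_neighbors(2, 2, 2, (0, 0, 0, 1))
[(0, 0, 1, 0), (0, 0, 1, 1), (1, 0, 0, 1)]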
Please provide a description of the function:def random_processor(M, N, L, qubit_yield, num_evil=0):
# replacement for a lambda in the edge filter below that works with both Python 2 and 3
def edge_filter(pq):
# we have to unpack the (p,q) edge
p, q = pq
return q in qubits and p < q
qubits = [(x, y, u, k) for x in range(M) for y in range(N) for u in [0, 1] for k in range(L)]
nqubits = len(qubits)
qubits = set(sample(qubits, int(nqubits * qubit_yield)))
edges = ((p, q) for p in qubits for q in _chimera_neighbors(M, N, L, p))
edges = list(filter(edge_filter, edges))
possibly_evil_edges = [(p, q) for p, q in edges if p[:2] == q[:2]]
num_evil = min(num_evil, len(possibly_evil_edges))
evil_edges = sample(possibly_evil_edges, num_evil)
return processor(set(edges) - set(evil_edges), M=M, N=N, L=L, linear=False) | [
"A utility function that generates a random :math:`C_{M,N,L}` missing some\n percentage of its qubits.\n\n INPUTS:\n M,N,L: the chimera parameters\n qubit_yield: ratio (0 <= qubit_yield <= 1) of #{qubits}/(2*M*N*L)\n num_evil: number of broken in-cell couplers between working qubits\n\n OUTPUT:\n proc (:class:`processor`): a :class:`processor` instance with a random\n collection of qubits and couplers as specified\n "
]
|
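A hedged usage sketch, assuming the object returned by random_processor exposes the clique-embedding methods shown later in this file:

>>> proc = random_processor(8, 8, 4, qubit_yield=0.95)      # doctest: +SKIP
>>> chains = proc.largestNativeClique()                     # list of chains (lists of qubits)  # doctest: +SKIP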
Please provide a description of the function:def vline_score(self, x, ymin, ymax):
return self._vline_score[x, ymin, ymax] | [
"Returns the number of unbroken paths of qubits\n\n >>> [(x,y,1,k) for y in range(ymin,ymax+1)]\n\n for :math:`k = 0,1,\\cdots,L-1`. This is precomputed for speed.\n "
]
|
Please provide a description of the function:def hline_score(self, y, xmin, xmax):
return self._hline_score[y, xmin, xmax] | [
"Returns the number of unbroken paths of qubits\n\n >>> [(x,y,0,k) for x in range(xmin,xmax+1)]\n\n for :math:`k = 0,1,\\cdots,L-1`. This is precomputed for speed.\n "
]
|
Please provide a description of the function:def _compute_vline_scores(self):
M, N, L = self.M, self.N, self.L
vline_score = {}
for x in range(M):
laststart = [0 if (x, 0, 1, k) in self else None for k in range(L)]
for y in range(N):
block = [0] * (y + 1)
for k in range(L):
if (x, y, 1, k) not in self:
laststart[k] = None
elif laststart[k] is None:
laststart[k] = y
block[y] += 1
elif y and (x, y, 1, k) not in self[x, y - 1, 1, k]:
laststart[k] = y
else:
for y1 in range(laststart[k], y + 1):
block[y1] += 1
for y1 in range(y + 1):
vline_score[x, y1, y] = block[y1]
self._vline_score = vline_score | [
"Does the hard work to prepare ``vline_score``.\n "
]
|
Please provide a description of the function:def _compute_hline_scores(self):
M, N, L = self.M, self.N, self.L
hline_score = {}
for y in range(N):
laststart = [0 if (0, y, 0, k) in self else None for k in range(L)]
for x in range(M):
block = [0] * (x + 1)
for k in range(L):
if (x, y, 0, k) not in self:
laststart[k] = None
elif laststart[k] is None:
laststart[k] = x
block[x] += 1
elif x and (x, y, 0, k) not in self[x - 1, y, 0, k]:
laststart[k] = x
else:
for x1 in range(laststart[k], x + 1):
block[x1] += 1
for x1 in range(x + 1):
hline_score[y, x1, x] = block[x1]
self._hline_score = hline_score | [
"Does the hard work to prepare ``hline_score``.\n "
]
|
Please provide a description of the function:def _compute_biclique_sizes(self, recompute=False):
if recompute or not self._biclique_size_computed:
self._biclique_size = {}
self._biclique_size_to_length = defaultdict(dict)
self._biclique_length_to_size = defaultdict(dict)
else:
return
M, N = self.M, self.N
for xmax in range(M):
for xmin in range(xmax + 1):
for ymax in range(N):
for ymin in range(ymax + 1):
ab = self.biclique_size(xmin, xmax, ymin, ymax)
wh = xmax - xmin + 1, ymax - ymin + 1
self._biclique_size_to_length[ab][
wh] = (xmin, xmax, ymin, ymax)
self._biclique_length_to_size[wh][
ab] = (xmin, xmax, ymin, ymax)
self._biclique_size_computed = True | [
"Calls ``self.biclique_size(...)`` for every rectangle contained in this\n processor, to fill the biclique size cache.\n\n INPUTS:\n recompute: if ``True``, then we dump the existing cache and compute\n all biclique sizes from scratch. (default: ``False``)\n "
]
|
Please provide a description of the function:def biclique_size(self, xmin, xmax, ymin, ymax):
try:
return self._biclique_size[xmin, xmax, ymin, ymax]
except KeyError:
hscore = self.hline_score(ymin, xmin, xmax)
vscore = self.vline_score(xmin, ymin, ymax)
if ymin < ymax:
hscore += self.biclique_size(xmin, xmax, ymin + 1, ymax)[0]
if xmin < xmax:
vscore += self.biclique_size(xmin + 1, xmax, ymin, ymax)[1]
self._biclique_size[xmin, xmax, ymin, ymax] = hscore, vscore
return hscore, vscore | [
"Returns the size parameters ``(m,n)`` of the complete bipartite graph\n :math:`K_{m,n}` comprised of ``m`` unbroken chains of horizontally-aligned qubits\n and ``n`` unbroken chains of vertically-aligned qubits (known as line\n bundles)\n\n INPUTS:\n xmin,xmax,ymin,ymax: integers defining the bounds of a rectangle\n where we look for unbroken chains. These ranges include both\n endpoints.\n\n OUTPUTS:\n m,n: integers corresponding to the number of horizontal and\n vertical line bundles contained in this rectangle.\n "
]
|
Please provide a description of the function:def biclique(self, xmin, xmax, ymin, ymax):
Aside = sum((self.maximum_hline_bundle(y, xmin, xmax)
for y in range(ymin, ymax + 1)), [])
Bside = sum((self.maximum_vline_bundle(x, ymin, ymax)
for x in range(xmin, xmax + 1)), [])
return Aside, Bside | [
"Compute a maximum-sized complete bipartite graph contained in the\n rectangle defined by ``xmin, xmax, ymin, ymax`` where each chain of\n qubits is either a vertical line or a horizontal line.\n\n INPUTS:\n xmin,xmax,ymin,ymax: integers defining the bounds of a rectangle\n where we look for unbroken chains. These ranges include both\n endpoints.\n\n OUTPUT:\n (A_side, B_side): a tuple of two lists containing lists of qubits.\n the lists found in ``A_side`` and ``B_side`` are chains of qubits.\n These lists of qubits are arranged so that\n\n >>> [zip(chain,chain[1:]) for chain in A_side]\n\n and\n\n >>> [zip(chain,chain[1:]) for chain in B_side]\n\n are lists of valid couplers.\n "
]
|
Please provide a description of the function:def _contains_line(self, line):
return all(v in self for v in line) and all(u in self[v] for u, v in zip(line, line[1::])) | [
"Test if a chain of qubits is completely contained in ``self``. In\n particular, test if all qubits are present and the couplers\n connecting those qubits are also connected.\n\n NOTE: this function assumes that ``line`` is a list or tuple of\n qubits which satisfies the precondition that ``(line[i],line[i+1])``\n is supposed to be a coupler for all ``i``.\n\n INPUTS:\n line: a list of qubits satisfying the above precondition\n\n OUTPUT:\n boolean\n "
]
|
Please provide a description of the function:def maximum_vline_bundle(self, x0, y0, y1):
y_range = range(y1, y0 - 1, -1) if y0 < y1 else range(y1, y0 + 1)
vlines = [[(x0, y, 1, k) for y in y_range] for k in range(self.L)]
return list(filter(self._contains_line, vlines)) | [
"Compute a maximum set of vertical lines in the unit cells ``(x0,y)``\n for :math:`y0 \\leq y \\leq y1`.\n\n INPUTS:\n y0,x0,x1: int\n\n OUTPUT:\n list of lists of qubits\n "
]
|
Please provide a description of the function:def maximum_hline_bundle(self, y0, x0, x1):
x_range = range(x0, x1 + 1) if x0 < x1 else range(x0, x1 - 1, -1)
hlines = [[(x, y0, 0, k) for x in x_range] for k in range(self.L)]
return list(filter(self._contains_line, hlines)) | [
"Compute a maximum set of horizontal lines in the unit cells ``(x,y0)``\n for :math:`x0 \\leq x \\leq x1`.\n\n INPUTS:\n y0,x0,x1: int\n\n OUTPUT:\n list of lists of qubits\n "
]
|
Please provide a description of the function:def maximum_ell_bundle(self, ell):
(x0, x1, y0, y1) = ell
hlines = self.maximum_hline_bundle(y0, x0, x1)
vlines = self.maximum_vline_bundle(x0, y0, y1)
if self.random_bundles:
shuffle(hlines)
shuffle(vlines)
return [v + h for h, v in zip(hlines, vlines)] | [
"Return a maximum ell bundle in the rectangle bounded by\n\n :math:`\\{x0,x1\\} \\\\times \\{y0,y1\\}`\n\n with vertical component\n\n :math:`(x0,y0) ... (x0,y1) = {x0} \\\\times \\{y0,...,y1\\}`\n\n and horizontal component\n\n :math:`(x0,y0) ... (x1,y0) = \\{x0,...,x1\\} \\\\times \\{y0\\}`.\n\n Note that we don't require :math:`x0 \\leq x1` or :math:`y0 \\leq y1`. We go\n through some shenanigans so that the qubits we return\n are all in a path. A nice side-effect of this is that\n\n >>> chains = maximum_ell_bundle(...)\n >>> edges = [zip(path,path[:-1]) for path in chains]\n\n where ``edges`` will be a list of lists of chain edges.\n\n INPUTS::\n ell: a tuple of 4 integers defining the ell, ``(x0, x1, y0, y1)``\n\n OUTPUT::\n chains: list of lists of qubits\n\n Note: this function only to be called to construct a\n native clique embedding *after* the block embedding has\n been constructed. Using this to evaluate the goodness\n of an ell block will be slow.\n "
]
|
Please provide a description of the function:def _combine_clique_scores(self, rscore, hbar, vbar):
(y0, xmin, xmax) = hbar
(x0, ymin, ymax) = vbar
if rscore is None:
rscore = 0
hscore = self.hline_score(y0, xmin, xmax)
vscore = self.vline_score(x0, ymin, ymax)
if vscore < hscore:
score = rscore + vscore
else:
score = rscore + hscore
return score | [
"Computes the score of a partial native clique embedding given the score\n attained by the already-placed ells, together with the ell block\n defined by ``hbar = (y0,xmin,xmax)``, and ``vbar = (x0,ymin,ymax)``.\n\n In the plain :class:`eden_processor` class, this is simply the number of ells\n contained in the partial native clique after adding the new ells.\n "
]
|
Please provide a description of the function:def maxCliqueWithRectangle(self, R, maxCWR):
(xmin, xmax, ymin, ymax) = R
best = nothing = 0, None, None, 1
bestscore = None
count = 0
N = self.N
Xlist = (xmin, xmax, xmin + 1, xmax), (xmax, xmin, xmin, xmax - 1)
Ylist = (ymin - 1, ymax, ymin - 1, ymax), (ymax + 1, ymin, ymin, ymax + 1)
XY = [(X, Y) for X in Xlist for Y in Ylist if 0 <= Y[2] <= Y[3] < N]
bests = []
for X, Y in XY:
x0, x1, nxmin, nxmax = X
y0, y1, nymin, nymax = Y
r = nxmin, nxmax, nymin, nymax
try:
rscore, rell, rparent, nr = maxCWR[r]
except KeyError:
rscore, nr = None, 1
score = self._combine_clique_scores(
rscore, (y0, xmin, xmax), (x0, nymin, nymax))
if bestscore is None or score > bestscore:
bestscore = score
count = 0
if score == bestscore:
count, best = _accumulate_random(
count, nr, best, (score, (x0, x1, y0, y1), r, nr))
return bestscore, best | [
"This does the dirty work for :func:`nativeCliqueEmbed`. Not meant to be\n understood or called on its own. Guaranteed to maintain the inductive\n hypothesis that ``maxCWR`` is optimal. We put in the tiniest amount of\n effort to return a uniform random choice of a maximum-sized native\n clique embedding.\n\n INPUTS:\n R (tuple): the rectangle specified as a tuple ``(xmin,xmax,ymin,ymax)``\n\n maxCWR (dict): the dictionary we're building inductively which maps\n ``R -> (score, ell, r, num)`` where score is the size of the best\n clique with working rectangle ``R``, and ``ell`` and ``r`` indicate how to\n reconstruct that clique: add the maximum line bundle in ``ell``, and\n recursively examine the working rectangle ``r``. The parameter ``num`` is the number\n of cliques with ``R`` as a working rectangle and size equal to ``score``.\n\n OUTPUT:\n score (int): the score for the returned clique (just ``len(clique)``\n in the class :class:`eden_processor`; may differ in subclasses)\n\n best (tuple): a tuple ``(score, ell, parent, num)`` to be stored in\n ``maxCWR[R]``.\n\n * score: as above\n * ell: ``(x0,x1,y0,y1)`` defines the unit cells in an ell-shaped\n region: ``(x0,y1),...,(x0,y0),...,(x1,y0)``\n * parent: the rectangle ``R1`` for which the clique generated\n recursively by looking up ``maxCWR[R1]`` as described above.\n * num: the number of partial native clique embeddings with ``R``\n as a working rectangle and a score of ``score``.\n\n "
]
|
Please provide a description of the function:def nativeCliqueEmbed(self, width):
maxCWR = {}
M, N = self.M, self.N
maxscore = None
count = 0
key = None
for w in range(width + 2):
h = width - w - 2
for ymin in range(N - h):
ymax = ymin + h
for xmin in range(M - w):
xmax = xmin + w
R = (xmin, xmax, ymin, ymax)
score, best = self.maxCliqueWithRectangle(R, maxCWR)
maxCWR[R] = best
if maxscore is None or (score is not None and maxscore < score):
maxscore = score
key = None # this gets overwritten immediately
count = 0 # this gets overwritten immediately
if maxscore == score:
count, key = _accumulate_random(count, best[3], key, R)
clique = []
while key in maxCWR:
score, ell, key, num = maxCWR[key]
if ell is not None:
meb = self.maximum_ell_bundle(ell)
clique.extend(meb)
return maxscore, clique | [
"Compute a maximum-sized native clique embedding in an induced\n subgraph of chimera with all chainlengths ``width+1``.\n\n INPUTS:\n width: width of the squares to search, also `chainlength`-1\n\n OUTPUT:\n score: the score for the returned clique (just ``len(clique)``\n in the class :class:`eden_processor`; may differ in subclasses)\n\n clique: a list containing lists of qubits, each associated\n to a chain. These lists of qubits are carefully\n arranged so that\n\n >>> [zip(chain,chain[1:]) for chain in clique]\n\n is a list of valid couplers.\n\n "
]
|
Please provide a description of the function:def largestNativeClique(self, max_chain_length=None):
bigclique = []
bestscore = None
if max_chain_length is None:
wmax = min(self.M, self.N)
else:
wmax = max_chain_length - 1
for w in range(wmax + 1):
score, clique = self.nativeCliqueEmbed(w)
if bestscore is None or score > bestscore:
bigclique = clique
bestscore = score
return bestscore, bigclique | [
"Returns the largest native clique embedding we can find on the\n processor, with the shortest chainlength possible (for that\n clique size).\n\n OUTPUT:\n score: the score for the returned clique (just ``len(clique)``\n in the class :class:`eden_processor`; may differ in subclasses)\n\n clique: a list containing lists of qubits, each associated\n to a chain. These lists of qubits are carefully\n arranged so that\n\n >>> [zip(chain,chain[1:]) for chain in clique]\n\n is a list of valid couplers.\n\n CAVEAT: we assume that the only failed couplers connected to working\n qubits are between two cells. Any pair of working qubits on opposite\n sides of the same unit cell are assumed to be connected by working\n couplers.\n "
]
|
Please provide a description of the function:def largestNativeBiClique(self, chain_imbalance=0, max_chain_length=None):
self._compute_biclique_sizes()
Len2Siz = self._biclique_length_to_size
Siz2Len = self._biclique_size_to_length
overkill = self.M + self.N
if max_chain_length is None:
max_chain_length = overkill
if chain_imbalance is None:
chain_imbalance = overkill
def acceptable_chains(t):
a, b = t
return a <= max_chain_length and b <= max_chain_length and abs(a - b) <= chain_imbalance
def sortedpair(k):
return min(k), max(k)
feasible_sizes = {mn for mn, S in Siz2Len.items()
if any(map(acceptable_chains, S))}
m, n = max(feasible_sizes, key=sortedpair)
best_r = None
best_ab = overkill, overkill
for mn in set(((m, n), (n, m))) & feasible_sizes:
for ab, r in Siz2Len[mn].items():
ab = max(ab), min(ab)
if acceptable_chains(ab) and ab < best_ab:
best_ab = ab
best_r = r
bestsize = sortedpair(self.biclique_size(*best_r))
bestbiclique = self.biclique(*best_r)
return bestsize, (bestbiclique[0], bestbiclique[1]) | [
"Returns a native embedding for the complete bipartite graph :math:`K_{n,m}`\n for :math:`n \\leq m`; where :math:`n` is as large as possible and :math:`m` is as large as\n possible subject to :math:`n`. The native embedding of a complete bipartite\n graph is a set of horizontally-aligned qubits connected in lines\n together with an equal-sized set of vertically-aligned qubits\n connected in lines.\n\n INPUTS:\n chain_imbalance: how big of a difference to allow between the\n chain lengths on the two sides of the bipartition. If ``None``,\n then we allow an arbitrary imbalance. (default: ``0``)\n\n max_chain_length: longest chain length to consider or ``None`` if chain\n lengths are allowed to be unbounded. (default: ``None``)\n\n OUTPUT:\n score (tuple): the score for the returned clique (just ``(n,m)`` in the class\n :class:`eden_processor`; may differ in subclasses)\n\n embedding (tuple): a tuple of two lists containing lists of qubits.\n If ``embedding = (A_side, B_side)``, the lists found in ``A_side`` and\n ``B_side`` are chains of qubits.\n These lists of qubits are arranged so that\n\n >>> [zip(chain,chain[1:]) for chain in A_side]\n\n and\n\n >>> [zip(chain,chain[1:]) for chain in B_side]\n\n are lists of valid couplers.\n\n "
]
|
Please provide a description of the function:def _compute_all_deletions(self):
minimum_evil = []
for disabled_qubits in map(set, product(*self._evil)):
newmin = []
for s in minimum_evil:
if s < disabled_qubits:
break
elif disabled_qubits < s:
continue
newmin.append(s)
else:
minimum_evil = newmin + [disabled_qubits]
return minimum_evil | [
"Returns all minimal edge covers of the set of evil edges.\n "
]
|
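To make "minimal edge cover of the evil edges" concrete, here is a standalone sketch of the same product-and-prune idea (the qubit labels are illustrative, not part of the class):

>>> from itertools import product
>>> evil = [('p', 'q1'), ('p', 'q2')]   # two broken in-cell couplers sharing qubit p
>>> sorted(sorted(cover) for cover in {frozenset(c) for c in product(*evil)})
[['p'], ['p', 'q1'], ['p', 'q2'], ['q1', 'q2']]
>>> # _compute_all_deletions keeps only the inclusion-minimal covers, here {'p'} and {'q1', 'q2'}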
Please provide a description of the function:def _subprocessor(self, disabled_qubits):
edgelist = [(p, q) for p, q in self._edgelist if
p not in disabled_qubits and
q not in disabled_qubits]
return eden_processor(edgelist, self.M, self.N, self.L, random_bundles=self._random_bundles) | [
"Create a subprocessor by deleting a set of qubits. We assume\n this removes all evil edges, and return an :class:`eden_processor`\n instance.\n "
]
|
Please provide a description of the function:def _compute_deletions(self):
M, N, L, edgelist = self.M, self.N, self.L, self._edgelist
if 2**len(self._evil) <= self._proc_limit:
deletions = self._compute_all_deletions()
self._processors = [self._subprocessor(d) for d in deletions]
else:
self._processors = None | [
"If there are fewer than self._proc_limit possible deletion\n sets, compute all subprocessors obtained by deleting a\n minimal subset of qubits.\n "
]
|
Please provide a description of the function:def _random_subprocessor(self):
deletion = set()
for e in self._evil:
if e[0] in deletion or e[1] in deletion:
continue
deletion.add(choice(e))
return self._subprocessor(deletion) | [
"Creates a random subprocessor where there is a coupler between\n every pair of working qubits on opposite sides of the same cell.\n This is guaranteed to be minimal in that adding a qubit back in\n will reintroduce a bad coupler, but not to have minimum size.\n\n OUTPUT:\n an :class:`eden_processor` instance\n "
]
|
Please provide a description of the function:def _random_subprocessors(self):
if self._processors is not None:
return (p for p in self._processors)
elif 2**len(self._evil) <= 8 * self._proc_limit:
deletions = self._compute_all_deletions()
if len(deletions) > self._proc_limit:
deletions = sample(deletions, self._proc_limit)
return (self._subprocessor(d) for d in deletions)
else:
return (self._random_subprocessor() for i in range(self._proc_limit)) | [
"Produces an iterator of subprocessors. If there are fewer than\n self._proc_limit subprocessors to consider (by knocking out a\n minimal subset of working qubits incident to broken couplers),\n we work exhaustively. Otherwise, we generate a random set of\n ``self._proc_limit`` subprocessors.\n\n If the total number of possibilities is rather small, then we\n deliberately pick a random minimum subset to avoid coincidences.\n Otherwise, we give up on minimum, satisfy ourselves with minimal,\n and randomly generate subprocessors with :func:`self._random_subprocessor`.\n\n OUTPUT:\n an iterator of eden_processor instances.\n "
]
|
Please provide a description of the function:def _map_to_processors(self, f, objective):
P = self._random_subprocessors()
best = f(next(P))
for p in P:
x = f(p)
if objective(best, x):
best = x
return best[1] | [
"Map a function to a list of processors, and return the output that\n best satisfies a transitive objective function. The list of\n processors will differ according to the number of evil qubits and\n :func:`_proc_limit`, see details in :func:`self._random_subprocessors`.\n\n INPUT:\n f (callable): the function to call on each processor\n\n objective (callable): a function where objective(x,y) is True if x is\n better than y, and False otherwise. Assumes transitivity!\n\n OUTPUT:\n best: the object returned by f that maximizes the objective.\n "
]
|
Please provide a description of the function:def _objective_bestscore(self, old, new):
(oldscore, oldthing) = old
(newscore, newthing) = new
if oldscore is None:
return True
if newscore is None:
return False
return oldscore < newscore | [
"An objective function that returns True if new has a better score\n than old, and ``False`` otherwise.\n\n INPUTS:\n old (tuple): a tuple (score, embedding)\n\n new (tuple): a tuple (score, embedding)\n\n "
]
|
Please provide a description of the function:def _objective_qubitcount(self, old, new):
(oldscore, oldthing) = old
(newscore, newthing) = new
def measure(chains):
return sum(map(len, chains))
if oldscore is None:
return True
if newscore is None:
return False
if len(newthing):
if not len(oldthing):
return True
elif isinstance(newthing, tuple):
newlengths = sum(map(measure, newthing))
oldlengths = sum(map(measure, oldthing))
return newlengths < oldlengths
else:
return measure(newthing) < measure(oldthing)
else:
return False | [
"An objective function that returns True if new uses fewer qubits\n than old, and False otherwise. This objective function should only be\n used to compare embeddings of the same graph (or at least embeddings of\n graphs with the same number of qubits).\n\n INPUTS:\n old (tuple): a tuple (score, embedding)\n\n new (tuple): a tuple (score, embedding)\n\n "
]
|
Please provide a description of the function:def _find_evil(self):
M, N, L = self.M, self.N, self.L
proc = self._proc0
evil = []
cells = [(x, y) for x in range(M) for y in range(N)]
spots = [(u, v) for u in range(L) for v in range(L)]
for x, y in cells:
for u, v in spots:
p = (x, y, 0, u)
q = (x, y, 1, v)
if p in proc and q in proc and p not in proc[q]:
evil.append((p, q))
self._evil = evil | [
"A utility function that computes a list of missing couplers which\n should connect two working qubits in the same cell. The presence\n of (a nonconstant number of) these breaks the polynomial-time\n claim for our algorithm. Note: we're only actually hurt by missing\n intercell couplers.\n "
]
|
Please provide a description of the function:def largestNativeClique(self, max_chain_length=None):
def f(x):
return x.largestNativeClique(max_chain_length=max_chain_length)
objective = self._objective_bestscore
return self._translate(self._map_to_processors(f, objective)) | [
"Returns the largest native clique embedding we can find on the\n processor, with the shortest chainlength possible (for that clique\n size). If possible, returns a uniform choice among all largest\n cliques.\n\n INPUTS:\n max_chain_length (int): longest chain length to consider or ``None`` if chain\n lengths are allowed to be unbounded. (default: ``None``)\n\n OUTPUT:\n clique (list): a list containing lists of qubits, each associated to a\n chain. These lists of qubits are carefully arranged so that\n\n >>> [zip(chain,chain[1:]) for chain in clique]\n\n is a list of valid couplers.\n\n Note: this fails to return a uniform choice if there are broken\n intra-cell couplers between working qubits. (the choice is\n uniform on a particular subprocessor)\n "
]
|
Please provide a description of the function:def nativeCliqueEmbed(self, width):
def f(x):
return x.nativeCliqueEmbed(width)
objective = self._objective_bestscore
return self._translate(self._map_to_processors(f, objective)) | [
"Compute a maximum-sized native clique embedding in an induced\n subgraph of chimera with chainsize ``width+1``. If possible,\n returns a uniform choice among all largest cliques.\n\n INPUTS:\n width: width of the squares to search, also `chainlength-1`\n\n OUTPUT:\n clique: a list containing lists of qubits, each associated\n to a chain. These lists of qubits are carefully\n arranged so that\n\n >>> [zip(chain,chain[1:]) for chain in clique]\n\n is a list of valid couplers.\n\n Note: this fails to return a uniform choice if there are broken\n intra-cell couplers between working qubits. (the choice is\n uniform on a particular subprocessor)\n "
]
|
Please provide a description of the function:def largestNativeBiClique(self, chain_imbalance=0, max_chain_length=None):
def f(x):
return x.largestNativeBiClique(chain_imbalance=chain_imbalance,
max_chain_length=max_chain_length)
objective = self._objective_bestscore
emb = self._map_to_processors(f, objective)
return self._translate_partitioned(emb) | [
"Returns a native embedding for the complete bipartite graph :math:`K_{n,m}`\n for `n <= m`; where `n` is as large as possible and `m` is as large as\n possible subject to `n`. The native embedding of a complete bipartite\n graph is a set of horizontally-aligned qubits connected in lines\n together with an equal-sized set of vertically-aligned qubits\n connected in lines.\n\n INPUTS:\n chain_imbalance: how big of a difference to allow between the\n chain lengths on the two sides of the bipartition. If ``None``,\n then we allow an arbitrary imbalance. (default: ``0``)\n\n max_chain_length: longest chain length to consider or None if chain\n lengths are allowed to be unbounded. (default: ``None``)\n\n OUTPUT:\n embedding (tuple): a tuple of two lists containing lists of qubits.\n If ``embedding = (A_side, B_side)``, the lists found in ``A_side`` and\n ``B_side`` are chains of qubits. These lists of qubits are arranged so that\n\n >>> [zip(chain,chain[1:]) for chain in A_side]\n\n and\n\n >>> [zip(chain,chain[1:]) for chain in B_side]\n\n are lists of valid couplers.\n "
]
|
Please provide a description of the function:def _translate(self, embedding):
"Translates an embedding back to linear coordinates if necessary."
if embedding is None:
return None
if not self._linear:
return embedding
return [_bulk_to_linear(self.M, self.N, self.L, chain) for chain in embedding] | []
|
Please provide a description of the function:def _validate_chain_strength(sampler, chain_strength):
properties = sampler.properties
if 'extended_j_range' in properties:
max_chain_strength = - min(properties['extended_j_range'])
elif 'j_range' in properties:
max_chain_strength = - min(properties['j_range'])
else:
raise ValueError("input sampler should have 'j_range' and/or 'extended_j_range' property.")
if chain_strength is None:
chain_strength = max_chain_strength
elif chain_strength > max_chain_strength:
raise ValueError("Provided chain strength exceedds the allowed range.")
return chain_strength | [
"Validate the provided chain strength, checking J-ranges of the sampler's children.\n\n Args:\n chain_strength (float) The provided chain strength. Use None to use J-range.\n\n Returns (float):\n A valid chain strength, either provided or based on available J-range. Positive finite float.\n\n "
]
|
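A small sketch of the check (the stand-in sampler below is hypothetical; only its properties dict matters):

>>> class _FakeSampler:
...     properties = {'extended_j_range': [-2.0, 1.0]}
...
>>> _validate_chain_strength(_FakeSampler(), None)   # defaults to the largest allowed magnitude
2.0
>>> _validate_chain_strength(_FakeSampler(), 1.5)    # within range, passed through unchanged
1.5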
Please provide a description of the function:def sample(self, bqm, apply_flux_bias_offsets=True, **kwargs):
child = self.child
if apply_flux_bias_offsets:
if self.flux_biases is not None:
kwargs[FLUX_BIAS_KWARG] = self.flux_biases
return child.sample(bqm, **kwargs) | [
"Sample from the given Ising model.\n\n Args:\n\n h (list/dict):\n Linear biases of the Ising model. If a list, the list's indices\n are used as variable labels.\n\n J (dict of (int, int):float):\n Quadratic biases of the Ising model.\n\n apply_flux_bias_offsets (bool, optional):\n If True, use the calculated flux_bias offsets (if available).\n\n **kwargs:\n Optional keyword arguments for the sampling method, specified per solver.\n\n Examples:\n This example uses :class:`.VirtualGraphComposite` to instantiate a composed sampler\n that submits an Ising problem to a D-Wave solver selected by the user's\n default\n :std:doc:`D-Wave Cloud Client configuration file <cloud-client:intro>`.\n The problem represents a logical\n NOT gate using penalty function :math:`P = xy`, where variable x is the gate's input\n and y the output. This simple two-variable problem is manually minor-embedded\n to a single :std:doc:`Chimera <system:intro>` unit cell: each variable\n is represented by a chain of half the cell's qubits, x as qubits 0, 1, 4, 5,\n and y as qubits 2, 3, 6, 7.\n The chain strength is set to half the maximum allowed found from querying the solver's extended\n J range. In this example, the ten returned samples all represent valid states of\n the NOT gate.\n\n >>> from dwave.system.samplers import DWaveSampler\n >>> from dwave.system.composites import VirtualGraphComposite\n >>> embedding = {'x': {0, 4, 1, 5}, 'y': {2, 6, 3, 7}}\n >>> DWaveSampler().properties['extended_j_range'] # doctest: +SKIP\n [-2.0, 1.0]\n >>> sampler = VirtualGraphComposite(DWaveSampler(), embedding, chain_strength=1) # doctest: +SKIP\n >>> h = {}\n >>> J = {('x', 'y'): 1}\n >>> response = sampler.sample_ising(h, J, num_reads=10) # doctest: +SKIP\n >>> for sample in response.samples(): # doctest: +SKIP\n ... print(sample)\n ...\n {'y': -1, 'x': 1}\n {'y': 1, 'x': -1}\n {'y': -1, 'x': 1}\n {'y': -1, 'x': 1}\n {'y': -1, 'x': 1}\n {'y': 1, 'x': -1}\n {'y': 1, 'x': -1}\n {'y': 1, 'x': -1}\n {'y': -1, 'x': 1}\n {'y': 1, 'x': -1}\n\n See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_\n for explanations of technical terms in descriptions of Ocean tools.\n\n "
]
|
Please provide a description of the function:def get_flux_biases(sampler, embedding, chain_strength, num_reads=1000, max_age=3600):
if not isinstance(sampler, dimod.Sampler):
raise TypeError("input sampler should be DWaveSampler")
# try to read the chip_id, otherwise get the name
system_name = sampler.properties.get('chip_id', str(sampler.__class__))
try:
with cache_connect() as cur:
fbo = get_flux_biases_from_cache(cur, embedding.values(), system_name,
chain_strength=chain_strength,
max_age=max_age)
return fbo
except MissingFluxBias:
pass
# if dwave-drivers is not available, then we can't calculate the biases
try:
import dwave.drivers as drivers
except ImportError:
msg = ("dwave-drivers not found, cannot calculate flux biases. dwave-drivers can be "
"installed with "
"'pip install dwave-drivers --extra-index-url https://pypi.dwavesys.com/simple'. "
"See documentation for dwave-drivers license.")
raise RuntimeError(msg)
fbo = drivers.oneshot_flux_bias(sampler, embedding.values(), num_reads=num_reads,
chain_strength=chain_strength)
# store them in the cache
with cache_connect() as cur:
for chain in embedding.values():
v = next(iter(chain))
flux_bias = fbo.get(v, 0.0)
insert_flux_bias(cur, chain, system_name, flux_bias, chain_strength)
return fbo | [
"Get the flux bias offsets for sampler and embedding.\n\n Args:\n sampler (:obj:`.DWaveSampler`):\n A D-Wave sampler.\n\n embedding (dict[hashable, iterable]):\n Mapping from a source graph to the specified sampler’s graph (the target graph). The\n keys of embedding should be nodes in the source graph, the values should be an iterable\n of nodes in the target graph.\n\n chain_strength (number):\n Desired chain coupling strength. This is the magnitude of couplings between qubits\n in a chain.\n\n num_reads (int, optional, default=1000):\n The number of reads per system call if new flux biases need to be calculated.\n\n max_age (int, optional, default=3600):\n The maximum age (in seconds) allowed for previously calculated flux bias offsets.\n\n Returns:\n dict: A dict where the keys are the nodes in the chains and the values are the flux biases.\n\n "
]
|
Please provide a description of the function:def find_clique_embedding(k, m, n=None, t=None, target_edges=None):
import random
_, nodes = k
m, n, t, target_edges = _chimera_input(m, n, t, target_edges)
# Special cases to return optimal embeddings for small k. The general clique embedder uses chains of length
# at least 2, whereas cliques of size 1 and 2 can be embedded with single-qubit chains.
if len(nodes) == 1:
# If k == 1 we simply return a single chain consisting of a randomly sampled qubit.
qubits = set().union(*target_edges)
qubit = random.choice(tuple(qubits))
embedding = [[qubit]]
elif len(nodes) == 2:
# If k == 2 we simply return two one-qubit chains that are the endpoints of a randomly sampled coupler.
if not isinstance(target_edges, list):
target_edges = list(target_edges)
edge = target_edges[random.randrange(len(target_edges))]
embedding = [[edge[0]], [edge[1]]]
else:
# General case for k > 2.
embedding = processor(target_edges, M=m, N=n, L=t).tightestNativeClique(len(nodes))
if not embedding:
raise ValueError("cannot find a K{} embedding for given Chimera lattice".format(k))
return dict(zip(nodes, embedding)) | [
"Find an embedding for a clique in a Chimera graph.\n\n Given a target :term:`Chimera` graph size, and a clique (fully connect graph),\n attempts to find an embedding.\n\n Args:\n k (int/iterable):\n Clique to embed. If k is an integer, generates an embedding for a clique of size k\n labelled [0,k-1].\n If k is an iterable, generates an embedding for a clique of size len(k), where\n iterable k is the variable labels.\n\n m (int):\n Number of rows in the Chimera lattice.\n\n n (int, optional, default=m):\n Number of columns in the Chimera lattice.\n\n t (int, optional, default 4):\n Size of the shore within each Chimera tile.\n\n target_edges (iterable[edge]):\n A list of edges in the target Chimera graph. Nodes are labelled as\n returned by :func:`~dwave_networkx.generators.chimera_graph`.\n\n Returns:\n dict: An embedding mapping a clique to the Chimera lattice.\n\n Examples:\n The first example finds an embedding for a :math:`K_4` complete graph in a single\n Chimera unit cell. The second for an alphanumerically labeled :math:`K_3`\n graph in 4 unit cells.\n\n >>> from dwave.embedding.chimera import find_clique_embedding\n ...\n >>> embedding = find_clique_embedding(4, 1, 1)\n >>> embedding # doctest: +SKIP\n {0: [4, 0], 1: [5, 1], 2: [6, 2], 3: [7, 3]}\n\n >>> from dwave.embedding.chimera import find_clique_embedding\n ...\n >>> embedding = find_clique_embedding(['a', 'b', 'c'], m=2, n=2, t=4)\n >>> embedding # doctest: +SKIP\n {'a': [20, 16], 'b': [21, 17], 'c': [22, 18]}\n\n "
]
|
Please provide a description of the function:def find_biclique_embedding(a, b, m, n=None, t=None, target_edges=None):
_, anodes = a
_, bnodes = b
m, n, t, target_edges = _chimera_input(m, n, t, target_edges)
embedding = processor(target_edges, M=m, N=n, L=t).tightestNativeBiClique(len(anodes), len(bnodes))
if not embedding:
raise ValueError("cannot find a K{},{} embedding for given Chimera lattice".format(a, b))
left, right = embedding
return dict(zip(anodes, left)), dict(zip(bnodes, right)) | [
"Find an embedding for a biclique in a Chimera graph.\n\n Given a target :term:`Chimera` graph size, and a biclique (a bipartite graph where every\n vertex in a set in connected to all vertices in the other set), attempts to find an embedding.\n\n Args:\n a (int/iterable):\n Left shore of the biclique to embed. If a is an integer, generates an embedding\n for a biclique with the left shore of size a labelled [0,a-1].\n If a is an iterable, generates an embedding for a biclique with the left shore of size\n len(a), where iterable a is the variable labels.\n\n b (int/iterable):\n Right shore of the biclique to embed.If b is an integer, generates an embedding\n for a biclique with the right shore of size b labelled [0,b-1].\n If b is an iterable, generates an embedding for a biclique with the right shore of\n size len(b), where iterable b provides the variable labels.\n\n m (int):\n Number of rows in the Chimera lattice.\n\n n (int, optional, default=m):\n Number of columns in the Chimera lattice.\n\n t (int, optional, default 4):\n Size of the shore within each Chimera tile.\n\n target_edges (iterable[edge]):\n A list of edges in the target Chimera graph. Nodes are labelled as\n returned by :func:`~dwave_networkx.generators.chimera_graph`.\n\n Returns:\n tuple: A 2-tuple containing:\n\n dict: An embedding mapping the left shore of the biclique to the Chimera lattice.\n\n dict: An embedding mapping the right shore of the biclique to the Chimera lattice\n\n Examples:\n This example finds an embedding for an alphanumerically labeled biclique in a single\n Chimera unit cell.\n\n >>> from dwave.embedding.chimera import find_biclique_embedding\n ...\n >>> left, right = find_biclique_embedding(['a', 'b', 'c'], ['d', 'e'], 1, 1)\n >>> print(left, right) # doctest: +SKIP\n {'a': [4], 'b': [5], 'c': [6]} {'d': [0], 'e': [1]}\n\n "
]
|
Please provide a description of the function:def find_grid_embedding(dim, m, n=None, t=4):
m, n, t, target_edges = _chimera_input(m, n, t, None)
indexer = dnx.generators.chimera.chimera_coordinates(m, n, t)
dim = list(dim)
num_dim = len(dim)
if num_dim == 1:
def _key(row, col, aisle): return row
dim.extend([1, 1])
elif num_dim == 2:
def _key(row, col, aisle): return row, col
dim.append(1)
elif num_dim == 3:
def _key(row, col, aisle): return row, col, aisle
else:
raise ValueError("find_grid_embedding supports between one and three dimensions")
rows, cols, aisles = dim
if rows > m or cols > n or aisles > t:
msg = ("the largest grid that find_grid_embedding can fit in a ({}, {}, {}) Chimera-lattice "
"is {}x{}x{}; given grid is {}x{}x{}").format(m, n, t, m, n, t, rows, cols, aisles)
raise ValueError(msg)
return {_key(row, col, aisle): [indexer.int((row, col, 0, aisle)), indexer.int((row, col, 1, aisle))]
for row in range(dim[0]) for col in range(dim[1]) for aisle in range(dim[2])} | [
"Find an embedding for a grid in a Chimera graph.\n\n Given a target :term:`Chimera` graph size, and grid dimensions, attempts to find an embedding.\n\n Args:\n dim (iterable[int]):\n Sizes of each grid dimension. Length can be between 1 and 3.\n\n m (int):\n Number of rows in the Chimera lattice.\n\n n (int, optional, default=m):\n Number of columns in the Chimera lattice.\n\n t (int, optional, default 4):\n Size of the shore within each Chimera tile.\n\n Returns:\n dict: An embedding mapping a grid to the Chimera lattice.\n\n Examples:\n This example finds an embedding for a 2x3 grid in a 12x12 lattice of Chimera unit cells.\n\n >>> from dwave.embedding.chimera import find_grid_embedding\n ...\n >>> embedding = find_grid_embedding([2, 3], m=12, n=12, t=4)\n >>> embedding # doctest: +SKIP\n {(0, 0): [0, 4],\n (0, 1): [8, 12],\n (0, 2): [16, 20],\n (1, 0): [96, 100],\n (1, 1): [104, 108],\n (1, 2): [112, 116]}\n\n "
]
|
Please provide a description of the function:def cache_file(app_name=APPNAME, app_author=APPAUTHOR, filename=DATABASENAME):
user_data_dir = homebase.user_data_dir(app_name=app_name, app_author=app_author, create=True)
return os.path.join(user_data_dir, filename) | [
"Returns the filename (including path) for the data cache.\n\n The path will depend on the operating system, certain environmental\n variables and whether it is being run inside a virtual environment.\n See `homebase <https://github.com/dwavesystems/homebase>`_.\n\n Args:\n app_name (str, optional): The application name.\n Default is given by :obj:`.APPNAME`.\n app_author (str, optional): The application author. Default\n is given by :obj:`.APPAUTHOR`.\n filename (str, optional): The name of the database file.\n Default is given by :obj:`DATABASENAME`.\n\n Returns:\n str: The full path to the file that can be used as a cache.\n\n Notes:\n Creates the directory if it does not already exist.\n\n If run inside of a virtual environment, the cache will be stored\n in `/path/to/virtualenv/data/app_name`\n\n "
]
|
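A minimal usage sketch for cache_file above. The import path and the argument values are assumptions for illustration; in the real module the defaults come from the APPNAME, APPAUTHOR and DATABASENAME constants, which are not shown here.

    import os

    # Hypothetical import path; adjust to wherever cache_file is actually defined.
    from mypackage.cache import cache_file

    path = cache_file(app_name='my_app', app_author='my_org', filename='cache.db')
    print(path)                                  # e.g. ~/.local/share/my_app/cache.db on Linux
    assert os.path.basename(path) == 'cache.db'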
Please provide a description of the function:def _restore_isolated(sampleset, bqm, isolated):
samples = sampleset.record.sample
variables = sampleset.variables
new_samples = np.empty((len(sampleset), len(isolated)), dtype=samples.dtype)
# we don't let the isolated variables interact with each other for now because
# it will slow this down substantially
for col, v in enumerate(isolated):
try:
neighbours, biases = zip(*((u, bias) for u, bias in bqm.adj[v].items()
if u in variables)) # ignore other isolates
except ValueError:
# happens when only neighbors are other isolated variables
new_samples[:, col] = bqm.linear[v] <= 0
continue
idxs = [variables.index(u) for u in neighbours]
# figure out which value for v would minimize the energy
# v(h_v + \sum_u J_uv * u)
new_samples[:, col] = samples[:, idxs].dot(biases) < -bqm.linear[v]
if bqm.vartype is dimod.SPIN:
new_samples = 2*new_samples - 1
return np.concatenate((samples, new_samples), axis=1), list(variables) + isolated | [
"Return samples-like by adding isolated variables into sampleset in a\n way that minimizes the energy (relative to the other non-isolated variables).\n "
]
|
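The energy-minimizing rule used by _restore_isolated above can be shown on its own: an isolated spin v is set to +1 exactly when its linear bias plus the field from its already-sampled neighbours is negative. A self-contained toy sketch (all numbers invented for illustration):

    import numpy as np

    h_v = 0.5                                     # linear bias of the isolated variable v
    neighbour_biases = np.array([1.0, -2.0])      # J_uv for v's two neighbours
    neighbour_values = np.array([[1, 1],          # one row of spin values per sample
                                 [1, -1]])

    # v contributes v * (h_v + sum_u J_uv * u); choose v = +1 when that field is negative,
    # which is the same comparison as `samples[:, idxs].dot(biases) < -bqm.linear[v]` above.
    field = neighbour_values.dot(neighbour_biases) + h_v
    v_values = np.where(field < 0, 1, -1)
    print(v_values)                               # [ 1 -1]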
Please provide a description of the function:def _restore_isolated_higherorder(sampleset, poly, isolated):
samples = sampleset.record.sample
variables = sampleset.variables
new_samples = np.empty((len(sampleset), len(isolated)), dtype=samples.dtype)
# we don't let the isolated variables interact with each other for now because
# it will slow this down substantially
isolated_energies = {v: 0 for v in isolated}
for term, bias in poly.items():
isolated_components = term.intersection(isolated)
if not isolated_components:
continue
en = bias # energy contribution of the term
for v in term:
if v in isolated_energies:
continue
en *= samples[:, sampleset.variables.index(v)]
for v in isolated_components:
isolated_energies[v] += en
# now put those energies into new_samples
for col, v in enumerate(isolated):
new_samples[:, col] = isolated_energies[v] < 0
if poly.vartype is dimod.SPIN:
new_samples = 2*new_samples - 1
return np.concatenate((samples, new_samples), axis=1), list(variables) + isolated | [
"Return samples-like by adding isolated variables into sampleset in a\n way that minimizes the energy (relative to the other non-isolated variables).\n\n Isolated should be ordered.\n "
]
|
Please provide a description of the function:def sample(self, bqm, **parameters):
child = self.child
cutoff = self._cutoff
cutoff_vartype = self._cutoff_vartype
comp = self._comparison
if cutoff_vartype is dimod.SPIN:
original = bqm.spin
else:
original = bqm.binary
# remove all of the interactions less than cutoff
new = type(bqm)(original.linear,
((u, v, bias)
for (u, v), bias in original.quadratic.items()
if not comp(abs(bias), cutoff)),
original.offset,
original.vartype)
# next we check for isolated qubits and remove them, we could do this as
# part of the construction but the assumption is there should not be
# a large number in the 'typical' case
isolated = [v for v in new if not new.adj[v]]
new.remove_variables_from(isolated)
if isolated and len(new) == 0:
# in this case all variables are isolated, so we just put one back
# to serve as the basis
v = isolated.pop()
new.linear[v] = original.linear[v]
# get the samples from the child sampler and put them into the original vartype
sampleset = child.sample(new, **parameters).change_vartype(bqm.vartype, inplace=True)
# we now need to add the isolated back in, in a way that minimizes
# the energy. There are lots of ways to do this but for now we'll just
# do one
if isolated:
samples, variables = _restore_isolated(sampleset, bqm, isolated)
else:
samples = sampleset.record.sample
variables = sampleset.variables
vectors = sampleset.data_vectors
vectors.pop('energy') # we're going to recalculate the energy anyway
return dimod.SampleSet.from_samples_bqm((samples, variables), bqm, **vectors) | [
"Cutoff and sample from the provided binary quadratic model.\n\n Removes interactions smaller than a given cutoff. Isolated\n variables (after the cutoff) are also removed.\n\n Note that if the problem had isolated variables before the cutoff, they\n will also be affected.\n\n Args:\n bqm (:obj:`dimod.BinaryQuadraticModel`):\n Binary quadratic model to be sampled from.\n\n **parameters:\n Parameters for the sampling method, specified by the child sampler.\n\n Returns:\n :obj:`dimod.SampleSet`\n\n "
]
|
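A usage sketch for the sample method above. The class name is not shown in this snippet; the example assumes it is available as dimod's CutOffComposite (newer stacks ship an equivalent composite in dwave.preprocessing), wrapped around the reference ExactSolver:

    import dimod

    # Assumption: the composite defining sample() above is dimod.CutOffComposite.
    sampler = dimod.CutOffComposite(dimod.ExactSolver(), 0.5)

    bqm = dimod.BinaryQuadraticModel.from_ising(
        {'a': -1.0, 'b': 0.2, 'c': 0.0},
        {('a', 'b'): 1.5, ('b', 'c'): 0.1})       # ('b', 'c') falls below the cutoff

    sampleset = sampler.sample(bqm)
    # 'c' becomes isolated after the cutoff; it is restored afterwards in a way
    # that minimizes its energy against the returned samples.
    print(sampleset.first.sample)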
Please provide a description of the function:def sample_poly(self, poly, **kwargs):
child = self.child
cutoff = self._cutoff
cutoff_vartype = self._cutoff_vartype
comp = self._comparison
if cutoff_vartype is dimod.SPIN:
original = poly.to_spin(copy=False)
else:
original = poly.to_binary(copy=False)
# remove all of the terms of order >= 2 that have a bias less than cutoff
new = type(poly)(((term, bias) for term, bias in original.items()
if len(term) > 1 and not comp(abs(bias), cutoff)),
cutoff_vartype)
# also include the linear biases for the variables in new
for v in new.variables:
term = v,
if term in original:
new[term] = original[term]
# everything else is isolated
isolated = list(original.variables.difference(new.variables))
if isolated and len(new) == 0:
# in this case all variables are isolated, so we just put one back
# to serve as the basis
term = isolated.pop(),
new[term] = original[term]
# get the samples from the child sampler and put them into the original vartype
sampleset = child.sample_poly(new, **kwargs).change_vartype(poly.vartype, inplace=True)
# we now need to add the isolated back in, in a way that minimizes
# the energy. There are lots of ways to do this but for now we'll just
# do one
if isolated:
samples, variables = _restore_isolated_higherorder(sampleset, poly, isolated)
else:
samples = sampleset.record.sample
variables = sampleset.variables
vectors = sampleset.data_vectors
vectors.pop('energy') # we're going to recalculate the energy anyway
return dimod.SampleSet.from_samples_bqm((samples, variables), poly, **vectors) | [
"Cutoff and sample from the provided binary polynomial.\n\n Removes interactions smaller than a given cutoff. Isolated\n variables (after the cutoff) are also removed.\n\n Note that if the problem had isolated variables before the cutoff, they\n will also be affected.\n\n Args:\n poly (:obj:`dimod.BinaryPolynomial`):\n Binary polynomial to be sampled from.\n\n **parameters:\n Parameters for the sampling method, specified by the child sampler.\n\n Returns:\n :obj:`dimod.SampleSet`\n\n "
]
|
Please provide a description of the function:def diagnose_embedding(emb, source, target):
if not hasattr(source, 'edges'):
source = nx.Graph(source)
if not hasattr(target, 'edges'):
target = nx.Graph(target)
label = {}
embedded = set()
for x in source:
try:
embx = emb[x]
missing_chain = len(embx) == 0
except KeyError:
missing_chain = True
if missing_chain:
yield MissingChainError, x
continue
all_present = True
for q in embx:
if label.get(q, x) != x:
yield ChainOverlapError, q, x, label[q]
elif q not in target:
all_present = False
yield InvalidNodeError, x, q
else:
label[q] = x
if all_present:
embedded.add(x)
if not nx.is_connected(target.subgraph(embx)):
yield DisconnectedChainError, x
yielded = nx.Graph()
for p, q in target.subgraph(label).edges():
yielded.add_edge(label[p], label[q])
for x, y in source.edges():
if x == y:
continue
if x in embedded and y in embedded and not yielded.has_edge(x, y):
yield MissingEdgeError, x, y | [
"A detailed diagnostic for minor embeddings.\n\n This diagnostic produces a generator, which lists all issues with `emb`. The errors\n are yielded in the form\n\n ExceptionClass, arg1, arg2,...\n\n where the arguments following the class are used to construct the exception object.\n User-friendly variants of this function are :func:`is_valid_embedding`, which returns a\n bool, and :func:`verify_embedding` which raises the first observed error. All exceptions\n are subclasses of :exc:`.EmbeddingError`.\n\n Args:\n emb (dict):\n Dictionary mapping source nodes to arrays of target nodes.\n\n source (list/:obj:`networkx.Graph`):\n Graph to be embedded as a NetworkX graph or a list of edges.\n\n target (list/:obj:`networkx.Graph`):\n Graph being embedded into as a NetworkX graph or a list of edges.\n\n Yields:\n One of:\n :exc:`.MissingChainError`, snode: a source node label that does not occur as a key of `emb`, or for which emb[snode] is empty\n\n :exc:`.ChainOverlapError`, tnode, snode0, snode0: a target node which occurs in both `emb[snode0]` and `emb[snode1]`\n\n :exc:`.DisconnectedChainError`, snode: a source node label whose chain is not a connected subgraph of `target`\n\n :exc:`.InvalidNodeError`, tnode, snode: a source node label and putative target node label which is not a node of `target`\n\n :exc:`.MissingEdgeError`, snode0, snode1: a pair of source node labels defining an edge which is not present between their chains\n "
]
|
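A short usage sketch for diagnose_embedding, assuming the function is imported from dwave-system's dwave.embedding package; the graphs and embedding are toy values:

    import networkx as nx
    from dwave.embedding import diagnose_embedding

    source = nx.complete_graph(3)                 # K3 to be embedded
    target = nx.cycle_graph(4)                    # square target graph
    emb = {0: [0], 1: [1], 2: [2, 3]}             # chain {2, 3} represents source node 2

    for problem in diagnose_embedding(emb, source, target):
        exc_class, *args = problem
        print(exc_class.__name__, args)           # prints nothing: this embedding is valid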
Please provide a description of the function:def is_valid_embedding(emb, source, target):
for _ in diagnose_embedding(emb, source, target):
return False
return True | [
"A simple (bool) diagnostic for minor embeddings.\n\n See :func:`diagnose_embedding` for a more detailed diagnostic / more information.\n\n Args:\n emb (dict): a dictionary mapping source nodes to arrays of target nodes\n source (graph or edgelist): the graph to be embedded\n target (graph or edgelist): the graph being embedded into\n\n Returns:\n bool: True if `emb` is valid.\n\n "
]
|
Please provide a description of the function:def verify_embedding(emb, source, target, ignore_errors=()):
for error in diagnose_embedding(emb, source, target):
eclass = error[0]
if eclass not in ignore_errors:
raise eclass(*error[1:])
return True | [
"A simple (exception-raising) diagnostic for minor embeddings.\n\n See :func:`diagnose_embedding` for a more detailed diagnostic / more information.\n\n Args:\n emb (dict): a dictionary mapping source nodes to arrays of target nodes\n source (graph or edgelist): the graph to be embedded\n target (graph or edgelist): the graph being embedded into\n\n Raises:\n EmbeddingError: a catch-all class for the below\n\n MissingChainError: in case a key is missing from `emb`, or the associated chain is empty\n ChainOverlapError: in case two chains contain the same target node\n DisconnectedChainError: in case a chain is disconnected\n InvalidNodeError: in case a chain contains a node label not found in `target`\n MissingEdgeError: in case a source edge is not represented by any target edges\n\n Returns:\n bool: True (if no exception is raised)\n "
]
|
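verify_embedding raises on the first problem instead of yielding diagnostics; a small sketch of the exception-based flow, with the exception import assumed to live in dwave.embedding.exceptions (adjust if your version exports it elsewhere):

    import networkx as nx
    from dwave.embedding import verify_embedding
    from dwave.embedding.exceptions import MissingChainError

    source = nx.complete_graph(3)
    target = nx.cycle_graph(4)
    bad_emb = {0: [0], 1: [1]}                    # source node 2 has no chain at all

    try:
        verify_embedding(bad_emb, source, target)
    except MissingChainError as err:
        print("invalid embedding:", err)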
Please provide a description of the function:def resolve_object(self, object_arg_name, resolver):
def decorator(func_or_class):
if isinstance(func_or_class, type):
# Handle Resource classes decoration
# pylint: disable=protected-access
func_or_class._apply_decorator_to_methods(decorator)
return func_or_class
@wraps(func_or_class)
def wrapper(*args, **kwargs):
kwargs[object_arg_name] = resolver(kwargs)
return func_or_class(*args, **kwargs)
return wrapper
return decorator | [
"\n A helper decorator to resolve object instance from arguments (e.g. identity).\n\n Example:\n >>> @namespace.route('/<int:user_id>')\n ... class MyResource(Resource):\n ... @namespace.resolve_object(\n ... object_arg_name='user',\n ... resolver=lambda kwargs: User.query.get_or_404(kwargs.pop('user_id'))\n ... )\n ... def get(self, user):\n ... # user is a User instance here\n "
]
|
Please provide a description of the function:def model(self, name=None, model=None, mask=None, **kwargs):
if isinstance(model, (flask_marshmallow.Schema, flask_marshmallow.base_fields.FieldABC)):
if not name:
name = model.__class__.__name__
api_model = Model(name, model, mask=mask)
api_model.__apidoc__ = kwargs
return self.add_model(name, api_model)
return super(Namespace, self).model(name=name, model=model, **kwargs) | [
"\n Model registration decorator.\n "
]
|
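A hedged usage sketch of the model registration decorator above. The flask_restplus_patched import path and the TeamSchema fields are assumptions made up for illustration:

    from flask_marshmallow import Schema, base_fields
    # Hypothetical package name for the patched Namespace shown in this codebase.
    from flask_restplus_patched import Namespace

    api = Namespace('teams', description='Teams service')

    class TeamSchema(Schema):
        id = base_fields.Integer()
        name = base_fields.String()

    # Registers the marshmallow schema as an OpenAPI model named "Team".
    team_model = api.model('Team', TeamSchema())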
Please provide a description of the function:def parameters(self, parameters, locations=None):
def decorator(func):
if locations is None and parameters.many:
_locations = ('json', )
else:
_locations = locations
if _locations is not None:
parameters.context['in'] = _locations
return self.doc(params=parameters)(
self.response(code=HTTPStatus.UNPROCESSABLE_ENTITY)(
self.WEBARGS_PARSER.use_args(parameters, locations=_locations)(
func
)
)
)
return decorator | [
"\n Endpoint parameters registration decorator.\n "
]
|
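A sketch of how the parameters decorator above is typically applied to a resource method. The package import, the Namespace instance and the input schema are all assumed names; in the real codebase the schema would subclass its Parameters helpers rather than a bare marshmallow Schema:

    from flask_marshmallow import Schema, base_fields
    from flask_restplus_patched import Namespace, Resource   # hypothetical import path

    api = Namespace('teams', description='Teams service')

    class TeamCreationParameters(Schema):
        name = base_fields.String(required=True)

    @api.route('/')
    class Teams(Resource):

        @api.parameters(TeamCreationParameters())
        def post(self, args):
            # `args` holds the parsed and validated request parameters.
            return {'created': args['name']}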
Please provide a description of the function:def response(self, model=None, code=HTTPStatus.OK, description=None, **kwargs):
code = HTTPStatus(code)
if code is HTTPStatus.NO_CONTENT:
assert model is None
if model is None and code not in {HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT}:
if code.value not in http_exceptions.default_exceptions:
raise ValueError("`model` parameter is required for code %d" % code)
model = self.model(
name='HTTPError%d' % code,
model=DefaultHTTPErrorSchema(http_code=code)
)
if description is None:
description = code.description
def response_serializer_decorator(func):
def dump_wrapper(*args, **kwargs):
# pylint: disable=missing-docstring
response = func(*args, **kwargs)
extra_headers = None
if response is None:
if model is not None:
raise ValueError("Response cannot be None with HTTP status %d" % code)
return flask.Response(status=code)
elif isinstance(response, flask.Response) or model is None:
return response
elif isinstance(response, tuple):
response, _code, extra_headers = unpack(response)
else:
_code = code
if HTTPStatus(_code) is code:
response = model.dump(response).data
return response, _code, extra_headers
return dump_wrapper
def decorator(func_or_class):
if code.value in http_exceptions.default_exceptions:
# If the code is handled by raising an exception, it will
# produce a response later, so we don't need to apply a useless
# wrapper.
decorated_func_or_class = func_or_class
elif isinstance(func_or_class, type):
# Handle Resource classes decoration
# pylint: disable=protected-access
func_or_class._apply_decorator_to_methods(response_serializer_decorator)
decorated_func_or_class = func_or_class
else:
decorated_func_or_class = wraps(func_or_class)(
response_serializer_decorator(func_or_class)
)
if model is None:
api_model = None
else:
if isinstance(model, Model):
api_model = model
else:
api_model = self.model(model=model)
if getattr(model, 'many', False):
api_model = [api_model]
doc_decorator = self.doc(
responses={
code.value: (description, api_model)
}
)
return doc_decorator(decorated_func_or_class)
return decorator | [
"\n Endpoint response OpenAPI documentation decorator.\n\n It automatically documents HTTPError%(code)d responses with relevant\n schemas.\n\n Arguments:\n model (flask_marshmallow.Schema) - it can be a class or an instance\n of the class, which will be used for OpenAPI documentation\n purposes. It can be omitted if ``code`` argument is set to an\n error HTTP status code.\n code (int) - HTTP status code which is documented.\n description (str)\n\n Example:\n >>> @namespace.response(BaseTeamSchema(many=True))\n ... @namespace.response(code=HTTPStatus.FORBIDDEN)\n ... def get_teams():\n ... if not user.is_admin:\n ... abort(HTTPStatus.FORBIDDEN)\n ... return Team.query.all()\n ",
"\n This decorator handles responses to serialize the returned value\n with a given model.\n "
]
|
Please provide a description of the function:def _apply_decorator_to_methods(cls, decorator):
for method in cls.methods:
method_name = method.lower()
decorated_method_func = decorator(getattr(cls, method_name))
setattr(cls, method_name, decorated_method_func) | [
"\n This helper can apply a given decorator to all methods on the current\n Resource.\n\n NOTE: In contrast to ``Resource.method_decorators``, which has a\n similar use-case, this method applies decorators directly and override\n methods in-place, while the decorators listed in\n ``Resource.method_decorators`` are applied on every request which is\n quite a waste of resources.\n "
]
|
Please provide a description of the function:def options(self, *args, **kwargs):
# This is a generic implementation of OPTIONS method for resources.
# This method checks every permissions provided as decorators for other
# methods to provide information about what methods `current_user` can
# use.
method_funcs = [getattr(self, m.lower()) for m in self.methods]
allowed_methods = []
request_oauth_backup = getattr(flask.request, 'oauth', None)
for method_func in method_funcs:
if getattr(method_func, '_access_restriction_decorators', None):
if not hasattr(method_func, '_cached_fake_method_func'):
fake_method_func = lambda *args, **kwargs: True
# `__name__` is used in `login_required` decorator, so it
# is required to fake this also
fake_method_func.__name__ = 'options'
# Decorate the fake method with the registered access
# restriction decorators
for decorator in method_func._access_restriction_decorators:
fake_method_func = decorator(fake_method_func)
# Cache the `fake_method_func` to avoid redoing this over
# and over again
method_func.__dict__['_cached_fake_method_func'] = fake_method_func
else:
fake_method_func = method_func._cached_fake_method_func
flask.request.oauth = None
try:
fake_method_func(self, *args, **kwargs)
except HTTPException:
# This method is not allowed, so skip it
continue
allowed_methods.append(method_func.__name__.upper())
flask.request.oauth = request_oauth_backup
return flask.Response(
status=HTTPStatus.NO_CONTENT,
headers={'Allow': ", ".join(allowed_methods)}
) | [
"\n Check which methods are allowed.\n\n Use this method if you need to know what operations are allowed to be\n performed on this endpoint, e.g. to decide wether to display a button\n in your UI.\n\n The list of allowed methods is provided in `Allow` response header.\n "
]
|
Please provide a description of the function:def validate_patch_structure(self, data):
if data['op'] not in self.NO_VALUE_OPERATIONS and 'value' not in data:
raise ValidationError('value is required')
if 'path' not in data:
raise ValidationError('Path is required and must always begin with /')
else:
data['field_name'] = data['path'][1:] | [
"\n Common validation of PATCH structure\n\n Provide check that 'value' present in all operations expect it.\n\n Provide check if 'path' is present. 'path' can be absent if provided\n without '/' at the start. Supposed that if 'path' is present than it\n is prepended with '/'.\n Removing '/' in the beginning to simplify usage in resource.\n "
]
|
Please provide a description of the function:def perform_patch(cls, operations, obj, state=None):
if state is None:
state = {}
for operation in operations:
if not cls._process_patch_operation(operation, obj=obj, state=state):
log.info(
"%s patching has been stopped because of unknown operation %s",
obj.__class__.__name__,
operation
)
raise ValidationError(
"Failed to update %s details. Operation %s could not succeed." % (
obj.__class__.__name__,
operation
)
)
return True | [
"\n Performs all necessary operations by calling class methods with\n corresponding names.\n "
]
|
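A self-contained sketch of the RFC 6902-style operations list that perform_patch consumes and of what its `replace` handler boils down to; the User class and field names are toy values:

    class User:
        """Toy object standing in for a model instance being patched."""
        def __init__(self):
            self.full_name = 'John Doe'

    user = User()

    # Operations as perform_patch expects them; `field_name` is what
    # validate_patch_structure derives from the '/'-prefixed path.
    operations = [
        {'op': 'replace', 'path': '/full_name', 'field_name': 'full_name',
         'value': 'Jane Doe'},
    ]

    # In the codebase you would call SomePatchParameters.perform_patch(operations, obj=user);
    # its `replace` handler reduces to a guarded setattr:
    for op in operations:
        if op['op'] == 'replace':
            setattr(user, op['field_name'], op['value'])

    print(user.full_name)                          # Jane Doe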
Please provide a description of the function:def _process_patch_operation(cls, operation, obj, state):
field_operation = operation['op']
if field_operation == cls.OP_REPLACE:
return cls.replace(obj, operation['field_name'], operation['value'], state=state)
elif field_operation == cls.OP_TEST:
return cls.test(obj, operation['field_name'], operation['value'], state=state)
elif field_operation == cls.OP_ADD:
return cls.add(obj, operation['field_name'], operation['value'], state=state)
elif field_operation == cls.OP_MOVE:
return cls.move(obj, operation['field_name'], operation['value'], state=state)
elif field_operation == cls.OP_COPY:
return cls.copy(obj, operation['field_name'], operation['value'], state=state)
elif field_operation == cls.OP_REMOVE:
return cls.remove(obj, operation['field_name'], state=state)
return False | [
"\n Args:\n operation (dict): one patch operation in RFC 6902 format.\n obj (object): an instance which is needed to be patched.\n state (dict): inter-operations state storage\n\n Returns:\n processing_status (bool): True if operation was handled, otherwise False.\n "
]
|
Please provide a description of the function:def replace(cls, obj, field, value, state):
if not hasattr(obj, field):
raise ValidationError("Field '%s' does not exist, so it cannot be patched" % field)
setattr(obj, field, value)
return True | [
"\n This is method for replace operation. It is separated to provide a\n possibility to easily override it in your Parameters.\n\n Args:\n obj (object): an instance to change.\n field (str): field name\n value (str): new value\n state (dict): inter-operations state storage\n\n Returns:\n processing_status (bool): True\n "
]
|
Please provide a description of the function:def get_identities(self, item):
# All identities are in the post stream
# The first post is the question. Next replies
posts = item['data']['post_stream']['posts']
for post in posts:
user = self.get_sh_identity(post)
yield user | [
" Return the identities from an item "
]
|
Please provide a description of the function:def __related_categories(self, category_id):
related = []
for cat in self.categories_tree:
if category_id in self.categories_tree[cat]:
related.append(self.categories[cat])
return related | [
" Get all related categories to a given one "
]
|
Please provide a description of the function:def __show_categories_tree(self):
for cat in self.categories_tree:
print("%s (%i)" % (self.categories[cat], cat))
for subcat in self.categories_tree[cat]:
print("-> %s (%i)" % (self.categories[subcat], subcat)) | [
" Show the category tree: list of categories and its subcategories "
]
|
Please provide a description of the function:def fetch_track_items(upstream_file_url, data_source):
track_uris = []
req = requests_ses.get(upstream_file_url)
try:
req.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.warning("Can't get gerrit reviews from %s", upstream_file_url)
logger.warning(ex)
return track_uris
logger.debug("Found reviews to be tracked in %s", upstream_file_url)
lines = iter(req.text.split("\n"))
for line in lines:
if 'url: ' in line:
dso = next(lines).split('system: ')[1].strip('\n')
if dso == data_source:
track_uris.append(line.split('url: ')[1].strip('\n'))
return track_uris | [
" The file format is:\n\n # Upstream contributions, bitergia will crawl this and extract the relevant information\n # system is one of Gerrit, Bugzilla, Launchpad (insert more)\n ---\n -\n url: https://review.openstack.org/169836\n system: Gerrit\n "
]
|
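A self-contained sketch of the parsing loop used by fetch_track_items, run against an inline string in the upstream file format from the docstring instead of a live HTTP request:

    upstream_text = """\
    # Upstream contributions
    ---
    -
     url: https://review.openstack.org/169836
     system: Gerrit
    -
     url: https://bugs.launchpad.net/bugs/1
     system: Launchpad
    """

    track_uris = []
    lines = iter(upstream_text.split("\n"))
    for line in lines:
        if 'url: ' in line:
            # The data source name is on the line following the URL.
            dso = next(lines).split('system: ')[1].strip('\n')
            if dso == 'Gerrit':
                track_uris.append(line.split('url: ')[1].strip('\n'))

    print(track_uris)                              # ['https://review.openstack.org/169836']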
Please provide a description of the function:def _create_projects_file(project_name, data_source, items):
repositories = []
for item in items:
if item['origin'] not in repositories:
repositories.append(item['origin'])
projects = {
project_name: {
data_source: repositories
}
}
projects_file, projects_file_path = tempfile.mkstemp(prefix='track_items_')
with open(projects_file_path, "w") as pfile:
json.dump(projects, pfile, indent=True)
return projects_file_path | [
" Create a projects file from the items origin data "
]
|
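A small sketch of the projects mapping that _create_projects_file writes out, using made-up items that carry only the 'origin' field the function reads:

    import json
    import tempfile

    items = [
        {'origin': 'https://review.openstack.org/openstack/nova'},
        {'origin': 'https://review.openstack.org/openstack/nova'},      # duplicate, kept once
        {'origin': 'https://review.openstack.org/openstack/neutron'},
    ]

    repositories = []
    for item in items:
        if item['origin'] not in repositories:
            repositories.append(item['origin'])

    projects = {'TrackProject': {'gerrit': repositories}}

    _, projects_file_path = tempfile.mkstemp(prefix='track_items_')
    with open(projects_file_path, "w") as pfile:
        json.dump(projects, pfile, indent=True)

    print(projects_file_path)                      # path to the generated projects file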
Please provide a description of the function:def __convert_booleans(self, eitem):
for field in eitem.keys():
if isinstance(eitem[field], bool):
if eitem[field]:
eitem[field] = 1
else:
eitem[field] = 0
return eitem | [
" Convert True/False to 1/0 for better kibana processing "
]
|
Please provide a description of the function:def enrich_items(self, ocean_backend, events=False):
max_items = self.elastic.max_items_bulk
current = 0
total = 0
bulk_json = ""
items = ocean_backend.fetch()
images_items = {}
url = self.elastic.index_url + '/items/_bulk'
logger.debug("Adding items to %s (in %i packs)", self.elastic.anonymize_url(url), max_items)
for item in items:
if current >= max_items:
total += self.elastic.safe_put_bulk(url, bulk_json)
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("Added %i items to %s (%0.2f MB)", total, self.elastic.anonymize_url(url), json_size)
bulk_json = ""
current = 0
rich_item = self.get_rich_item(item)
data_json = json.dumps(rich_item)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % \
(item[self.get_field_unique_id()])
bulk_json += data_json + "\n" # Bulk document
current += 1
if rich_item['id'] not in images_items:
# Let's transform the rich_event in a rich_image
rich_item['is_docker_image'] = 1
rich_item['is_event'] = 0
images_items[rich_item['id']] = rich_item
else:
image_date = images_items[rich_item['id']]['last_updated']
if image_date <= rich_item['last_updated']:
# This event is newer for the image
rich_item['is_docker_image'] = 1
rich_item['is_event'] = 0
images_items[rich_item['id']] = rich_item
if current > 0:
total += self.elastic.safe_put_bulk(url, bulk_json)
if total == 0:
# No items enriched, nothing to upload to ES
return total
# Time to upload the images enriched items. The id is uuid+"_image"
# Normally we are enriching events for a unique image so all images
# data can be upload in one query
for image in images_items:
data = images_items[image]
data_json = json.dumps(data)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % \
(data['id'] + "_image")
bulk_json += data_json + "\n" # Bulk document
total += self.elastic.safe_put_bulk(url, bulk_json)
return total | [
" A custom enrich items is needed because apart from the enriched\n events from raw items, a image item with the last data for an image\n must be created "
]
|
Please provide a description of the function:def get_params_parser():
parser = argparse.ArgumentParser()
ElasticOcean.add_params(parser)
parser.add_argument('-g', '--debug', dest='debug', action='store_true')
parser.add_argument('-t', '--token', dest='token', help="GitHub token")
parser.add_argument('-o', '--org', dest='org', help='GitHub Organization to be analyzed')
parser.add_argument('-c', '--contact', dest='contact', help='Contact (mail) to notify events.')
parser.add_argument('--twitter', dest='twitter', help='Twitter account to notify.')
parser.add_argument('-w', '--web-dir', default='/var/www/cauldron/dashboards', dest='web_dir',
help='Redirect HTML project pages for accessing Kibana dashboards.')
parser.add_argument('-k', '--kibana-url', default='https://dashboard.cauldron.io', dest='kibana_url',
help='Kibana URL.')
parser.add_argument('-u', '--graas-url', default='https://cauldron.io', dest='graas_url',
help='GraaS service URL.')
parser.add_argument('-n', '--nrepos', dest='nrepos', type=int, default=NREPOS,
help='Number of GitHub repositories from the Organization to be analyzed (default:10)')
return parser | [
"Parse command line arguments"
]
|
Please provide a description of the function:def get_owner_repos_url(owner, token):
url_org = GITHUB_API_URL + "/orgs/" + owner + "/repos"
url_user = GITHUB_API_URL + "/users/" + owner + "/repos"
url_owner = url_org # Use org by default
try:
r = requests.get(url_org,
params=get_payload(),
headers=get_headers(token))
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if r.status_code == 403:
rate_limit_reset_ts = datetime.fromtimestamp(int(r.headers['X-RateLimit-Reset']))
seconds_to_reset = (rate_limit_reset_ts - datetime.utcnow()).seconds + 1
logging.info("GitHub rate limit exhausted. Waiting %i secs for rate limit reset." % (seconds_to_reset))
sleep(seconds_to_reset)
else:
# owner is not an org, try with a user
url_owner = url_user
return url_owner | [
" The owner could be a org or a user.\n It waits if need to have rate limit.\n Also it fixes a djando issue changing - with _\n "
]
|
Please provide a description of the function:def get_repositores(owner_url, token, nrepos):
all_repos = []
url = owner_url
while True:
logging.debug("Getting repos from: %s" % (url))
try:
r = requests.get(url,
params=get_payload(),
headers=get_headers(token))
r.raise_for_status()
all_repos += r.json()
logging.debug("Rate limit: %s" % (r.headers['X-RateLimit-Remaining']))
if 'next' not in r.links:
break
url = r.links['next']['url'] # Loving requests :)
except requests.exceptions.ConnectionError:
logging.error("Can not connect to GitHub")
break
# Remove forks
nrepos_recent = [repo for repo in all_repos if not repo['fork']]
# Sort by updated_at and limit to nrepos
nrepos_sorted = sorted(nrepos_recent, key=lambda repo: parser.parse(repo['updated_at']), reverse=True)
nrepos_sorted = nrepos_sorted[0:nrepos]
# First the small repositories to feedback the user quickly
nrepos_sorted = sorted(nrepos_sorted, key=lambda repo: repo['size'])
for repo in nrepos_sorted:
logging.debug("%s %i %s" % (repo['updated_at'], repo['size'], repo['name']))
return nrepos_sorted | [
" owner could be an org or and user "
]
|
Please provide a description of the function:def create_redirect_web_page(web_dir, org_name, kibana_url):
html_redirect =
html_redirect += \
% kibana_url
html_redirect +=
html_redirect +=
html_redirect += \
% org_name
html_redirect += \
% org_name
html_redirect +=
html_redirect +=
try:
with open(path.join(web_dir, org_name), "w") as f:
f.write(html_redirect)
except FileNotFoundError as ex:
logging.error("Wrong web dir for redirect pages: %s" % (web_dir))
logging.error(ex) | [
" Create HTML pages with the org name that redirect to\n the Kibana dashboard filtered for this org ",
"\n <html>\n <head>\n ",
"<meta http-equiv=\"refresh\" content=\"0; URL=%s/app/kibana",
"#/dashboard/Overview?_g=(filters:!(('$state':",
"(store:globalState),meta:(alias:!n,disabled:!f,index:",
"github_git_enrich,key:project,negate:!f,value:%s),",
"query:(match:(project:(query:%s,type:phrase))))),",
"refreshInterval:(display:Off,pause:!f,value:0),",
"time:(from:now-2y,mode:quick,to:now))\" />\n </head>\n </html>\n "
]
|
Please provide a description of the function:def notify_contact(mail, owner, graas_url, repos, first_repo=False):
footer =
twitter_txt = "Check Cauldron.io dashboard for %s at %s/dashboards/%s" % (owner, graas_url, owner)
twitter_url = "https://twitter.com/intent/tweet?text=" + quote_plus(twitter_txt)
twitter_url += "&via=bitergia"
if first_repo:
logging.info("Sending first email to %s" % (mail))
subject = "First repository for %s already in the Cauldron" % (owner)
else:
logging.info("Sending last email to %s" % (mail))
subject = "Your Cauldron %s dashboard is ready!" % (owner)
if first_repo:
# body = "%s/dashboards/%s\n\n" % (graas_url, owner)
# body += "First repository analized: %s\n" % (repos[0]['html_url'])
body = % (graas_url, footer)
else:
body = % (graas_url, owner, twitter_url, footer)
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = '[email protected]'
msg['To'] = mail
try:
s = smtplib.SMTP('localhost')
s.send_message(msg)
s.quit()
except ConnectionRefusedError:
logging.error("Can not notify user. Can not connect to email server.") | [
" Send an email to the contact with the details to access\n the Kibana dashboard ",
"\n--\nBitergia Cauldron Team\nhttp://bitergia.com\n ",
"\nFirst repository has been analyzed and it's already in the Cauldron. Be patient, we have just started, step by step.\n\nWe will notify you when everything is ready.\n\nMeanwhile, check latest dashboards in %s\n\nThanks,\n%s\n ",
"\nCheck it at: %s/dashboards/%s\n\nPlay with it, and send us feedback:\nhttps://github.com/Bitergia/cauldron.io/issues/new\n\nShare it on Twitter:\n%s\n\nThank you very much,\n%s\n "
]
|
Please provide a description of the function:def publish_twitter(twitter_contact, owner):
dashboard_url = CAULDRON_DASH_URL + "/%s" % (owner)
tweet = "@%s your http://cauldron.io dashboard for #%s at GitHub is ready: %s. Check it out! #oscon" \
% (twitter_contact, owner, dashboard_url)
status = quote_plus(tweet)
oauth = get_oauth()
r = requests.post(url="https://api.twitter.com/1.1/statuses/update.json?status=" + status, auth=oauth) | [
" Publish in twitter the dashboard "
]
|
Please provide a description of the function:def get_identities(self, item):
user = self.get_sh_identity(item, self.get_field_author())
yield user | [
"Return the identities from an item"
]
|
Please provide a description of the function:def get_identities(self, item):
for rol in self.roles:
if rol in item['data']:
yield self.get_sh_identity(item["data"][rol]) | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_identities(self, item):
item = item['data']
for identity in self.issue_roles:
if item[identity]:
user = self.get_sh_identity(item[identity])
if user:
yield user | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_perceval_params_from_url(cls, urls):
params = []
dparam = cls.get_arthur_params_from_url(urls)
params.append(dparam["url"])
return params | [
" Get the perceval params given the URLs for the data source "
]
|
Please provide a description of the function:def get_identities(self, item):
item = item['data']
for identity in ['creator']:
# Todo: questions has also involved and solved_by
if identity in item and item[identity]:
user = self.get_sh_identity(item[identity])
yield user
if 'answers_data' in item:
for answer in item['answers_data']:
user = self.get_sh_identity(answer[identity])
yield user | [
" Return the identities from an item "
]
|
Please provide a description of the function:def kafka_kip(enrich):
def extract_vote_and_binding(body):
vote = 0
binding = 0 # by default the votes are binding for +1
nlines = 0
for line in body.split("\n"):
if nlines > MAX_LINES_FOR_VOTE:
# The vote must be in the first MAX_LINES_FOR_VOTE lines
break
if line.startswith(">"):
# This line is from a previous email
continue
elif "+1" in line and "-1" in line:
# Report summary probably
continue
elif "to -1" in line or "is -1" in line or "= -1" in line or "-1 or" in line:
continue
elif line.startswith("+1") or " +1 " in line or line.endswith("+1") \
or " +1." in line or " +1," in line:
vote = 1
binding = 1 # by default the votes are binding for +1
if 'non-binding' in line.lower():
binding = 0
elif 'binding' in line.lower():
binding = 1
break
elif line.startswith("-1") or line.endswith(" -1") or " -1 " in line \
or " -1." in line or " -1," in line:
vote = -1
if 'non-binding' in line.lower():
binding = 0
elif 'binding' in line.lower():
binding = 1
break
nlines += 1
return (vote, binding)
def extract_kip(subject):
kip = None
if not subject:
return kip
if 'KIP' not in subject:
return kip
kip_tokens = subject.split('KIP')
if len(kip_tokens) > 2:
# [KIP-DISCUSSION] KIP-7 Security
for token in kip_tokens:
kip = extract_kip("KIP" + token)
if kip:
break
# logger.debug("Several KIPs in %s. Found: %i", subject, kip)
return kip
str_with_kip = kip_tokens[1]
if not str_with_kip:
# Sample use case subject: Create a space template for KIP
return kip
if str_with_kip[0] == '-':
try:
# KIP-120: Control
str_kip = str_with_kip[1:].split(":")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
try:
# KIP-8 Add
str_kip = str_with_kip[1:].split(" ")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
try:
# KIP-11- Authorization
str_kip = str_with_kip[1:].split("-")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
try:
# Bound fetch response size (KIP-74)
str_kip = str_with_kip[1:].split(")")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
try:
# KIP-31&
str_kip = str_with_kip[1:].split("&")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
try:
# KIP-31/
str_kip = str_with_kip[1:].split("/")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
try:
# Re: Copycat (KIP-26. PR-99) - plan on moving forward
str_kip = str_with_kip[1:].split(".")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
elif str_with_kip[0] == ' ':
try:
# KIP 20 Enable
str_kip = str_with_kip[1:].split(" ")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
try:
# Re: [DISCUSS] KIP 88: DescribeGroups Protocol Update
str_kip = str_with_kip[1:].split(":")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
try:
# [jira] [Updated] (KAFKA-5092) KIP 141- ProducerRecordBuilder
str_kip = str_with_kip[1:].split("-")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
elif str_with_kip[0] == ':':
try:
# Re: [VOTE] KIP:71 Enable log compaction and deletion to co-exist
str_kip = str_with_kip[1:].split(" ")[0]
kip = int(str_kip)
return kip
except ValueError:
pass
if not kip:
# logger.debug("Can not extract KIP from %s", subject)
pass
return kip
def lazy_result(votes):
yes = 0
yes_binding = 0
veto = 0
veto_binding = 0
result = -1
for (vote, binding) in votes:
if vote == 1:
if binding:
yes_binding += 1
else:
yes += 1
if vote == -1:
if binding:
veto_binding += 1
else:
veto += 1
if veto_binding == 0 and yes_binding >= 3:
result = 1
return result
def add_kip_final_status_field(enrich):
total = 0
for eitem in enrich.fetch():
if "kip" not in eitem:
# It is not a KIP message
continue
if eitem['kip'] in enrich.kips_final_status:
eitem.update({"kip_final_status":
enrich.kips_final_status[eitem['kip']]})
else:
logger.warning("No final status for kip: %i", eitem['kip'])
eitem.update({"kip_final_status": None})
yield eitem
total += 1
logger.info("Total eitems with kafka final status kip field %i", total)
def add_kip_time_status_fields(enrich):
total = 0
max_inactive_days = 90 # days
enrich.kips_final_status = {} # final status for each kip
for eitem in enrich.fetch():
# kip_status: adopted (closed), discussion (open), voting (open),
# inactive (open), discarded (closed)
# kip_start_end: discuss_start, discuss_end, voting_start, voting_end
kip_fields = {
"kip_status": None,
"kip_discuss_time_days": None,
"kip_discuss_inactive_days": None,
"kip_voting_time_days": None,
"kip_voting_inactive_days": None,
"kip_is_first_discuss": 0,
"kip_is_first_vote": 0,
"kip_is_last_discuss": 0,
"kip_is_last_vote": 0,
"kip_result": None,
"kip_start_end": None
}
if "kip" not in eitem:
# It is not a KIP message
continue
kip = eitem["kip"]
kip_date = parser.parse(eitem["email_date"])
if eitem['kip_is_discuss']:
kip_fields["kip_discuss_time_days"] = \
get_time_diff_days(enrich.kips_dates[kip]['kip_min_discuss'],
enrich.kips_dates[kip]['kip_max_discuss'])
# Detect first and last discuss messages
if kip_date == enrich.kips_dates[kip]['kip_min_discuss']:
kip_fields['kip_is_first_discuss'] = 1
kip_fields['kip_start_end'] = 'discuss_start'
elif kip_date == enrich.kips_dates[kip]['kip_max_discuss']:
kip_fields['kip_is_last_discuss'] = 1
kip_fields['kip_start_end'] = 'discuss_end'
# Detect discussion status
if "kip_min_vote" not in enrich.kips_dates[kip]:
kip_fields['kip_status'] = 'discussion'
max_discuss_date = enrich.kips_dates[kip]['kip_max_discuss']
kip_fields['kip_discuss_inactive_days'] = \
get_time_diff_days(max_discuss_date.replace(tzinfo=None),
datetime.utcnow())
if eitem['kip_is_vote']:
kip_fields["kip_voting_time_days"] = \
get_time_diff_days(enrich.kips_dates[kip]['kip_min_vote'],
enrich.kips_dates[kip]['kip_max_vote'])
# Detect first and last discuss messages
if kip_date == enrich.kips_dates[kip]['kip_min_vote']:
kip_fields['kip_is_first_vote'] = 1
kip_fields['kip_start_end'] = 'voting_start'
elif kip_date == enrich.kips_dates[kip]['kip_max_vote']:
kip_fields['kip_is_last_vote'] = 1
kip_fields['kip_start_end'] = 'voting_end'
# Detect discussion status
kip_fields['kip_status'] = 'voting'
max_vote_date = enrich.kips_dates[kip]['kip_max_vote']
kip_fields['kip_voting_inactive_days'] = \
get_time_diff_days(max_vote_date.replace(tzinfo=None),
datetime.utcnow())
# Now check if there is a result from enrich.kips_scores
kip_fields['kip_result'] = lazy_result(enrich.kips_scores[kip])
if kip_fields['kip_result'] == 1:
kip_fields['kip_status'] = 'adopted'
elif kip_fields['kip_result'] == -1:
kip_fields['kip_status'] = 'discarded'
# And now change the status inactive
if kip_fields['kip_status'] not in ['adopted', 'discarded']:
inactive_days = kip_fields['kip_discuss_inactive_days']
if inactive_days and inactive_days > max_inactive_days:
kip_fields['kip_status'] = 'inactive'
inactive_days = kip_fields['kip_voting_inactive_days']
if inactive_days and inactive_days > max_inactive_days:
kip_fields['kip_status'] = 'inactive'
# The final status is in the kip_is_last_discuss or kip_is_last_vote
# It will be filled in the next enrichment round
eitem.update(kip_fields)
if eitem['kip'] not in enrich.kips_final_status:
enrich.kips_final_status[kip] = None
if eitem['kip_is_last_discuss'] and not enrich.kips_final_status[kip]:
enrich.kips_final_status[kip] = kip_fields['kip_status']
if eitem['kip_is_last_vote']:
enrich.kips_final_status[kip] = kip_fields['kip_status']
yield eitem
total += 1
logger.info("Total eitems with kafka extra kip fields %i", total)
def add_kip_fields(enrich):
total = 0
enrich.kips_dates = {
0: {
"kip_min_discuss": None,
"kip_max_discuss": None,
"kip_min_vote": None,
"kip_max_vote": None,
}
}
enrich.kips_scores = {}
# First iteration
for eitem in enrich.fetch():
kip_fields = {
"kip_is_vote": 0,
"kip_is_discuss": 0,
"kip_vote": 0,
"kip_binding": 0,
"kip": 0,
"kip_type": "general"
}
kip = extract_kip(eitem['Subject'])
if not kip:
# It is not a KIP message
continue
if kip not in enrich.kips_dates:
enrich.kips_dates[kip] = {}
if kip not in enrich.kips_scores:
enrich.kips_scores[kip] = []
kip_date = parser.parse(eitem["email_date"])
# Analyze the subject to fill the kip fields
if '[discuss]' in eitem['Subject'].lower() or \
'[kip-discussion]' in eitem['Subject'].lower() or \
'[discussion]' in eitem['Subject'].lower():
kip_fields['kip_is_discuss'] = 1
kip_fields['kip_type'] = "discuss"
kip_fields['kip'] = kip
# Update kip discuss dates
if "kip_min_discuss" not in enrich.kips_dates[kip]:
enrich.kips_dates[kip].update({
"kip_min_discuss": kip_date,
"kip_max_discuss": kip_date
})
else:
if enrich.kips_dates[kip]["kip_min_discuss"] >= kip_date:
enrich.kips_dates[kip]["kip_min_discuss"] = kip_date
if enrich.kips_dates[kip]["kip_max_discuss"] <= kip_date:
enrich.kips_dates[kip]["kip_max_discuss"] = kip_date
if '[vote]' in eitem['Subject'].lower():
kip_fields['kip_is_vote'] = 1
kip_fields['kip_type'] = "vote"
kip_fields['kip'] = kip
if 'body_extract' in eitem:
(vote, binding) = extract_vote_and_binding(eitem['body_extract'])
enrich.kips_scores[kip] += [(vote, binding)]
kip_fields['kip_vote'] = vote
kip_fields['kip_binding'] = binding
else:
logger.debug("Message %s without body", eitem['Subject'])
# Update kip discuss dates
if "kip_min_vote" not in enrich.kips_dates[kip]:
enrich.kips_dates[kip].update({
"kip_min_vote": kip_date,
"kip_max_vote": kip_date
})
else:
if enrich.kips_dates[kip]["kip_min_vote"] >= kip_date:
enrich.kips_dates[kip]["kip_min_vote"] = kip_date
if enrich.kips_dates[kip]["kip_max_vote"] <= kip_date:
enrich.kips_dates[kip]["kip_max_vote"] = kip_date
eitem.update(kip_fields)
yield eitem
total += 1
logger.info("Total eitems with kafka kip fields %i", total)
logger.debug("Doing kafka_kip study from %s", enrich.elastic.anonymize_url(enrich.elastic.index_url))
# First iteration with the basic fields
eitems = add_kip_fields(enrich)
enrich.elastic.bulk_upload(eitems, enrich.get_field_unique_id())
# Second iteration with the final time and status fields
eitems = add_kip_time_status_fields(enrich)
enrich.elastic.bulk_upload(eitems, enrich.get_field_unique_id())
# Third iteration to compute the end status field for all KIPs
eitems = add_kip_final_status_field(enrich)
enrich.elastic.bulk_upload(eitems, enrich.get_field_unique_id()) | [
" Kafka Improvement Proposals process study ",
" Extracts the vote and binding for a KIP process included in message body ",
" Extracts a KIP number from an email subject ",
" Compute the result of a votation using lazy consensus\n which requires 3 binding +1 votes and no binding vetoes.\n ",
" Add kip final status field ",
" Add kip fields with final status and times ",
" Add extra fields needed for kip analysis"
]
|
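The KIP-number extraction that kafka_kip's inner extract_kip helper performs with a chain of try/except blocks can be approximated compactly with a regular expression; this stand-in is only an illustration and its edge-case behaviour may differ slightly from the original:

    import re

    def extract_kip_approx(subject):
        # Grab the first integer following 'KIP', tolerating '-', ':' or spaces.
        if not subject or 'KIP' not in subject:
            return None
        match = re.search(r'KIP[-: ]*(\d+)', subject)
        return int(match.group(1)) if match else None

    for subject in ["[DISCUSS] KIP-120: Control", "Re: [VOTE] KIP 88: DescribeGroups",
                    "Create a space template for KIP"]:
        print(subject, '->', extract_kip_approx(subject))
    # -> 120, 88 and None respectively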
Please provide a description of the function:def add_identity(cls, db, identity, backend):
uuid = None
try:
uuid = api.add_identity(db, backend, identity['email'],
identity['name'], identity['username'])
logger.debug("New sortinghat identity %s %s,%s,%s ",
uuid, identity['username'], identity['name'], identity['email'])
profile = {"name": identity['name'] if identity['name'] else identity['username'],
"email": identity['email']}
api.edit_profile(db, uuid, **profile)
except AlreadyExistsError as ex:
uuid = ex.eid
except InvalidValueError as ex:
logger.warning("Trying to add a None identity. Ignoring it.")
except UnicodeEncodeError as ex:
logger.warning("UnicodeEncodeError. Ignoring it. %s %s %s",
identity['email'], identity['name'],
identity['username'])
except Exception as ex:
logger.warning("Unknown exception adding identity. Ignoring it. %s %s %s",
identity['email'], identity['name'],
identity['username'], exc_info=True)
if 'company' in identity and identity['company'] is not None:
try:
api.add_organization(db, identity['company'])
api.add_enrollment(db, uuid, identity['company'],
datetime(1900, 1, 1),
datetime(2100, 1, 1))
except AlreadyExistsError:
pass
return uuid | [
" Load and identity list from backend in Sorting Hat "
]
|
Please provide a description of the function:def add_identities(cls, db, identities, backend):
logger.info("Adding the identities to SortingHat")
total = 0
for identity in identities:
try:
cls.add_identity(db, identity, backend)
total += 1
except Exception as e:
logger.error("Unexpected error when adding identities: %s" % e)
continue
logger.info("Total identities added to SH: %i", total) | [
" Load identities list from backend in Sorting Hat "
]
|
Please provide a description of the function:def remove_identity(cls, sh_db, ident_id):
success = False
try:
api.delete_identity(sh_db, ident_id)
logger.debug("Identity %s deleted", ident_id)
success = True
except Exception as e:
logger.debug("Identity not deleted due to %s", str(e))
return success | [
"Delete an identity from SortingHat.\n\n :param sh_db: SortingHat database\n :param ident_id: identity identifier\n "
]
|
Please provide a description of the function:def remove_unique_identity(cls, sh_db, uuid):
success = False
try:
api.delete_unique_identity(sh_db, uuid)
logger.debug("Unique identity %s deleted", uuid)
success = True
except Exception as e:
logger.debug("Unique identity not deleted due to %s", str(e))
return success | [
"Delete a unique identity from SortingHat.\n\n :param sh_db: SortingHat database\n :param uuid: Unique identity identifier\n "
]
|
Please provide a description of the function:def unique_identities(cls, sh_db):
try:
for unique_identity in api.unique_identities(sh_db):
yield unique_identity
except Exception as e:
logger.debug("Unique identities not returned from SortingHat due to %s", str(e)) | [
"List the unique identities available in SortingHat.\n\n :param sh_db: SortingHat database\n "
]
|
Please provide a description of the function:def get_identities(self, item):
data = item['data']
if 'assigned_to' in data:
user = self.get_sh_identity(data, 'assigned_to')
yield user
author = self.get_sh_identity(data, 'author')
yield author | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_identities(self, item):
# question
user = self.get_sh_identity(item, self.get_field_author())
yield user
# answers
if 'answers' in item['data']:
for answer in item['data']['answers']:
# avoid "answered_by" : "This post is a wiki" corner case
if type(answer['answered_by']) is dict:
user = self.get_sh_identity(answer['answered_by'])
yield user
if 'comments' in answer:
for comment in answer['comments']:
commenter = self.get_sh_identity(comment)
yield commenter | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_identities(self, item):
user = self.get_sh_identity(item, self.get_field_author())
yield user
# Get the identities from the releases
for release in item['data']['releases']:
user = self.get_sh_identity(release['module'], self.get_field_author())
yield user | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_rich_events(self, item):
module = item['data']
if not item['data']['releases']:
return []
for release in item['data']['releases']:
event = self.get_rich_item(item)
# Update specific fields for this release
event["uuid"] += "_" + release['slug']
event["author_url"] = 'https://forge.puppet.com/' + release['module']['owner']['username']
event["gravatar_id"] = release['module']['owner']['gravatar_id']
event["downloads"] = release['downloads']
event["slug"] = release['slug']
event["version"] = release['version']
event["uri"] = release['uri']
event["validation_score"] = release['validation_score']
event["homepage_url"] = None
if 'project_page' in release['metadata']:
event["homepage_url"] = release['metadata']['project_page']
event["issues_url"] = None
if "issues_url" in release['metadata']:
event["issues_url"] = release['metadata']['issues_url']
event["tags"] = release['tags']
event["license"] = release['metadata']['license']
event["source_url"] = release['metadata']['source']
event["summary"] = release['metadata']['summary']
event["metadata__updated_on"] = parser.parse(release['updated_at']).isoformat()
if self.sortinghat:
release["metadata__updated_on"] = event["metadata__updated_on"] # Needed in get_item_sh logic
event.update(self.get_item_sh(release))
if self.prjs_map:
event.update(self.get_item_project(event))
event.update(self.get_grimoire_fields(release["created_at"], "release"))
yield event | [
"\n Get the enriched events related to a module\n "
]
|
Please provide a description of the function:def _connect(self):
try:
db = pymysql.connect(user=self.user, passwd=self.passwd,
host=self.host, port=self.port,
db=self.shdb, use_unicode=True)
return db, db.cursor()
except Exception:
logger.error("Database connection error")
raise | [
"Connect to the MySQL database.\n "
]
|
Please provide a description of the function:def execute(self, query):
# sql = query.format(scm_db = self.scmdb,
# sh_db = self.shdb,
# prj_db = self.prjdb)
results = int(self.cursor.execute(query))
if results > 0:
result1 = self.cursor.fetchall()
return result1
else:
return [] | [
"Execute an SQL query with the corresponding database.\n The query can be \"templated\" with {scm_db} and {sh_db}.\n "
]
|
Please provide a description of the function:def feed_arthur():
logger.info("Collecting items from redis queue")
db_url = 'redis://localhost/8'
conn = redis.StrictRedis.from_url(db_url)
logger.debug("Redis connection established with %s.", db_url)
# Get and remove queued items in an atomic transaction
pipe = conn.pipeline()
pipe.lrange(Q_STORAGE_ITEMS, 0, -1)
pipe.ltrim(Q_STORAGE_ITEMS, 1, 0)
items = pipe.execute()[0]
for item in items:
arthur_item = pickle.loads(item)
if arthur_item['tag'] not in arthur_items:
arthur_items[arthur_item['tag']] = []
arthur_items[arthur_item['tag']].append(arthur_item)
for tag in arthur_items:
logger.debug("Items for %s: %i", tag, len(arthur_items[tag])) | [
" Feed Ocean with backend data collected from arthur redis queue"
]
|