text (string, lengths 15–7.82k) | ids (sequence, lengths 1–7)
---|---|
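Before the data rows, here is a minimal sketch of how one record of this dump could be represented, assuming the two-column schema above: a `text` string holding a Python function whose name is masked as `METHOD_NAME`, paired with an `ids` list of integers. The `Record` class is an illustrative assumption, not part of any official dataset tooling; the sample values are copied from one of the rows below.

```python
# Illustrative sketch only: a hypothetical container for one (text, ids) row
# of this dump. Field ranges come from the header; the class itself is an
# assumption, not official dataset tooling.
from dataclasses import dataclass
from typing import List

@dataclass
class Record:
    text: str        # masked function source, 15 to 7.82k characters
    ids: List[int]   # 1 to 7 integer ids per row

# A sample row, with values taken from one of the entries below.
example = Record(
    text='def METHOD_NAME(self):\n    return self.name',
    ids=[19, 156],
)
print(len(example.text), example.ids)
```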
def METHOD_NAME(cell_type, sign, order, dtype):
if cell_type == CellType.triangle or cell_type == CellType.quadrilateral:
mesh = create_unit_square(MPI.COMM_WORLD, 2, 2, cell_type, dtype=dtype)
else:
mesh = create_unit_cube(MPI.COMM_WORLD, 2, 2, 2, cell_type, dtype=dtype)
U_el = mixed_element([
element(basix.ElementFamily.P, cell_type.name, order, rank=1),
element(basix.ElementFamily.N1E, cell_type.name, order)])
U = FunctionSpace(mesh, U_el)
u, p = ufl.TrialFunctions(U)
v, q = ufl.TestFunctions(U)
f = form(inner(u, v) * ufl.dx + inner(p, q)(sign) * ufl.dS, dtype=dtype)
A = dolfinx.fem.assemble_matrix(f)
A.scatter_reverse()
tol = np.sqrt(np.finfo(dtype).eps)
check_symmetry(A, tol) | [
9,
1474,
669,
798,
669,
1029
] |
def METHOD_NAME(self):
np.random.permutation(self.n) | [
104,
2840,
962
] |
def METHOD_NAME(R: npt.ArrayLike, strict_check: bool = ...) -> np.ndarray: ... | [
4437,
3413,
4438,
280,
923,
430
] |
def METHOD_NAME(self):
"""
Retrieves the name of the thermal
Returns:
string: The name of the thermal
"""
return self.name | [
19,
156
] |
def METHOD_NAME(cmd):
print(f'Running Forseti command: {" ".join(cmd)}')
return subprocess.run(cmd, stderr=subprocess.PIPE,
stdout=subprocess.PIPE) | [
22,
356
] |
def METHOD_NAME(self, script):
"""register command line arguments
:param script: Dirac.Core.Base Script Class
:type script: DIRAC.Core.Base.Script
"""
script.registerSwitch("G:", "GroupSize=", "Number of Files per transformation task", self.setGroupSize)
script.registerSwitch("R:", "GroupName=", "TransformationGroup Name", self.setGroupName)
script.registerSwitch("S:", "SourceSEs=", "SourceSE(s) to use, comma separated list", self.setSourceSE)
script.registerSwitch("N:", "Extraname=", "String to append to transformation name", self.setExtraname)
script.registerSwitch("P:", "Plugin=", "Plugin to use for transformation", self.setPlugin)
script.registerSwitch("T:", "Flavour=", "Flavour to create: Replication or Moving", self.setTransFlavour)
script.registerSwitch("K:", "MetaKey=", "Meta Key to use: TransformationID", self.setMetaKey)
script.registerSwitch("M:", "MetaData=", "MetaData to use Key/Value Pairs: 'DataType:REC,'", self.setMetadata)
script.registerSwitch("x", "Enable", "Enable the transformation creation, otherwise dry-run", self.setEnable)
useMessage = []
useMessage.append("Create one replication transformation for each MetaValue given")
useMessage.append("Is running in dry-run mode, unless enabled with -x")
useMessage.append("MetaValue and TargetSEs can be comma separated lists")
useMessage.append("Usage:")
useMessage.append(
" %s <MetaValue1[,val2,val3]> <TargetSEs> [-G<Files>] [-S<SourceSEs>]"
"[-N<ExtraName>] [-T<Type>] [-M<Key>] [-K...] -x" % script.scriptName
)
script.setUsageMessage("\n".join(useMessage)) | [
372,
6612
] |
def METHOD_NAME(self):
"""
Test softwareupdate.list_available
"""
# Can't predict what will be returned, so can only test that the return
# is the correct type, dict
self.assertIsInstance(self.run_function("softwareupdate.list_available"), dict) | [
9,
245,
1272
] |
def METHOD_NAME(function_name, state, prompt, input_ids, input_embeds):
for extension, _ in iterator():
if hasattr(extension, function_name):
prompt, input_ids, input_embeds = getattr(extension, function_name)(state, prompt, input_ids, input_embeds)
return prompt, input_ids, input_embeds | [
231,
1345,
583
] |
def METHOD_NAME(self) -> None:
"""Test _validate_supported_codec raise ValueError."""
with pytest.raises(ValidationError) as excinfo:
ArgsDataModel(codec="foo")
assert excinfo.value.errors() == [
{
"loc": ("codec",),
"msg": f"Codec 'foo' must be one of: {', '.join(CODECS)}",
"type": "value_error",
}
] | [
9,
187,
616,
7108,
241,
99,
168
] |
def METHOD_NAME(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""Calculate circular fingerprint.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
A numpy array of circular fingerprint.
"""
try:
from rdkit import Chem, DataStructs
from rdkit.Chem import rdMolDescriptors
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.sparse:
info: Dict = {}
fp = rdMolDescriptors.GetMorganFingerprint(
datapoint,
self.radius,
useChirality=self.chiral,
useBondTypes=self.bonds,
useFeatures=self.features,
bitInfo=info)
fp = fp.GetNonzeroElements() # convert to a dict
# generate SMILES for fragments
if self.smiles:
fp_smiles = {}
for fragment_id, count in fp.items():
root, radius = info[fragment_id][0]
env = Chem.FindAtomEnvironmentOfRadiusN(
datapoint, radius, root)
frag = Chem.PathToSubmol(datapoint, env)
smiles = Chem.MolToSmiles(frag)
fp_smiles[fragment_id] = {'smiles': smiles, 'count': count}
fp = fp_smiles
else:
if self.is_counts_based:
fp_sparse = rdMolDescriptors.GetHashedMorganFingerprint(
datapoint,
self.radius,
nBits=self.size,
useChirality=self.chiral,
useBondTypes=self.bonds,
useFeatures=self.features)
fp = np.zeros(
(self.size,), dtype=float
) # initialise numpy array of zeros (shape: (required size,))
DataStructs.ConvertToNumpyArray(fp_sparse, fp)
else:
fp = rdMolDescriptors.GetMorganFingerprintAsBitVect(
datapoint,
self.radius,
nBits=self.size,
useChirality=self.chiral,
useBondTypes=self.bonds,
useFeatures=self.features)
fp = np.asarray(fp, dtype=float)
return fp | [
11246
] |
def METHOD_NAME(fiberassignfiles):
"""
Return table of TARGETID, SUBPRIORITY used in input fiberassign files
Args:
fiberassignfiles: list of input fiberassign files
Returns: dict[dark|bright|sky] = ndarray with columns TARGETID, SUBPRIORITY
for targets matching that observing condition (or sky targets)
"""
log = get_logger()
# - allow duplicate inputs, but don't process multiple tiles
processed = set()
subprio_tables = dict(dark=list(), bright=list(), sky=list())
for filename in fiberassignfiles:
# - Have we already processed this file (e.g. from an earlier expid)?
basename = os.path.basename(filename)
if basename in processed:
continue
else:
processed.add(basename)
with fitsio.FITS(filename) as fx:
hdr = fx[0].read_header()
if 'SURVEY' not in hdr:
log.warning(f"Skipping {filename} missing SURVEY keyword")
continue
if 'FAPRGRM' not in hdr:
log.warning(f"Skipping {filename} missing FAPRGRM keyword")
continue
program = hdr['FAPRGRM'].lower()
if program not in ('dark', 'bright'):
log.warning(f"Skipping {filename} with FAPRGRM={program}")
continue
if hdr['SURVEY'].lower() != 'main':
log.info(f"Skipping {filename} with SURVEY {hdr['SURVEY']} != main")
continue
log.info(f'Reading {filename}')
sp = fx['TARGETS'].read(columns=['TARGETID', 'SUBPRIORITY', 'DESI_TARGET'])
# - Separate skies from non-skies
skymask = desi_mask.mask('SKY|SUPP_SKY|BAD_SKY')
iisky = (sp['DESI_TARGET'] & skymask) != 0
subprio_tables['sky'].append(sp[iisky])
subprio_tables[program].append(sp[~iisky])
log.info('Stacking individual fiberassign inputs')
for program in subprio_tables.keys():
subprio_tables[program] = np.hstack(subprio_tables[program])
# - QA checks on basic assumptions about uniqueness
log.info('Checking assumptions about TARGETID:SUBPRIORITY uniqueness')
for program in ['dark', 'bright', 'sky']:
subprio = subprio_tables[program]
tid, sortedidx = np.unique(subprio['TARGETID'], return_index=True)
# - sky can appear multiple times, but with same SUBPRIORITY
if program == 'sky':
subpriodict = dict()
for targetid, sp in zip(
subprio['TARGETID'], subprio['SUBPRIORITY']):
if targetid in subpriodict:
if sp != subpriodict[targetid]:
log.error(f'{program} TARGETID {targetid} has multiple subpriorities')
else:
subpriodict[targetid] = sp
# - but other programs should have each TARGETID exactly once
else:
if len(tid) != len(subprio):
log.error(f'Some {program} TARGETIDs appear multiple times')
log.info(f'Sorting {program} targets by TARGETID')
subprio_tables[program] = subprio[sortedidx]
return subprio_tables | [
19,
-1,
-1
] |
def METHOD_NAME(
response: lib.Element, settings: provider_utils.Settings
) -> typing.Tuple[models.ShipmentDetails, typing.List[models.Message]]:
errors = provider_error.parse_error_response(response, settings)
details = _extract_details(response, settings)
return details, errors | [
214,
8260,
17
] |
def METHOD_NAME(self, request):
with (Path(plugins_path) / f"{request.param}.py").open(encoding="utf-8") as handle:
METHOD_NAME = next(tokenize.generate_tokens(handle.readline), None)
assert type(METHOD_NAME) is tokenize.TokenInfo, "Parses the first token"
assert METHOD_NAME.type == tokenize.STRING, "First token is a string"
return METHOD_NAME | [
14367
] |
def METHOD_NAME(self, _, **attrs):
raise NotImplementedError(
'upload_to_gcs should not be called for CensusPopEstimatesSC') | [
172,
24,
4191
] |
def METHOD_NAME(
action: str,
msg: Any = "",
style: Optional[IntSeq] = None,
indent: int = 10,
quiet: Union[bool, StrictBool] = False,
file_: TextIO = sys.stdout,
) -> Optional[str]:
"""Print string with common format."""
if quiet:
return None # HACK: Satisfy MyPy
_msg = str(msg)
action = action.rjust(indent, " ")
if not style:
return action + _msg
out = style + [action] + Style.RESET + [INDENT, _msg] # type: ignore
print(*out, sep="", file=file_)
return None # HACK: Satisfy MyPy | [
3086
] |
def METHOD_NAME(self, section, option, **kwargs):
kwargs.setdefault('vars', self._get_section_env_vars(section))
return super(Settings, self).METHOD_NAME(section, option, **kwargs) | [
19
] |
def METHOD_NAME(collection, filters, regex, ignorecase, replace=True):
result_filters = dict()
queryset = collection.objects
if "order_by" in filters:
queryset = queryset.order_by(filters.pop("order_by"))
for key, value in filters.items():
key = key.replace(".", "__")
if replace:
if key in SEARCH_REPLACE:
key = SEARCH_REPLACE[key]
if regex and isinstance(value, str):
flags = 0
if ignorecase:
flags |= re.I
value = re.compile(value, flags=flags)
if isinstance(value, list) and not key.endswith("__in"):
key += "__all"
result_filters[key] = value
q = Q()
for alias in collection.SEARCH_ALIASES:
if alias in filters:
q &= Q(**{collection.SEARCH_ALIASES[alias]: result_filters[alias]}) | Q(
**{alias: result_filters[alias]}
)
result_filters.pop(alias)
print("Filter: {}".format(result_filters), q.to_query(collection))
return queryset.filter(**result_filters).filter(q) | [
19,
2386
] |
def METHOD_NAME(self, key: str, bucket: str = '') -> bytes:
if not bucket:
bucket = self.browser_bucket
s3_object = self.s3_client.get_object(Bucket=bucket, Key=key)
return s3_object['Body'].read() | [
203,
171
] |
def METHOD_NAME( self, section ):
pass | [
4007,
1287,
538
] |
def METHOD_NAME(self):
config = {
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"embeddings_initializer": tf.keras.initializers.serialize(
self.embeddings_initializer
),
"embeddings_regularizer": tf.keras.regularizers.serialize(
self.embeddings_regularizer
),
"embeddings_constraint": tf.keras.constraints.serialize(
self.embeddings_constraint
),
"mask_zero": self.mask_zero,
"combiner": self.combiner,
}
base_config = super(EmbeddingBag, self).METHOD_NAME()
return dict(list(base_config.items()) + list(config.items())) | [
19,
200
] |
async def METHOD_NAME(self, tenant_id, labels):
# create an agent
agent_id = await api.agents.register_agent(
tenant_id=tenant_id,
labels=labels,
)
agent = await models.Agent.where(id=agent_id).first(
{"id", "tenant_id", "labels"}
)
assert agent.id == agent_id
assert agent.tenant_id == tenant_id
assert agent.labels == [] | [
9,
372,
1849,
41,
35,
415
] |
def METHOD_NAME(self):
p = subprocess.Popen(self.GET_HWSKU_CMD, universal_newlines=True, stdout=subprocess.PIPE)
out, err = p.communicate()
return out.rstrip('\n') | [
19,
4162,
156
] |
def METHOD_NAME(self):
app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
server = werkzeug_test.Client(app, wrappers.Response)
with self.assertRaisesRegex(
errors.InvalidArgumentError, "cannot be JSON decoded."
):
response = server.get(
"",
query_string={
"tensorBoardFeatureFlags": "some_invalid_json {} {}",
},
) | [
9,
539,
144,
41,
763,
130,
13831
] |
def METHOD_NAME(argb, alpha=None) -> str:
"""
This function can return either an hexadecimal or rgba-formatted color code.
By default this function returns hexadecimal color codes.
If alpha parameter is specified, then the function will return
an rgba-formatted color code.
"""
rgba_base = "rgba({0}, {1}, {2}, {3})"
red = monet.redFromArgb(argb)
green = monet.greenFromArgb(argb)
blue = monet.blueFromArgb(argb)
if not alpha:
alpha = monet.alphaFromArgb(argb)
if alpha in (255, 0.0):
return monet.hexFromArgb(argb)
return rgba_base.format(red, green, blue, alpha) | [
-1,
24,
36,
544
] |
def METHOD_NAME(self):
self.trading_control.current_transaction_count = 5069
self.trading_control.current_failed_transaction_count = 5069
self.trading_control._next_hour = None
self.trading_control._set_next_hour()
now = datetime.datetime.utcnow()
now_1 = (now + datetime.timedelta(hours=1)).replace(
minute=0, second=0, microsecond=0
)
self.assertEqual(self.trading_control._next_hour, now_1)
self.assertEqual(self.trading_control.current_transaction_count, 0)
self.assertEqual(self.trading_control.current_failed_transaction_count, 0) | [
9,
0,
243,
3425
] |
def METHOD_NAME(text: str) -> "Placement":
"""Create a placement from a text string.
Parameters
----------
text: str
The text string.
Returns
-------
placement: Placement
The placement.
"""
return _ffi_api.PlacementFromText(text) | [
280,
526
] |
def METHOD_NAME(self, X):
if len(self.classifiers_list) == 1:
return self.classifiers_list[0].METHOD_NAME(X)
else:
# Take a voting of the classifiers
predictions_list = [
classifier.METHOD_NAME(X) for classifier in self.classifiers_list
]
df = pd.DataFrame(predictions_list).transpose()
predictions = df.mode(axis=1)
if (
predictions.shape[1] > 1
): # When there are multiple modes, pick the first one
predictions = predictions.iloc[:, 0]
predictions = predictions.squeeze() # converts a dataframe to series.
return predictions | [
2103
] |
def METHOD_NAME(self, *args, **kwargs):
self.resource.model.create_object({"id": "abc"})
self.resource.model.get_record(record_id="abc")
message = "`get_record()` is deprecated, use `get_object()` instead."
self.mocked_warnings.assert_called_with(message, DeprecationWarning) | [
9,
19,
148
] |
def METHOD_NAME(arr1, arr2, name, rows=None):
"""
The first must be object
"""
if rows is None:
rows = np.arange(arr1.size)
for i, row in enumerate(rows):
if ((sys.version_info >= (3, 0, 0) and isinstance(arr2[i], bytes))
or isinstance(arr2[i], str)):
if sys.version_info >= (3, 0, 0) and isinstance(arr1[row], bytes):
_arr1row = arr1[row].decode('ascii')
else:
_arr1row = arr1[row]
assert _arr1row == arr2[i], (
"%s str el %d equal" % (name, i)
)
else:
delement = arr2[i]
orig = arr1[row]
s = len(orig)
compare_array(
orig, delement[0:s], "%s num el %d equal" % (name, i)
) | [
979,
279,
877
] |
def METHOD_NAME(self):
self.mkfs()
passphrase_new = 'sd982jhd'
proc = subprocess.Popen(
self.s3ql_cmd_argv('s3qladm')
+ [
'--quiet',
'--log',
'none',
'--authfile',
'/dev/null',
'passphrase',
self.storage_url,
],
stdin=subprocess.PIPE,
universal_newlines=True,
)
print(self.passphrase, file=proc.stdin)
print(passphrase_new, file=proc.stdin)
print(passphrase_new, file=proc.stdin)
proc.stdin.close()
self.assertEqual(proc.wait(), 0)
plain_backend = local.Backend(Namespace(storage_url=self.storage_url))
backend = ComprencBackend(passphrase_new.encode(), ('zlib', 6), plain_backend)
backend.fetch('s3ql_passphrase') # will fail with wrong pw | [
9,
4460
] |
def METHOD_NAME(self, node, parent, index):
alias = self.anchors[node]
if node in self.serialized_nodes:
self.emit(AliasEvent(alias))
else:
self.serialized_nodes[node] = True
self.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolve(ScalarNode, node.value, (False, True))
implicit = (node.tag == detected_tag), (node.tag == default_tag)
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
style=node.style))
elif isinstance(node, SequenceNode):
implicit = (node.tag
== self.resolve(SequenceNode, node.value, True))
self.emit(SequenceStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
index = 0
for item in node.value:
self.METHOD_NAME(item, node, index)
index += 1
self.emit(SequenceEndEvent())
elif isinstance(node, MappingNode):
implicit = (node.tag
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
for key, value in node.value:
self.METHOD_NAME(key, node, None)
self.METHOD_NAME(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver() | [
183,
1716
] |
def METHOD_NAME(symbol):
"""Strips rust function hashes of the given symbol."""
if symbol:
match = HASH_FUNC_RE.match(symbol)
if match:
return match.group(1)
return symbol | [
1360,
1608
] |
def METHOD_NAME(self):
query = QueryDict("type=small_numbers&paper_size=a4")
generator = SortingNetworkCardsResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Small numbers (1 to 10) - a4"
) | [
9,
3332,
565,
3333,
3334
] |
def METHOD_NAME(self, name, contents):
path = os.path.join(self._md_dir, name)
with open(path, 'w') as _:
_.write(contents)
touch(path)
return path | [
129,
941
] |
def METHOD_NAME(self):
from kivy.lang import Builder
from kivy.uix.behaviors.knspace import knspace, KNSpaceBehavior
from kivy.uix.widget import Widget
class NamedWidget(KNSpaceBehavior, Widget):
pass
nw = NamedWidget()
w = Widget()
w2 = Widget()
before = knspace.fork()
knspace.widget2 = w
after = knspace.fork()
self.assertEqual(knspace.widget2, w)
self.assertEqual(before.widget2, w)
self.assertEqual(after.widget2, w)
child = knspace.fork()
child2 = knspace.fork()
grandchild = child.fork()
child.widget3 = w2
self.assertEqual(grandchild.widget3, w2)
self.assertEqual(child.widget3, w2)
# this could actually be none rather than raising, depending
# on when the class was instantiated. So if this fails, change the
# test to assert is none.
self.assertIsNone(knspace.widget3)
grandchild.parent = child2
self.assertIsNone(grandchild.widget3) | [
9,
6939
] |
def METHOD_NAME(manager):
manager.test_window("one")
assert_dimensions(manager, 0, 0, 796, 596)
manager.test_window("two")
assert_dimensions(manager, 0, 300, 784, 284)
manager.test_window("three")
assert_dimensions(manager, 0, 400, 784, 184) | [
9,
-1,
97,
5112
] |
def METHOD_NAME(cls, tls_config):
if not isinstance(tls_config, dict):
raise TypeError('tls_config should be of type dict')
if 'server_cert_path' not in tls_config:
raise ValueError('server_cert_path is not defined in tls_config')
if ('client_key_path' in tls_config) != ('client_cert_path' in tls_config):
raise ValueError('none or both client_key_path and client_cert_path '
'are required in tls_config')
valid_keys = ['server_cert_path', 'client_key_path', 'client_cert_path']
for key in tls_config:
if key not in valid_keys:
raise ValueError(f'{key} is not valid tls_config key')
if not isinstance(tls_config[key], str):
raise TypeError(f'{key} type should be string but is type '
f'{type(tls_config[key]).__name__}')
if not os.path.isfile(tls_config[key]):
raise ValueError(f'{tls_config[key]} is not valid path to file') | [
250,
1245,
200
] |
def METHOD_NAME(dti, start, length):
"""Return start and end dates of a sequence as tuple
:param <DateTimeIndex> dti: datetime series of working days
:param <int> start: index of start
:param <int> length: number of sequential days
:return <Tuple[pd.Timestamp, pd.Timestamp]>: tuple of start and end date
"""
tup = (dti[start], dti[start + (length - 1)])
return tup | [
13863
] |
def METHOD_NAME(self, runspec: RunObject, execution):
internal_code, original_handler = self.get_internal_parameters(runspec)
if internal_code:
task_parameters = runspec.spec.parameters.get("task_parameters", {})
task_parameters["spark_app_code"] = internal_code
if original_handler:
task_parameters[
"original_handler"
] = original_handler # in order to handle reruns.
runspec.spec.parameters["task_parameters"] = task_parameters
current_file = os.path.abspath(__file__)
current_dir = os.path.dirname(current_file)
databricks_runtime_wrap_path = os.path.join(
current_dir, "databricks_wrapper.py"
)
with open(
databricks_runtime_wrap_path, "r"
) as databricks_runtime_wrap_file:
wrap_code = databricks_runtime_wrap_file.read()
wrap_code = b64encode(wrap_code.encode("utf-8")).decode("utf-8")
self.spec.build.functionSourceCode = wrap_code
runspec.spec.handler = "run_mlrun_databricks_job"
else:
raise ValueError("Databricks function must be provided with user code") | [
709,
22
] |
def METHOD_NAME(self, editable, tree, path):
if editable.props.editing_canceled == False:
self.emit('edited', path, editable.get_text())
tree.grab_focus() | [
3631,
1658
] |
def METHOD_NAME():
spec = Pattern()
x = np.linspace(0, 10)
y = np.sin(x)
spec.data = x, y
new_x, new_y = spec.data
assert np.array_equal(new_x, x)
assert np.array_equal(new_y, y) | [
9,
1333,
80,
365
] |
def METHOD_NAME(*args, **kwargs): # pylint: disable=unused-argument
from azure.core.credentials import AccessToken
import time
fake_raw_token = 'top-secret-token-for-you'
now = int(time.time())
return AccessToken(fake_raw_token, now + 3600) | [
19,
466
] |
def METHOD_NAME(a):
g = make_graph()
uri = abstract_uri(a)
g.add((uri, RDF.type, NDNP['Awardee']))
g.add((uri, FOAF['name'], Literal(a.name)))
for batch in a.batches.all():
g.add((abstract_uri(batch), DCTERMS['creator'], uri))
g.add((uri, RDFS.isDefinedBy, rdf_uri(a)))
for essay in a.essays.all():
g.add((URIRef(essay.url), DCTERMS['creator'], uri))
g.add((URIRef(essay.url), RDF['type'], NDNP['Essay']))
if a.org_code == 'dlc':
# important for resource maps that reference loc as dc:creator
g.add((uri, FOAF['mbox'], Literal('[email protected]')))
g.add((uri, OWL['sameAs'],
URIRef("http://dbpedia.org/resource/Library_of_Congress")))
return g | [
16034,
24,
303
] |
def METHOD_NAME(self) -> Speed:
return self.flight.unit_type.preferred_patrol_speed(
self.layout.patrol_start.alt
) | [
9489,
1942
] |
def METHOD_NAME(self):
# given:
g = ConjunctiveGraph()
g.add((BNode(), RDF.value, Literal("""<p """, datatype=RDF.XMLLiteral)))
# when:
xmlrepr = g.serialize(format="pretty-xml")
# then:
assert (
"""<rdf:value rdf:datatype="http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral"><p """
in xmlrepr
) | [
9,
885,
31,
18154
] |
def METHOD_NAME(con_build) -> None: # type: ignore
# Ensures that the ConsumerBuilders are assigning a
# not-None value to the required attributes
# Depending on the attribute, we can verify this
# to different degrees
assert con_build.storage == get_writable_storage(test_storage_key)
assert con_build.consumer_group == consumer_group_name
assert isinstance(con_build.raw_topic, Topic)
assert isinstance(con_build.metrics, MetricsBackend)
assert con_build.max_batch_size == 3
assert con_build.max_batch_time_ms == 4
assert con_build.auto_offset_reset == "earliest"
assert con_build.queued_max_messages_kbytes == 1
assert con_build.queued_min_messages == 2 | [
9,
3553,
348,
256,
665,
177
] |
def METHOD_NAME(self, mesh): | [
238
] |
def METHOD_NAME(self, force):
md = self.content["metadata"]
if "geographicalExtent" in md:
bbox = md["geographicalExtent"]
else:
bbox = md["bbox"]
return [bbox[0], bbox[1], bbox[3], bbox[4]] | [
7862
] |
def METHOD_NAME(data, rois, pooled_size, spatial_scale, sample_ratio=-1, layout="NCHW", mode="avg"):
"""ROI align operator.
Parameters
----------
data : relay.Expr
4-D tensor with shape [batch, channel, height, width]
rois : relay.Expr
2-D tensor with shape [num_roi, 5]. The last dimension should be in format of
[batch_index, w_start, h_start, w_end, h_end]
pooled_size : list/tuple of two ints
output size
spatial_scale : float
Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
of total stride in convolutional layers, which should be in range (0.0, 1.0]
sample_ratio : int
Optional sampling ratio of ROI align, using adaptive size by default.
mode : str, Optional
The pooling method. Relay supports two methods, 'avg' and 'max'. Default is 'avg'.
Returns
-------
output : relay.Expr
4-D tensor with shape [num_roi, channel, pooled_size, pooled_size]
"""
return _make.METHOD_NAME(data, rois, pooled_size, spatial_scale, sample_ratio, layout, mode) | [
65,
66
] |
def METHOD_NAME(a, axis=None, abs_deviation=None, mask=None):
if abs_deviation is None:
abs_deviation = absolute_deviation(a, axis=axis, mask=mask)
return median(abs_deviation, axis=axis, mask=mask) | [
6778,
4653,
8516
] |
def METHOD_NAME(url, aviname, streetview_server_ipaddr): # data must be dictionary type
cmd_url = url + '/Apply/' # You have to implement @app.route('/Apply/') in server.py part
cap = cv2.VideoCapture(aviname)
frame_cnt = 0
while(cap.isOpened()):
ret, frame = cap.read()
if not ret :
continue
frame_cnt += 1
if (frame_cnt % 30) != 0:
continue
K = 3
frame = frame.astype(int)
frame = frame[:, :, [2, 1, 0]] # bgr to rgb
image_size, image_data = serialize_image(frame)
gps_lat = 36.381438 + 0.00001
gps_lon = 127.378867 + 0.00001
gps_accuracy = 1
data_dict = { 'K' : K, 'image_size' : image_size, 'image_data' : image_data,
'gps_lat' : gps_lat, 'gps_lon' : gps_lon, 'gps_accuracy' : gps_accuracy,
'streetview_server_ipaddr' : streetview_server_ipaddr }
data_json = json.dumps(data_dict)
rcv_json = requests.post(cmd_url, json=data_json) # input : json data, it takes some second to return
rcv_dict = rcv_json.json() # .json method changes json to dictionary
vps_image_data = rcv_dict['vps_IDandConf']
print(frame_cnt, vps_image_data)
## display for debugging
img_recon = deserialize_image(image_data, image_size)
imshow('Client : query image', img_recon, '111')
sleep(0.1) | [
340,
231
] |
def METHOD_NAME():
"""
Test that a warning about potentially long runtime is raised.
"""
with pytest.warns(UserWarning):
PairedPerLocationSubscriberCallDurations("2016-01-01", "2016-01-07") | [
9,
524,
1888,
3437
] |
def METHOD_NAME(input_data, dataset, tmpdir, serialization: Optional[ns] = None):
input_data = ns.from_dict(input_data)
def make_path(k, v, parents=None):
if is_serializable_data(v):
path = os.path.join(tmpdir, '.'.join(parents+[k, 'data']))
if vector_keys.match(k):
v = as_col(v)
path = serialize_data(v, path, config=serialization)
return k, path
return k, v
ds = ns.walk(input_data, make_path)
dataset.release()
gc.collect()
return ds | [
93,
362,
126
] |
def METHOD_NAME(self):
'''
*Method to set the self.cut_left_array and self.cut_right_array
properties, with the limits being an RF period.
This is done as a pre-processing.*
'''
# RF period
t_rf = self.RFParams.t_rf[0, self.RFParams.counter[0]]
self.cut_left_array = np.zeros(self.n_filled_buckets)
self.cut_right_array = np.zeros(self.n_filled_buckets)
for i in range(self.n_filled_buckets):
bucket_index = np.where(self.filling_pattern)[0][i]
self.cut_left_array[i] = bucket_index * t_rf
self.cut_right_array[i] = (bucket_index + 1) * t_rf | [
0,
3167
] |
def METHOD_NAME(
staff_api_client, payment_with_public_metadata
):
# when
response = execute_query_public_metadata_for_payment(
staff_api_client, payment_with_public_metadata
)
# then
assert_no_permission(response) | [
9,
539,
1609,
1094,
43,
13,
947
] |
def METHOD_NAME(self): | [
9,
420,
450,
529,
40,
584
] |
async def METHOD_NAME(self, target: int) -> list:
"""
Give a list of workers to close that brings us down to target workers
"""
# TODO, improve me with something that thinks about current load
return list(self.observed)[target:] | [
5930,
24,
1462
] |
def METHOD_NAME(self) -> Optional['outputs.IdentityResponse']:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity") | [
2989
] |
def METHOD_NAME():
results = {"a": "A", "b": "B", "c": "C", "d": "D"}
assert _recursive_get((0, 1, "a", {"b": "c"}), results, filter=str) == (
0,
1,
"A",
{"b": "C"},
)
assert _recursive_get({"a": "b", "c": "d"}, results, filter=str) == {
"a": "B",
"c": "D",
}
assert _recursive_get(["a", "b", "c"], results, filter=str) == ("A", "B", "C")
assert _recursive_get("a", results, filter=str) == "A"
my_seq = MySequence("a", "b", "c")
my_map = MyMapping(a="a", b="b", c="c")
assert _recursive_get(("a", my_seq, ["b", "a"], my_map), results, filter=str) == (
"A",
my_seq,
("B", "A"),
my_map,
) | [
9,
4541,
19
] |
def METHOD_NAME(self, p, attr):
p_run = p.run(step=3)
wmsg = f"The `{attr}` attribute was deprecated in MDAnalysis 2.0.0"
with pytest.warns(DeprecationWarning, match=wmsg):
getattr(p_run, attr) is p_run.results[attr] | [
9
] |
def METHOD_NAME(test_cases):
for case in test_cases:
for expected_event in case['expectedMonitoringEvents']:
for entry, value in expected_event.items():
if value in ['ANY_STR', 'ANY_INT']:
expected_event[entry] = mock.ANY | [
369,
391,
-1
] |
def METHOD_NAME(action_rule_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActionRuleByNameResult]:
"""
Get a specific action rule
Azure REST API version: 2019-05-05-preview.
:param str action_rule_name: The name of action rule that needs to be fetched
:param str resource_group_name: Resource group name where the resource is created.
"""
... | [
19,
1006,
446,
604,
156,
146
] |
def METHOD_NAME(self, game):
self._send_command(AgentCommand.END_GAME, game=game)
self._close() | [
1798,
2674
] |
def METHOD_NAME(self, row: int, column: int, parent: QModelIndex = None) -> QModelIndex:
if parent is None:
parent = QModelIndex()
if parent.isValid():
return QModelIndex()
return self.createIndex(row, column, None) | [
724
] |
def METHOD_NAME(i : int):
import numpy as np
_ = np.ones(i)
return True | [
129,
1676
] |
def METHOD_NAME(api_data):
cluster = Cluster(api_data["cluster"], api_data["tenant"])
assert not cluster.config_git_repo_revision
set_on_tenant = api_data.copy()
set_on_tenant["tenant"]["tenantGitRepoRevision"] = "v1.2.3"
cluster = Cluster(set_on_tenant["cluster"], set_on_tenant["tenant"])
assert cluster.config_git_repo_revision == "v1.2.3"
set_on_cluster = api_data.copy()
set_on_cluster["cluster"]["tenantGitRepoRevision"] = "v3.2.1"
cluster = Cluster(set_on_cluster["cluster"], set_on_cluster["tenant"])
assert cluster.config_git_repo_revision == "v3.2.1"
set_on_both = api_data.copy()
set_on_both["cluster"]["tenantGitRepoRevision"] = "v2.3.1"
set_on_both["tenant"]["tenantGitRepoRevision"] = "v1.2.3"
cluster = Cluster(set_on_both["cluster"], set_on_both["tenant"])
assert cluster.config_git_repo_revision == "v2.3.1" | [
9,
200,
1493,
522,
71
] |
def METHOD_NAME(self, x, y, pixel):
"""Set pixel at the speficied position."""
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
i = 3 * (y * self.width + x)
self.data[i] = pixel[0]
self.data[i + 1] = pixel[1]
self.data[i + 2] = pixel[2] | [
0,
976
] |
def METHOD_NAME(self):
m = ConcreteModel()
m.a = Set(initialize=[1, 2, 3], ordered=True)
m.x = Var(m.a)
stream = StringIO()
m.x[2].pprint(ostream=stream)
correct_s = (
'{Member of x} : Size=3, Index=a\n '
'Key : Lower : Value : Upper : Fixed : Stale : Domain\n '
'2 : None : None : None : False : True : Reals\n'
)
self.assertEqual(correct_s, stream.getvalue()) | [
9,
1007,
365,
3742
] |
def METHOD_NAME(
model_path: str,
add_bos: bool = False,
add_eos: bool = True,
reverse: bool = False,
):
"""Load a tf-text SentencePiece tokenizer from given model filepath."""
with tf.io.gfile.GFile(model_path, 'rb') as model_fp:
sp_model = model_fp.read()
sp_tokenizer = tftxt.SentencepieceTokenizer(
model=sp_model, add_bos=add_bos, add_eos=add_eos, reverse=reverse
)
return sp_tokenizer | [
557,
15163,
1345
] |
def METHOD_NAME(string):
c = 0
for char in string:
if char == "{":
c += 1
elif char == "}":
c -= 1
if c < 0:
return True
return c != 0 | [
220,
8790,
6377
] |
def METHOD_NAME(entities, message):
"""This function tags the message with the entity name and also identify the entity values.
This functionality can be used when we have to identify entities from message without considering and
structured_value and fallback_value.
Attributes:
entities: list of entity names that needs to be identified. For example, ['date', 'time', 'restaurant']
message: message on which entity detection needs to run
Output:
will be list of dictionary
{
'entity_data': PROCESSED_ENTITY_DICTIONARY,
'tag': TAGGED_TEXT
}
Example:
entities = ['date','time','restaurant']
message = "Reserve me a table today at 6:30pm at Mainland China and on Monday at 7:00pm at Barbeque Nation"
ner_output = run_ner(entities=entities, message=message)
print ner_output
>> "data": {
"tag": "reserve me a table __date__ at __time__ at __restaurant__ and on __date__ at __time__ at __restaurant__",
"entity_data": {
"restaurant": [{
"detection": "chat",
"original_text": "barbeque nation",
"entity_value": "Barbeque Nation"
}, {
"detection": "chat",
"original_text": "mainland china",
"entity_value": "Mainland China"
}],
"date": [{
"detection": "chat",
"original_text": " monday",
"entity_value": {
"mm": 03,
"yy": 2017,
"dd": 13,
"type": "current_day"
}
}, {
"detection": "chat",
"original_text": "today",
"entity_value": {
"mm": 03,
"yy": 2017,
"dd": 11,
"type": "today"
}
}],
"time": [{
"detection": "chat",
"original_text": "6:30pm",
"entity_value": {
"mm": 30,
"hh": 6,
"nn": "pm"
}
}, {
"detection": "chat",
"original_text": "7:00pm",
"entity_value": {
"mm": 0,
"hh": 7,
"nn": "pm"
}
}]
}
} | [
22,
12650
] |
def METHOD_NAME(value): | [
1024,
1877
] |
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.pccp_name = AAZStrArg(
options=["-n", "--name", "--pccp-name"],
help="The name of the packet core control plane.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^[a-zA-Z0-9][a-zA-Z0-9_-]*$",
max_length=64,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema | [
56,
134,
135
] |
def METHOD_NAME(model):
"""Return the `Table` class associated with a given `model`.
The `Table` class is expected to be in the `tables` module within the application
associated with the model and its name is expected to be `{ModelName}Table`.
If a matching `Table` is not found, this will return `None`.
Returns:
Either the `Table` class or `None`
"""
return get_related_class_for_model(model, module_name="tables", object_suffix="Table") | [
19,
410,
43,
578
] |
async def METHOD_NAME(self, inter: discord.Interaction, btn: discord.Button):
self.stop()
await self.message.delete(delay=0) | [
13458
] |
def METHOD_NAME(rule_runner, packages) -> None:
rule_runner.write_files(project_files(skip_twine=True))
result = request_publish_processes(rule_runner, packages)
assert len(result) == 1
assert_package(
result[0],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="(by `skip_twine` on src:dist)",
expect_process=None,
)
# Skip twine globally from config option.
rule_runner.set_options(["--twine-skip"])
result = request_publish_processes(rule_runner, packages)
assert len(result) == 0 | [
9,
2423,
14149
] |
def METHOD_NAME(self):
# dnf.repo.Repo.load
repo = dnf.repo.Repo()
class MockRepo:
def load(self):
return True
repo._repo = MockRepo()
self.assertHasAttr(repo, "load")
repo.load() | [
9,
557
] |
def METHOD_NAME(self): # noqa: F811
return self.value.METHOD_NAME() | [
2028
] |
def METHOD_NAME(activity_type):
activity_type = frappe.get_doc("Activity Type", activity_type)
activity_type.billing_rate = 50.0
activity_type.save(ignore_permissions=True) | [
86,
809,
44
] |
def METHOD_NAME(weighted_vouch_matrix, pretrusts: NDArray):
"""
Return a vector of trust scores per user, given the vouch network
and the pre-trust scores (based on user's email domains).
ByzTrust is inspired from EigenTrust.
"""
trusts = pretrusts
delta = np.inf
while delta >= APPROXIMATION_ERROR:
# Apply vouch decay
new_trusts = pretrusts + VOUCH_DECAY * weighted_vouch_matrix.T.dot(trusts)
# Clip to avoid power concentration
new_trusts = new_trusts.clip(max=1.0)
delta = np.linalg.norm(new_trusts - trusts, ord=1)
trusts = new_trusts
return trusts | [
226,
17526
] |
def METHOD_NAME(self, filter_pattern, path, use_extracted):
filter_rx = None
if filter_pattern:
filter_rx = re.compile(filter_pattern)
def _filter(filename):
return 'blob_meta' in filename and (not filter_rx or filter_rx.match(filename))
target_dir = get_tmp_extract_dir(path, specifier='blob_meta')
target_path = Path(target_dir)
export_meta_files = []
if not target_path.exists():
with zipfile.ZipFile(path, 'r') as archive:
archive.extract('meta.json', target_dir)
for dump_file in archive.namelist():
if 'blob_meta' in dump_file:
archive.extract(dump_file, target_dir)
elif not use_extracted:
raise CommandError(
"Extracted dump already exists at {}. Delete it or use --use-extracted".format(target_dir))
meta = json.loads(target_path.joinpath('meta.json').read_text())
for file in target_path.iterdir():
if _filter(file.name):
print(f"Selected blob meta file: {file.name}")
export_meta_files.append(file)
return export_meta_files, meta | [
19,
171,
245,
61,
1094
] |
def METHOD_NAME(self):
"""Returns the children of this project tree item."""
return self._children | [
2189
] |
def METHOD_NAME(self, payment_method_token, params):
Resource.verify_keys(params, PaymentMethod.update_signature())
self.__check_for_deprecated_attributes(params);
try:
if payment_method_token is None or payment_method_token.strip() == "":
raise NotFoundError()
return self._put(
"/payment_methods/any/" + payment_method_token,
{"payment_method": params}
)
except NotFoundError:
raise NotFoundError("payment method with token " + repr(payment_method_token) + " not found") | [
86
] |
def METHOD_NAME(self): | [
9,
1344,
0,
116
] |
def METHOD_NAME(self, point2d):
iK = inv(self.K)
Pi=np.array([point2d[0]/point2d[2],point2d[1]/point2d[2],1.0],dtype=np.double).reshape(3,1)
a=np.dot(iK,Pi)
aH=np.array([a[0],a[1],a[2],1],dtype=np.double)
RT2=self.RT.copy()
RT2[0,3] = 0
RT2[1,3] = 0
RT2[2,3] = 0
RT2[3,3] = 1
b = np.dot(np.transpose(RT2),aH)
translate = np.identity(4,dtype=np.double)
translate[0,3] = self.RT[0,3]/self.RT[3,3];
translate[1,3] = self.RT[1,3]/self.RT[3,3];
translate[2,3] = self.RT[2,3]/self.RT[3,3];
b=np.dot(translate,b)
outPoint = np.array([b[0]/b[3],b[1]/b[3],b[2]/b[3],1])
return outPoint | [
-1
] |
def METHOD_NAME(self, avatar):
"""
Set the avatar.
:param avatar: See function create for examples.
:return: The response on success
API docs: https://docs.atlassian.com/bitbucket-server/rest/7.8.0/bitbucket-rest.html#idp156
"""
return self.post("avatar.png", data={"avatar": avatar}) | [
0,
5994
] |
def METHOD_NAME(object_id=None, owner=None):
layer = Dataset.objects.get(pk=object_id) if object_id else Dataset.objects.all().first()
if not owner:
owner = get_user_model().objects.get(username="admin")
for style in styles:
new_style, created = Style.objects.get_or_create(
name=style["name"], defaults=dict(sld_url=style["sld_url"], sld_body=style["sld_body"])
)
if new_style not in layer.styles.all():
layer.styles.add(new_style)
layer.default_style = new_style
layer.owner = owner
layer.save()
for attr in attributes:
Attribute.objects.update_or_create(
dataset=layer,
attribute=attr["attribute"],
attribute_label=attr["attribute_label"],
attribute_type=attr["attribute_type"],
visible=attr["visible"],
display_order=attr["display_order"],
) | [
129,
126,
365
] |
def METHOD_NAME(self):
if not self._rectangles:
self.stop()
self._window.set_position(self._destination)
return
try:
rectangle = next(self._rectangles)
except StopIteration:
self._rectangles = None
self.stop()
rectangle = self._destination
self._window.set_position(rectangle) | [
2401,
1076
] |
def METHOD_NAME(decl):
m = re_default.match(decl)
if m:
return m.group(1)
return None | [
297,
235
] |
def METHOD_NAME():
Class = type('Fox', (object,), {
'__module__': 'quick.brown',
})
assert qualname(Class) == 'quick.brown.Fox'
assert qualname(Class()) == 'quick.brown.Fox' | [
9,
9532
] |
def METHOD_NAME(_context, request):
return FlagService(request.db) | [
584,
549,
1155
] |
def METHOD_NAME(
high_tuple: Tuple[str, int], af: Optional[int] = None
) -> Any:
"""Given a "high-level" address tuple, i.e.
an (address, port) return the appropriate "low-level" address tuple
suitable for use in socket calls.
If an *af* other than ``None`` is provided, it is assumed the
address in the high-level tuple is valid and has that af. If af
is ``None``, then af_for_address will be called.
"""
address, port = high_tuple
if af is None:
af = af_for_address(address)
if af == AF_INET:
return (address, port)
elif af == AF_INET6:
i = address.find("%")
if i < 0:
# no scope, shortcut!
return (address, port, 0, 0)
# try to avoid getaddrinfo()
addrpart = address[:i]
scope = address[i + 1 :]
if scope.isdigit():
return (addrpart, port, 0, int(scope))
try:
return (addrpart, port, 0, socket.if_nametoindex(scope))
except AttributeError: # pragma: no cover (we can't really test this)
ai_flags = socket.AI_NUMERICHOST
((*_, tup), *_) = socket.getaddrinfo(address, port, flags=ai_flags)
return tup
else:
raise NotImplementedError(f"unknown address family {af}") | [
3420,
33,
85,
1815
] |
def METHOD_NAME(chars, encoding="utf-8", errors="strict"):
"""Coerce *chars* to six.text_type.
For Python 2:
- `unicode` -> `unicode`
- `str` -> `unicode`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(chars, binary_type):
return chars.decode(encoding, errors)
elif isinstance(chars, text_type):
return chars
else:
raise TypeError("not expecting type '%s'" % type(chars)) | [
602,
526
] |
def METHOD_NAME(_registry: Optional[EventsProcessesRegistry] = None) -> dict:
events_summary_log = get_events_logger(_registry=_registry).events_summary_log
with verbose_suppress("Failed to read %s", events_summary_log):
with events_summary_log.open() as fobj:
return json.load(fobj)
return {} | [
19,
2034,
417,
2718
] |
def METHOD_NAME(args: argparse.Namespace) -> None:
"""Demonstration for learning to rank with relevance degree."""
data = load_mlsr_10k(args.data, args.cache)
# Sort data according to query index
X_train, y_train, qid_train = data.train
sorted_idx = np.argsort(qid_train)
X_train = X_train[sorted_idx]
y_train = y_train[sorted_idx]
qid_train = qid_train[sorted_idx]
X_test, y_test, qid_test = data.test
sorted_idx = np.argsort(qid_test)
X_test = X_test[sorted_idx]
y_test = y_test[sorted_idx]
qid_test = qid_test[sorted_idx]
ranker = xgb.XGBRanker(
tree_method="hist",
device="cuda",
lambdarank_pair_method="topk",
lambdarank_num_pair_per_sample=13,
eval_metric=["ndcg@1", "ndcg@8"],
)
ranker.fit(
X_train,
y_train,
qid=qid_train,
eval_set=[(X_test, y_test)],
eval_qid=[qid_test],
verbose=True,
) | [
4510,
2660
] |
async def METHOD_NAME(self):
"""Signs collections if they have not been signed with key."""
tasks = []
# Filter out any content that already has a signature with pubkey_fingerprint
current_signatures = CollectionVersionSignature.objects.filter(
pubkey_fingerprint=self.signing_service.pubkey_fingerprint
)
new_content = self.content.exclude(signatures__in=current_signatures)
ntotal = await sync_to_async(new_content.count)()
nmsg = _("Signing new CollectionVersions")
async with ProgressReport(message=nmsg, code="sign.new.signature", total=ntotal) as p:
self.progress_report = p
async for collection_version in sync_to_async_iterable(new_content.iterator()):
tasks.append(asyncio.create_task(self.sign_collection_version(collection_version)))
await asyncio.gather(*tasks)
# Add any signatures already present in Pulp if part of content list
present_content = current_signatures.filter(signed_collection__in=self.content).exclude(
pk__in=self.repos_current_signatures
)
ptotal = await sync_to_async(present_content.count)()
pmsg = _("Adding present CollectionVersionSignatures")
async with ProgressReport(message=pmsg, code="sign.present.signature", total=ptotal) as np:
async for signature in sync_to_async_iterable(present_content.iterator()):
await np.aincrement()
await self.put(DeclarativeContent(content=signature)) | [
22
] |
def METHOD_NAME(self) -> Optional[float]:
"""
Total record count number.
"""
return pulumi.get(self, "count") | [
29
] |
def METHOD_NAME(self) -> Optional[str]:
"""Return extracted common_id.
:returns: Extracted common_id
:rtype: str
"""
common_id = self._data.get("common_id")
return str(common_id) if common_id else None | [
19,
67,
147
] |
def METHOD_NAME(poly_case, storage):
ert = poly_case
args = Namespace(
realizations="0-4,7,8",
weights="6,4,2",
current_case="default",
target_case="test_case_%d",
start_iteration="0",
restart_run=False,
prior_ensemble="default",
)
model = model_factory._setup_multiple_data_assimilation(
ert,
storage,
args,
UUID(int=0),
)
assert isinstance(model, MultipleDataAssimilation)
assert len(model._simulation_arguments.keys()) == 8
assert "active_realizations" in model._simulation_arguments
assert "target_case" in model._simulation_arguments
assert "analysis_module" in model._simulation_arguments
assert "weights" in model._simulation_arguments | [
9,
102,
107,
365,
-1
] |
def METHOD_NAME(self):
pbus = dbus.SystemBus(private=True)
p2_proxy = pbus.get_object(BUS_NAME,
'/org/freedesktop/Problems2')
p2 = dbus.Interface(p2_proxy,
dbus_interface='org.freedesktop.Problems2')
description = {"analyzer": "problems2testsuite_analyzer",
"reason": "Application has been killed",
"backtrace": "die()",
"duphash": "TASK_NEW_PROBLEM_SESSION",
"uuid": "TASK_NEW_PROBLEM_SESSION",
"executable": "/usr/bin/foo",
"type": "abrt-problems2-new-destroyed-with-session"}
task_path = p2.NewProblem(description, 0x1)
self.bus.get_object(BUS_NAME, task_path)
pbus.close()
task = Problems2Task(self.bus, task_path)
self.assertRaisesDBusError(
"org.freedesktop.DBus.Error.UnknownMethod: No such interface "
"'org.freedesktop.DBus.Properties' on object at path " + task_path,
task.getproperty, "Status") | [
9,
758,
80,
758,
11551,
41,
240
] |