text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7)
---|---
def METHOD_NAME():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('explore_states')
op.drop_table('dashboards')
# ### end Alembic commands ### | [
1502
] |
def METHOD_NAME(self):
name = get_conflicted_name(["bar"], "bar")
eq_("[000] bar", name)
name = get_conflicted_name(["bar", "[000] bar"], "bar")
eq_("[001] bar", name) | [
9,
53
] |
def METHOD_NAME(self, top: Optional[int] = None, **kwargs: Any) -> Iterable["_models.Resource"]:
"""The List operation gets information about the vaults associated with the subscription.
:param top: Maximum number of results to return. Default value is None.
:type top: int
:keyword filter: The filter to apply on the operation. Default value is "resourceType eq
'Microsoft.KeyVault/vaults'". Note that overriding this default value may result in unsupported
behavior.
:paramtype filter: str
:keyword api_version: Azure Resource Manager Api Version. Default value is "2015-11-01". Note
that overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Resource or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2023_02_01.models.Resource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
filter: Literal["resourceType eq 'Microsoft.KeyVault/vaults'"] = kwargs.pop(
"filter", _params.pop("$filter", "resourceType eq 'Microsoft.KeyVault/vaults'")
)
api_version: Literal["2015-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2015-11-01"))
cls: ClsType[_models.ResourceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
top=top,
filter=filter,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
# change this line to use the passed in api version
_next_request_params["api-version"] = api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | [
245
] |
def METHOD_NAME(self):
store = self.new_store(excludes='*/*20220502*')
self.assertEqual({'l1b/olci-l1b-20220501.zarr',
'l1b/olci-l1b-20220503.zarr',
'l2/olci-l2-20220501.zarr',
'l2/olci-l2-20220503.zarr',
'l3/olci-l3-2020.levels',
'l3/olci-l3-2021.levels'},
set(store.get_data_ids()))
store = self.new_store(excludes=['*/*20220502*', '*.levels'])
self.assertEqual({'l1b/olci-l1b-20220501.zarr',
'l1b/olci-l1b-20220503.zarr',
'l2/olci-l2-20220501.zarr',
'l2/olci-l2-20220503.zarr'},
set(store.get_data_ids()))
store = self.new_store(excludes=['*.zarr', '*.levels'])
self.assertEqual(set(),
set(store.get_data_ids())) | [
9,
982
] |
def METHOD_NAME():
client = create_client()
message = create_message()
sequencer = unordered_sequencer.UnorderedSequencer(client, "topic_name")
sequencer.publish(message, retry=mock.sentinel.custom_retry)
assert sequencer._current_batch is not None
assert sequencer._current_batch._commit_retry is mock.sentinel.custom_retry | [
9,
2411,
343,
2052
] |
def METHOD_NAME(self, X):
"""Make predictions using the fitted XGBoost classifier.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.DataFrame: Predicted values.
"""
X, _ = super()._manage_woodwork(X)
X = _rename_column_names_to_numeric(X)
predictions = super().METHOD_NAME(X)
if not self._label_encoder:
return predictions
predictions = self._label_encoder.inverse_transform(
predictions.astype(np.int64),
)
return predictions | [
2103
] |
def METHOD_NAME():
timeout = httpx.Timeout(None)
assert timeout.connect is None
assert timeout.read is None
assert timeout.write is None
assert timeout.pool is None | [
9,
659,
280,
5996
] |
def METHOD_NAME(self): | [
9,
1716,
156,
43,
3802
] |
def METHOD_NAME(cls, ctx, op):
(in_chunk,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
axis = cls.get_axis(op.axis)
with device(device_id):
chunk_count = numel(
in_chunk, axis=axis, dtype=np.int64, keepdims=bool(op.keepdims)
)
chunk_sum = xp.sum(
in_chunk, axis=axis, dtype=op.dtype, keepdims=bool(op.keepdims)
)
ctx[op.outputs[0].key] = (chunk_sum, chunk_count) | [
750,
422
] |
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}",
**self.url_parameters
) | [
274
] |
def METHOD_NAME(self):
self._check_status(self.Status.CREATED, self.Status.INITED)
if self.status_ == self.Status.CREATED:
config_proto_str = text_format.MessageToString(self.config_proto)
self._session_ctx.try_init(config_proto_str)
self.status_ = self.Status.INITED | [
1365,
176
] |
async def METHOD_NAME():
... | [
9,
1047,
13273,
804,
43,
10507,
5399
] |
def METHOD_NAME(self):
super().METHOD_NAME()
self.user = UserFactory.create(password=self.password)
self.profile = self.user.profile | [
0,
1
] |
def METHOD_NAME(self):
if self._first_page_iterator_instance is None:
self._first_page_iterator_instance = self.by_page()
return self._first_page_iterator_instance | [
865,
640,
89
] |
def METHOD_NAME(self, coin_type, delta=0):
if coin_type not in COIN_TYPES:
raise ValueError("coin_type must be in ('pp', 'gp', 'ep', 'sp', 'cp')")
delta_out = ""
if delta:
delta_out = f" ({delta:+,})"
coin_value = f"{getattr(self, coin_type):,}"
return f"{COIN_TYPES[coin_type]['icon']} {coin_value} {coin_type}{delta_out}" | [
4865,
144
] |
def METHOD_NAME(self) -> None:
self.collect_by_name(["Apple", "Champ", "Red ID Card"])
self.assertBeatable(False) # 0 floppies
floppies = self.get_items_by_name("Floppy Disk")
win = self.get_item_by_name("Win")
self.collect(floppies[:-2]) # 1 too few
self.assertEqual(self.count("Floppy Disk"), 5)
self.assertBeatable(False)
self.collect(floppies[-2:-1]) # exact
self.assertEqual(self.count("Floppy Disk"), 6)
self.assertBeatable(True)
self.remove([win]) # reset
self.collect(floppies[-1:]) # 1 extra
self.assertEqual(self.count("Floppy Disk"), 7)
self.assertBeatable(True) | [
9,
-1
] |
def METHOD_NAME():
METHOD_NAME = Main()
METHOD_NAME.run() | [
57
] |
def METHOD_NAME(self, customScript, configurationAttributes):
print "Fido2Extension. Initialization"
print "Fido2Extension. Initialized successfully"
return True | [
176
] |
def METHOD_NAME(self, devs):
mps = set()
return mps | [
-1
] |
def METHOD_NAME(self, event):
value = event.new
low, high = value
low = max(low, 0)
high = min(high, 1)
self.plot.index.metadata["selections"] = (low, high)
self.value = (low, high) | [
86,
3223
] |
def METHOD_NAME(
self, cognite_client: CogniteClient, integration_test_space: Space, monkeypatch: Any
) -> None:
# Arrange
valid_space = SpaceApply(
space="myNewValidSpace",
)
invalid_space = SpaceApply(space="myInvalidSpace", name="wayTooLong" * 255)
monkeypatch.setattr(cognite_client.data_modeling.spaces, "_CREATE_LIMIT", 1)
try:
# Act
with pytest.raises(CogniteAPIError) as error:
cognite_client.data_modeling.spaces.apply([valid_space, invalid_space])
# Assert
assert "name size must be between 0 and 255" in error.value.message
assert error.value.code == 400
assert len(error.value.successful) == 1
assert len(error.value.failed) == 1
finally:
# Cleanup
cognite_client.data_modeling.spaces.delete(valid_space.as_id()) | [
9,
231,
1423,
61,
3163,
758
] |
def METHOD_NAME(self, widget):
# About Dialog usage code
about = gtk.AboutDialog()
about.set_program_name("NNStreamer Toolkit")
about.set_version("0.0.1")
about.set_authors([
'Geunsik Lim',
'Bug Reports and Patches:',
'MyungJoo Ham', 'Jijoong Moon', 'Sangjung Woo', 'Wook Song', 'Jaeyun Jung', 'Hyoungjoo Ahn',
])
about.set_copyright("(c) Samsung Electronics")
about.set_comments("About NNStreamer Toolkit")
about.set_website("https://github.com/nnstreamer/nnstreamer")
about.run()
about.destroy() | [
69,
-1
] |
def METHOD_NAME(self):
"""
Tests before and after start as well as after stop metadata.
"""
my_driver = TestDriverMetadata.MyDriver(
name="mydriver",
metadata_extractor=TestDriverMetadata.metadata_extractor_mydriver,
)
test = my_driver.extract_driver_metadata().to_dict()
expected = {"test_attribute": my_driver.test_attribute}
assert test == expected
my_driver.start()
test = my_driver.extract_driver_metadata().to_dict()
expected["test_attribute"] = "foo"
assert test == expected
my_driver.stop()
test = my_driver.extract_driver_metadata().to_dict()
expected["test_attribute"] = "bar"
assert test == expected | [
9,
297,
-1,
773
] |
def METHOD_NAME():
for value in [None, "foo", 1, 1.39, 1 + 1j, True]:
assert util.is_primitive(value) is True
for value in [[], (), {}, set()]:
assert util.is_primitive(value) is False | [
9,
137,
1478
] |
def METHOD_NAME(processors_node):
sysnode = '/sys/devices/system/cpu/'
threads = processors_node.xpath("//thread")
for thread in threads:
cpu_id = get_node(thread, "cpu_id/text()")
try:
with open(sysnode + f"cpu{cpu_id}/cpufreq/freqdomain_cpus", 'r') as f_node:
freqdomain_cpus = f_node.read()
except IOError:
logging.info(f"No _PSD info for cpu {cpu_id}")
freqdomain_cpus = cpu_id
freqdomain_cpus = freqdomain_cpus.replace('\n', '')
add_child(thread, "freqdomain_cpus", freqdomain_cpus) | [
297,
6737,
100
] |
def METHOD_NAME(
raw_dict: Dict[Any, Any], cls: Type[PerturbationDescription]
) -> PerturbationDescription:
"""Convert a raw dictionary to a PerturbationDescription.
This uses the name field to look up the correct PerturbationDescription subclass to output.
"""
structure = perturbation_name_to_base_structure_fn.get(
raw_dict["name"], base_perturbation_description_structure_fn
)
return structure(raw_dict, cls) | [
1011,
11931,
1067
] |
def METHOD_NAME(self):
dirname = os.path.join(support.TESTFN, 'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = '\xdf-\u66e8\u66e9\u66eb'
oldwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
try:
with open(filename, 'wb') as f:
f.write((filename + '\n').encode("utf-8"))
os.access(filename,os.R_OK)
os.remove(filename)
finally:
os.chdir(oldwd)
os.rmdir(dirname) | [
9,
2851
] |
def METHOD_NAME(self) -> str:
"""
Database name of the source data set
"""
return pulumi.get(self, "database_name") | [
463,
156
] |
def METHOD_NAME(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs | [
19,
3151
] |
def METHOD_NAME(cls):
"""Runs once before any tests in this class."""
if not QApplication.instance():
QApplication() | [
0,
1,
2
] |
def METHOD_NAME(self, town_code, measurement_number, user, referer):
try:
town_info = self._get_town_by_code(int(town_code))
if town_info is None:
return {}
files = os.listdir(town_info.get('path'))
mes = self._add_char_before(5, measurement_number)
files_list = []
for cur_file in files:
if cur_file.startswith(mes):
files_list.append({'filename': cur_file,
'is_downloadable':
self._is_download_authorized(
town_info.get('name'),
user, referer)
})
if len(files_list) == 0:
return {}
return {"town_info": town_info,
"files": files_list}
except:
print(sys.exc_info())
return {} | [
19,
4555,
171,
157
] |
def METHOD_NAME(self):
room = create_object(tutrooms.DarkRoom, key="darkroom")
self.char1.move_to(room, move_type="teleport")
self.call(tutrooms.CmdDarkHelp(), "", "Can't help you until") | [
9,
-1
] |
def METHOD_NAME(labels_dense, num_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot | [
3829,
24,
206,
3269
] |
def METHOD_NAME(self, host_info_list: HostInfoList, workdir: str, env: dict) -> None:
"""
Establish connections to a list of hosts
Args:
host_info_list (HostInfoList): a list of HostInfo objects
workdir (str): the directory where command is executed
env (dict): environment variables to propagate to hosts
"""
for hostinfo in host_info_list:
master_send_conn, worker_recv_conn = Pipe()
master_recv_conn, worker_send_conn = Pipe()
p = Process(target=run_on_host, args=(hostinfo, workdir, worker_recv_conn, worker_send_conn, env))
p.start()
self.processes[hostinfo.hostname] = p
self.master_recv_conns[hostinfo.hostname] = master_recv_conn
self.master_send_conns[hostinfo.hostname] = master_send_conn | [
707
] |
def METHOD_NAME(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
) | [
69,
1072
] |
def METHOD_NAME(self, path_to_yaml: str) -> None:
"""
Registers the environments listed in a yaml file (either local or remote). Note
that the entries are registered lazily: the registration will only happen when
an environment is accessed.
The yaml file must have the following format :
```yaml
environments:
- <identifier of the first environment>:
expected_reward: <expected reward of the environment>
description: | <a multi line description of the environment>
<continued multi line description>
linux_url: <The url for the Linux executable zip file>
darwin_url: <The url for the OSX executable zip file>
win_url: <The url for the Windows executable zip file>
- <identifier of the second environment>:
expected_reward: <expected reward of the environment>
description: | <a multi line description of the environment>
<continued multi line description>
linux_url: <The url for the Linux executable zip file>
darwin_url: <The url for the OSX executable zip file>
win_url: <The url for the Windows executable zip file>
- ...
```
:param path_to_yaml: A local path or url to the yaml file
"""
self._manifests.append(path_to_yaml)
self._sync = False | [
372,
280,
406
] |
def METHOD_NAME(f):
segbits = {}
for l in f:
# CLBLM_L.SLICEL_X1.ALUT.INIT[10] 29_14
l = l.strip()
if not l:
continue
parts = l.split(' ')
assert len(parts) > 1, l
segbits[parts[0]] = [parsebit(val) for val in parts[1:]]
return segbits | [
203,
15010
] |
def METHOD_NAME(folders: List[str]) -> str:
versioned_folders = [
(folder, version.parse(folder[1:]))
for folder in folders
if re.match(r"^v[0-9]+\.[0-9]+\.[0-9]+$", folder)
]
versioned_folders.sort(key=lambda ver: ver[1])
# get the latest version
if versioned_folders:
return versioned_folders[-1][0]
# fall back on main if available as default
if "main" in folders:
return "main"
# fall back on any other folder sorted
folders.sort()
return folders[-1] | [
19,
893,
451
] |
def METHOD_NAME(self):
return super(
SubmissionsSimilarityEntryAdmin, self
).METHOD_NAME() + [
'submission',
'submission__user',
'submission__problem_instance',
] | [
19,
343,
245,
1472,
252
] |
def METHOD_NAME(database: str, reset: bool, verbose: bool, legacy_region_name: str):
"""
This is a development tool that can convert a monolith database into
control + region databases by using silo annotations.
This operation will not modify the original source database.
"""
# We have a few tables that either need to be in both silos,
# or only in control. These tables don't have silo annotations
# as they are inherited from django and their silo assignments
# need to be manually defined.
region_tables = ["django_migrations", "django_content_type"]
control_tables = [
"django_migrations",
"django_admin_log",
"django_content_type",
"django_site",
"django_session",
"auth_user",
"auth_group",
"auth_permission",
"auth_group_permissions",
"auth_user_groups",
"auth_user_user_permissions",
]
for model in apps.get_models():
silo_limit = getattr(model._meta, "silo_limit", None)
if not silo_limit:
click.echo(f"> Could not find silo assignment for {model._meta.db_table}")
continue
if SiloMode.CONTROL in silo_limit.modes:
control_tables.append(model._meta.db_table)
if SiloMode.REGION in silo_limit.modes:
region_tables.append(model._meta.db_table)
revise_organization_mappings(legacy_region_name=legacy_region_name)
split_database(control_tables, database, "control", reset=reset, verbose=verbose)
split_database(region_tables, database, "region", reset=reset, verbose=verbose) | [
57
] |
def METHOD_NAME() -> None:
"""
An example for initializing a torchrec sharded embedding bag with a
pretrained embedding weight.
Environment assumptions:
- The embedding weight fits in the RAM of a single host, but may OOM if all
processes on the host load the embedding weight simultaneously.
- For simplicity, the demo assumes a single-host, multi-process environment.
"""
dist.init_process_group(backend="nccl")
local_rank = int(os.environ["LOCAL_RANK"])
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)
pg = dist.group.WORLD
assert pg is not None
dist.barrier()
if dist.get_rank() == 0:
# Pretend that we are loading the pretrained embedding weight from a parquet file on rank 0.
emb = torch.rand(2000000, 64)
# Share the tensor to local peers via shared memory
emb = share_tensor_via_shm(tensor=emb)
else:
# Received the tensor shared by rank 0 via shared memory
emb = share_tensor_via_shm(tensor=None)
assert emb.is_shared()
# For demo purpose, the entire model is an embedding bag collection with a
# single embedding bag.
ebc = EmbeddingBagCollection(
device=torch.device("meta"),
tables=[
torchrec.EmbeddingBagConfig(
name="emb",
embedding_dim=64,
num_embeddings=2000000,
feature_names=["f"],
pooling=torchrec.PoolingType.SUM,
)
],
)
# Create a rowwise sharding plan
sharders = cast(
List[ModuleSharder[nn.Module]],
[
EmbeddingBagCollectionSharder(
fused_params={
"optimizer": EmbOptimType.EXACT_ROWWISE_ADAGRAD,
"learning_rate": 0.01,
"eps": 0.01,
}
)
],
)
plan = EmbeddingShardingPlanner(
topology=Topology(world_size=dist.get_world_size(), compute_device=device.type),
constraints={
"emb": ParameterConstraints(sharding_types=[ShardingType.ROW_WISE.value])
},
).collective_plan(
ebc,
sharders,
pg,
)
print(plan)
# Initialize dmp which shards the embedding bag
dmp = DistributedModelParallel(
module=ebc,
device=device,
plan=plan,
sharders=sharders,
)
print(
"Finished initializing DistributedModelParallel. "
f"Current device utilization: {torch.cuda.memory_allocated() / 1_000_000} MB"
)
# For each shard in sharded tensors, load from the corresponding slice from
# the pretrained weights in shared memory.
for rank in range(dist.get_world_size()):
if dist.get_rank() == rank:
for _, t in dmp.state_dict().items():
for shard in t.local_shards():
offsets = shard.metadata.shard_offsets
lengths = shard.metadata.shard_sizes
src = emb[
offsets[0] : offsets[0] + lengths[0],
offsets[1] : offsets[1] + lengths[1],
]
shard.tensor.copy_(src)
dist.barrier()
else:
dist.barrier() | [
57
] |
def METHOD_NAME(*args, **kwargs): | [
1413,
141,
61,
238,
9553
] |
def METHOD_NAME(self, batch_cnt):
return self.decay_steps / (
self.decay_steps + np.exp(batch_cnt / self.decay_steps)
) | [
226,
5323
] |
def METHOD_NAME(str):
return str.replace('_Bool', 'bool') | [
527,
119
] |
def METHOD_NAME(x1: Tensor, x2: Tensor, x1_eq_x2: bool) -> Tensor:
res = x1.size(-2) - (x1.unsqueeze(-3) * x2.unsqueeze(-4)).sum(dim=(-1, -2))
if x1_eq_x2 and not x1.requires_grad and not x2.requires_grad:
res.diagonal(dim1=-2, dim2=-1).fill_(0)
# Zero out negative values
return res.clamp_min_(0) | [
2771,
1260
] |
def METHOD_NAME(self, input, **opts):
try:
logger.info("Exporting raster {} with options: {}".format(input, json.dumps(opts)))
tmpfile = tempfile.mktemp('_raster.{}'.format(extension_for_export_format(opts.get('format', 'gtiff'))), dir=settings.MEDIA_TMP)
export_raster_sync(input, tmpfile, **opts)
result = {'file': tmpfile}
if settings.TESTING:
TestSafeAsyncResult.set(self.request.id, result)
return result
except Exception as e:
logger.error(str(e))
return {'error': str(e)} | [
294,
5227
] |
def METHOD_NAME(G):
r"""
Creates an OpenPNM Network from a undirected NetworkX graph object
Parameters
----------
G : networkx.classes.graph.Graph Object
The NetworkX graph. G should be undirected. The numbering of nodes
should be numeric (int's), zero-based and should not contain any
gaps, i.e. ``G.nodes() = [0,1,3,4,5]`` is not allowed and should be
mapped to ``G.nodes() = [0,1,2,3,4]``.
Returns
-------
network : dict
An OpenPNM network dictionary
Notes
-----
1. Each node in a NetworkX object (i.e. ``G``) can be assigned properties
using syntax like ``G.node[n]['diameter'] = 0.5`` where ``n`` is the
node number. There is no need to precede the property name with any
indication that it is pore data such as \'pore\_\'. OpenPNM will prepend
\'pore.\' to each property name.
2. Since \'pore.coords\' is so central to OpenPNM it should be specified
in the NetworkX object as \'coords\', and the [X, Y, Z] coordinates of
each node should be a 1x3 list.
3. Edges in a NetworkX object are accessed using the index numbers of the
two nodes it connects, such as ``G.adj[2][3]['length'] = 0.1``
indicating the edge that connects nodes 2 and 3. There is no need to
precede the property name with any indication that it is throat data such
as \'throat\_\'. OpenPNM will prepend \'throat.\' to each property name.
4. The \'throat.conns\' property is essential to OpenPNM, but this does NOT
need to be specified explicitly as a property in NetworkX. The
connectivity is embedded into the network representation and is extracted
by OpenPNM.
"""
import networkx as nx
from openpnm.network import Network
net = {}
# Ensure G is an undirected networkX graph with numerically numbered
# nodes for which numbering starts at 0 and does not contain any gaps
if not isinstance(G, nx.Graph): # pragma: no cover
raise Exception('Provided object is not a NetworkX graph.')
if nx.is_directed(G): # pragma: no cover
raise Exception('Provided graph is directed. Convert to undirected graph.')
if not all(isinstance(n, int) for n in G.nodes()): # pragma: no cover
raise Exception('Node numbering is not numeric. Convert to int.')
if min(G.nodes()) != 0: # pragma: no cover
raise Exception('Node numbering does not start at zero.')
if max(G.nodes()) + 1 != len(G.nodes()): # pragma: no cover
raise Exception('Node numbering contains gaps. Map nodes to remove gaps.')
# Parsing node data
Np = len(G)
net.update({'pore.all': np.ones((Np,), dtype=bool)})
for n, props in G.nodes(data=True):
for item in props.keys():
val = props[item]
dtype = type(val)
# Remove prepended pore. and pore_ if present
for b in ['pore.', 'pore_']:
item = item.replace(b, '')
# Create arrays for subsequent indexing, if not present already
if 'pore.'+item not in net.keys():
if dtype == str: # handle strings of arbitrary length
net['pore.'+item] = np.ndarray((Np,), dtype='object')
elif dtype is list:
dtype = type(val[0])
if dtype == str:
dtype = 'object'
cols = len(val)
net['pore.'+item] = np.ndarray((Np, cols), dtype=dtype)
else:
net['pore.'+item] = np.ndarray((Np,), dtype=dtype)
net['pore.'+item][n] = val
# Parsing edge data
# Deal with conns explicitly
try:
conns = list(G.edges) # NetworkX V2
except Exception: # pragma: no cover
conns = G.edges() # NetworkX V1
conns.sort()
# Add conns to Network
Nt = len(conns)
net.update({'throat.all': np.ones(Nt, dtype=bool)})
net.update({'throat.conns': np.array(conns)})
# Scan through each edge and extract all its properties
i = 0
for t in conns:
props = G[t[0]][t[1]]
for item in props:
val = props[item]
dtype = type(val)
# Remove prepended throat. and throat_ if present
for b in ['throat.', 'throat_']:
item = item.replace(b, '')
# Create arrays for subsequent indexing, if not present already
if 'throat.'+item not in net.keys():
if dtype == str:
net['throat.'+item] = np.ndarray((Nt,), dtype='object')
if dtype is list:
dtype = type(val[0])
if dtype == str:
dtype = 'object'
cols = len(val)
net['throat.'+item] = np.ndarray((Nt, cols),
dtype=dtype)
else:
net['throat.'+item] = np.ndarray((Nt,), dtype=dtype)
net['throat.'+item][i] = val
i += 1
network = Network()
network.update(net)
return network | [
1228,
280,
13559
] |
def METHOD_NAME(self, monkeypatch):
'''pwd is not readable, use instance's tmpdir property'''
def mock_getcwd():
return '/tmp'
def mock_access(path, perm):
if path == '/tmp' and perm == 4:
return False
return True
def mock_expandvars(var):
if var == '$HOME':
return '/home/foobar'
return var
def mock_gettempdir():
return '/tmp/testdir'
def mock_chdir(path):
if path == '/tmp':
raise Exception()
return
monkeypatch.setattr(os, 'getcwd', mock_getcwd)
monkeypatch.setattr(os, 'chdir', mock_chdir)
monkeypatch.setattr(os, 'access', mock_access)
monkeypatch.setattr(os.path, 'expandvars', mock_expandvars)
monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}})))
with patch('time.time', return_value=42):
am = basic.AnsibleModule(argument_spec={})
am._tmpdir = '/tmp2'
result = am._set_cwd()
assert result == am._tmpdir | [
9,
0,
1925,
8145,
1080,
927,
4015
] |
def METHOD_NAME() -> None:
lex = FilterSyntaxLexer().lex
data = "#some_tag ^some_link -^some_link"
assert [(tok.type, tok.value) for tok in lex(data)] == [
("TAG", "some_tag"),
("LINK", "some_link"),
("-", "-"),
("LINK", "some_link"),
]
data = "'string' string \"string\""
assert [(tok.type, tok.value) for tok in lex(data)] == [
("STRING", "string"),
("STRING", "string"),
("STRING", "string"),
]
with pytest.raises(FilterError):
list(lex("|")) | [
9,
5886,
756
] |
def METHOD_NAME(self):
self.assertEqual(self.enum.name_text, 'errors') | [
9,
156,
526,
610,
668,
156
] |
def METHOD_NAME():
resgrid = np.array([
[[100., 100., 100., 100., 100.], [150., 150., 150., 150., 150.], [100., 100., 100, 100., 100.]],
[[150., 150., 150., 150., 150.], [100., 100., 100., 100., 100.], [150., 150., 150., 150., 150.]],
])
# Transpose to get [[Y], [X], [Z]]
resgrid = resgrid.T
test = conv._strip_resgrid(resgrid, 1, 1, 1)
# We're padding one cell off either side of X and Y, and one cell
# off the end of Z. So we're left with the [150.] array in middle
# of the top row, with two elements from either side removed.
expected = np.array([[[150., 150., 150.]]]).T # Again, note the transpose.
np.testing.assert_array_equal(test, expected) | [
9,
1360,
-1
] |
def METHOD_NAME(levels):
"""
Create a general full-factorial design
Parameters
----------
levels : array-like
An array of integers that indicate the number of levels of each input
design factor.
Returns
-------
mat : 2d-array
The design matrix with coded levels 0 to k-1 for a k-level factor
Example
-------
::
>>> fullfact([2, 4, 3])
array([[ 0., 0., 0.],
[ 1., 0., 0.],
[ 0., 1., 0.],
[ 1., 1., 0.],
[ 0., 2., 0.],
[ 1., 2., 0.],
[ 0., 3., 0.],
[ 1., 3., 0.],
[ 0., 0., 1.],
[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 1., 1.],
[ 0., 2., 1.],
[ 1., 2., 1.],
[ 0., 3., 1.],
[ 1., 3., 1.],
[ 0., 0., 2.],
[ 1., 0., 2.],
[ 0., 1., 2.],
[ 1., 1., 2.],
[ 0., 2., 2.],
[ 1., 2., 2.],
[ 0., 3., 2.],
[ 1., 3., 2.]])
"""
n = len(levels) # number of factors
nb_lines = np.prod(levels) # number of trial conditions
H = np.zeros((nb_lines, n))
level_repeat = 1
range_repeat = np.prod(levels)
for i in range(n):
range_repeat //= levels[i]
lvl = []
for j in range(levels[i]):
lvl += [j]*level_repeat
rng = lvl*range_repeat
level_repeat *= levels[i]
H[:, i] = rng
return H | [
-1
] |
def METHOD_NAME(self, data: dict):
data["event"]["type"] = Event.Type.ACKNOWLEDGE
return super().METHOD_NAME(data) | [
24,
2026,
99
] |
def METHOD_NAME():
def group_func(d):
return d.time
dimension = 'time'
units = None
datasets = [
SimpleNamespace(time=datetime.datetime(2016, 1, 1), value='foo', id=UUID(int=10)),
SimpleNamespace(time=datetime.datetime(2016, 2, 1), value='bar', id=UUID(int=1)),
SimpleNamespace(time=datetime.datetime(2016, 1, 1), value='flim', id=UUID(int=9)),
]
group_by = GroupBy(group_func, dimension, units, sort_key=group_func)
grouped = Datacube.group_datasets(datasets, group_by)
dss = grouped.isel(time=0).values[()]
assert isinstance(dss, tuple)
assert len(dss) == 2
assert [ds.value for ds in dss] == ['flim', 'foo']
dss = grouped.isel(time=1).values[()]
assert isinstance(dss, tuple)
assert len(dss) == 1
assert [ds.value for ds in dss] == ['bar']
assert str(grouped.time.dtype) == 'datetime64[ns]'
assert grouped.loc['2016-01-01':'2016-01-15'] | [
9,
5421,
4146
] |
def METHOD_NAME(self):
"""
Test if gecos fields are built correctly (removing trailing commas)
"""
test_gecos = {
"fullname": "Testing",
"roomnumber": 1234,
"workphone": 22222,
"homephone": 99999,
}
expected_gecos_fields = "Testing,1234,22222,99999"
self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
test_gecos.pop("roomnumber")
test_gecos.pop("workphone")
expected_gecos_fields = "Testing,,,99999"
self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
test_gecos.pop("homephone")
expected_gecos_fields = "Testing"
self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields) | [
9,
56,
2442,
101
] |
def METHOD_NAME(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters | [
572,
386
] |
def METHOD_NAME(self, _utils: PulseAudioUtils) -> None:
logging.info("connected")
for dev in self.deferred:
self.regenerate_with_device(dev['Address'])
self.deferred = [] | [
69,
10792,
1338
] |
def METHOD_NAME():
return codecs.CodecInfo(
name='utf-8-sig',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
) | [
8277
] |
def METHOD_NAME():
# Basic view
assert_equal("/", reverse_with_get("desktop_views.index"))
# Arguments for the view
assert_equal("/desktop/api2/user_preferences/foo", reverse_with_get("desktop.api2.user_preferences", kwargs=dict(key="foo")))
# Arguments for the view as well as GET parameters
assert_equal("/desktop/api2/user_preferences/foo?a=1&b=2",
reverse_with_get("desktop.api2.user_preferences", kwargs=dict(key="foo"), get=dict(a=1,b=2)))
# You can use a list of args instead of kwargs, too
assert_equal("/desktop/api2/user_preferences/foo?a=1&b=2",
reverse_with_get("desktop.api2.user_preferences", args=["foo"], get=dict(a=1,b=2)))
# Just GET parameters
assert_equal("/?a=1", reverse_with_get("desktop_views.index", get=dict(a="1")))
# No GET parameters
assert_equal("/", reverse_with_get("desktop_views.index", get=dict())) | [
9,
1354,
41,
19
] |
def METHOD_NAME(thread, wrapper):
""" Adds the indicated core.Thread object, with the indicated Python
wrapper, to the thread list. Returns the new thread ID. """
global _nextThreadId
_threadsLock.acquire()
try:
threadId = _nextThreadId
_nextThreadId += 1
thread.setPythonIndex(threadId)
_threads[threadId] = (thread, {}, wrapper)
return threadId
finally:
_threadsLock.release() | [
238,
600
] |
def METHOD_NAME(obj, enable, placement = None, offset = 0, tv = None):
if tv is None:
from Show import TempoVis
tv = TempoVis(obj.Document)
tv.modify(ClipPlane(obj, enable, placement, offset))
return tv | [
4226,
1088
] |
def METHOD_NAME(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ... | [
86
] |
def METHOD_NAME(timer_client, start, interval):
meta = load_response(timer_client.create_job).metadata
transfer_data = TransferData(
source_endpoint=GO_EP1_ID, destination_endpoint=GO_EP2_ID
)
timer_job = TimerJob.from_transfer_data(transfer_data, start, interval)
response = timer_client.create_job(timer_job)
assert response.http_status == 201
assert response.data["job_id"] == meta["job_id"]
timer_job = TimerJob.from_transfer_data(dict(transfer_data), start, interval)
response = timer_client.create_job(timer_job)
assert response.http_status == 201
assert response.data["job_id"] == meta["job_id"]
req_body = json.loads(get_last_request().body)
if isinstance(start, datetime.datetime):
assert req_body["start"] == start.isoformat()
else:
assert req_body["start"] == start
if isinstance(interval, datetime.timedelta):
assert req_body["interval"] == interval.total_seconds()
else:
assert req_body["interval"] == interval
assert req_body["callback_url"] == slash_join(
get_service_url("actions"), "/transfer/transfer/run"
) | [
9,
129,
202
] |
def METHOD_NAME(self):
fake_module = MagicMock()
fake_module.params = fake_params
expected_cmd = [
fake_binary,
'--cluster', fake_cluster,
'zonegroup', 'modify',
'--rgw-realm=' + fake_realm,
'--rgw-zonegroup=' + fake_zonegroup,
'--endpoints=' + ','.join(fake_endpoints),
'--default',
'--master'
]
assert radosgw_zonegroup.modify_zonegroup(fake_module) == expected_cmd | [
9,
2444,
-1
] |
def METHOD_NAME(self):
"Test the recipe run view with POST request"
data = {"name": "name of the job"}
url = reverse('recipe_run', kwargs=dict(uid=self.recipe.uid))
request = fake_request(url=url, data=data, user=self.owner)
self.recipe.security = models.Analysis.AUTHORIZED
self.recipe.save()
response = views.recipe_run(request=request, uid=self.recipe.uid)
self.process_response(response=response, data=data, save=True, model=models.Job) | [
9,
3912,
22
] |
def METHOD_NAME(cmd):
if (options.debug == True):
print >>sys.stderr, "running cmd:",cmd
try:
retcode = subprocess.call(cmd, shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal: ", -retcode
else:
if (options.debug == True):
print >>sys.stderr, "Child returned code: ", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e | [
22,
462
] |
def METHOD_NAME(self, t):
"""Compute the value of u10 (real and imag), the offdiagonal term"""
pr, pi = self.u00(t, swap=-1.0)
return pr, pi | [
-1
] |
def METHOD_NAME(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scope_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
Gets a private endpoint connection.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group.
:param str scope_name: The name of the Azure Arc PrivateLinkScope resource.
"""
... | [
19,
547,
841,
550,
146
] |
def METHOD_NAME(self):
nc_path = cdl_to_nc(self.ref_cdl)
with self.assertRaisesRegex(
ValueError, ".*Must be True to enable mesh loading."
):
_ = load_meshes(nc_path) | [
9,
654,
4078
] |
def METHOD_NAME(expect_filename, expect_bucket, expect_key,
expect_extra_args=None, expect_config=None):
"""Make a mock upload function that asserts against expected arguments and
calls the Callback with the upload file size."""
def mock_upload_file(Filename=None, Bucket=None, Key=None, Callback=None,
ExtraArgs=None, Config=None):
assert Filename == expect_filename
assert Bucket == expect_bucket
assert expect_key == Key
assert expect_extra_args == ExtraArgs
if expect_config or Config:
for key, val in expect_config.items():
assert getattr(Config, key) == val
Callback(os.path.getsize(Filename))
return mock_upload_file | [
93,
248,
172
] |
def METHOD_NAME(self) -> Optional[str]:
"""
A resource identifier for the API the issue was created for.
"""
return pulumi.get(self, "api_id") | [
58,
147
] |
def METHOD_NAME(self) -> UploadFileDict:
"""Gives the files contained in this object as mapping of part name to encoded content."""
METHOD_NAME: UploadFileDict = {}
for param in self._parameters:
m_data = param.METHOD_NAME
if m_data:
METHOD_NAME.update(m_data)
return METHOD_NAME | [
6261,
365
] |
def METHOD_NAME(self):
editor = self.get_plugin(Plugins.Editor)
editor.sig_open_files_finished.connect(
self.update_all_editors) | [
69,
2977,
1272
] |
def METHOD_NAME(self, libraries):
"""Ensure that the list of libraries is valid.
`library` is presumably provided as a command option 'libraries'.
This method checks that it is a list of 2-tuples, where the tuples
are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(libraries, list):
raise DistutilsSetupError(
"'libraries' option must be a list of tuples")
for lib in libraries:
if not isinstance(lib, tuple) and len(lib) != 2:
raise DistutilsSetupError(
"each element of 'libraries' must a 2-tuple")
name, build_info = lib
if not isinstance(name, str):
raise DistutilsSetupError(
"first element of each tuple in 'libraries' "
"must be a string (the library name)")
if '/' in name or (os.sep != '/' and os.sep in name):
raise DistutilsSetupError("bad library name '%s': "
"may not contain directory separators" % lib[0])
if not isinstance(build_info, dict):
raise DistutilsSetupError(
"second element of each tuple in 'libraries' "
"must be a dictionary (build info)") | [
250,
3106,
245
] |
def METHOD_NAME(fs: fsspec.AbstractFileSystem, monkeypatch, file_name: str):
monkeypatch.setattr(TransactionalFile, "LOCK_TTL_SECONDS", 1)
writer_1 = TransactionalFile(file_name, fs)
writer_2 = TransactionalFile(file_name, fs)
writer_1.acquire_lock()
writer_1._stop_heartbeat()
time.sleep(2.0)
# Ensure a lock can be acquired after the TTL has expired
assert writer_2.acquire_lock(blocking=False)
writer_2.release_lock() | [
9,
171,
1853,
858,
3191
] |
def METHOD_NAME(self, job: "CreateBackupJob") -> None:
if job.backup_upload_error_message != "":
# If the job contains an error message we pass it along so the UI can display it.
self.creatingStateChanged.emit(is_creating = False, error_message = job.backup_upload_error_message)
else:
self.creatingStateChanged.emit(is_creating = False) | [
69,
172,
1756
] |
def METHOD_NAME(session, url):
for res in session.read(url):
status = res.get('phase', 'error')
percent = res.get('progress', None)
detail = res.get('detail', repr(res)),
if status == 'error':
text = 'error!'
else:
text = '{0}: {1:3.0f}%'.format(status, percent)
return text, status, detail | [
19,
86,
3064
] |
def METHOD_NAME(self):
self._test('=?us-ascii?b?dmk=?=', 'vi') | [
9,
53,
1484
] |
def METHOD_NAME(self, AttachmentsData, invoice_id):
AttachModel = self.env["fatturapa.attachments"]
for attach in AttachmentsData:
if not attach.NomeAttachment:
name = _("Attachment without name")
else:
name = attach.NomeAttachment
content = attach.Attachment.encode()
_attach_dict = {
"name": name,
"datas": content,
"description": attach.DescrizioneAttachment or "",
"compression": attach.AlgoritmoCompressione or "",
"format": attach.FormatoAttachment or "",
"invoice_id": invoice_id,
}
AttachModel.create(_attach_dict) | [
297,
81
] |
def METHOD_NAME(self):
return self._fp.METHOD_NAME() | [
7385
] |
def METHOD_NAME(dttm: str, grain: str, expected: str) -> None:
from superset.db_engine_specs.sqlite import SqliteEngineSpec
engine = create_engine("sqlite://")
connection = engine.connect()
connection.execute("CREATE TABLE t (dttm DATETIME)")
connection.execute("INSERT INTO t VALUES (?)", dttm)
# pylint: disable=protected-access
expression = SqliteEngineSpec._time_grain_expressions[grain].format(col="dttm")
sql = f"SELECT {expression} FROM t"
result = connection.execute(sql).scalar()
assert result == expected | [
9,
104,
11445,
7580
] |
def METHOD_NAME(self) -> cftime.DatetimeJulian:
return self._time | [
104
] |
def METHOD_NAME(self, key): | [
19,
75
] |
def METHOD_NAME(x, max_absval, num_bits):
y = x
# get maximum/minimum exponent
n1 = np.floor(np.log2(max_absval)) + (np.log2(max_absval) -
np.floor(np.log2(max_absval)) >= np.log2(1.5))
n2 = n1 + 1 - 2 ** (num_bits - 2)
pruning_threshold = 2 ** (n2 - 1)
# prune all small values
y[np.abs(x) < pruning_threshold] = 0.0
# quantize remaining values to powers of two
_i = y != 0
_s = np.sign(y[_i])
_b = np.log2(np.abs(y[_i]))
_d = np.log2(1.5) # quantization threshold
# _d = 0.5 use geometric mean
# _d = np.log2(1.5) use arithmetic mean
_e = np.floor(_b) + (_b - np.floor(_b) >= _d)
_e = np.maximum(n2, np.minimum(n1, _e))
y[_i] = _s * 2 ** (_e)
return y | [
1429
] |
def METHOD_NAME(self):
if self.firstPoint is not None:
# always make sure the sub paths are closed; the algorithm only works
# for closed paths.
self.closePath()
return self.intersectionCount | [
19,
14344
] |
def METHOD_NAME(is_sac):
mock_specs = mb.create_mock_banana_behavior_specs()
bc_settings = BehavioralCloningSettings(
demo_path=os.path.dirname(os.path.abspath(__file__)) + "/" + "testdcvis.demo"
)
bc_module = create_bc_module(mock_specs, bc_settings, True, is_sac)
stats = bc_module.update()
assert_stats_are_float(stats) | [
9,
13338,
5820,
1362,
86
] |
def METHOD_NAME(
message, subject, from_addr, to_addrs, cc_addrs, additional_headers, priority, logger
):
"""Sets headers on Mimetext message"""
message["Subject"] = subject
message["From"] = from_addr
message["To"] = ", ".join(to_addrs)
if cc_addrs:
# NOTE filter out unenhanced cc_addrs added by self.get_mimetext_message
cc_addrs = [cc for cc in cc_addrs if is_email(cc)]
message["Cc"] = ", ".join(cc_addrs)
if additional_headers:
for k, v in additional_headers.items():
message[k] = v
if priority and priority_header_is_valid(priority, logger):
priority = PRIORITIES[str(priority)].copy()
for key in priority:
message[key] = priority[key]
return message | [
0,
-1,
2131
] |
def METHOD_NAME():
# COO graph:
# [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
# [2, 4, 2, 3, 0, 1, 1, 0, 0, 1]
# [1, 1, 1, 1, 0, 0, 0, 0, 0, 0] -> edge type.
# num_nodes = 5, num_n1 = 2, num_n2 = 3
ntypes = {"n1": 0, "n2": 1}
etypes = {"n1:e1:n2": 0, "n2:e2:n1": 1}
metadata = gb.GraphMetadata(ntypes, etypes)
indptr = torch.LongTensor([0, 2, 4, 6, 8, 10])
indices = torch.LongTensor([2, 4, 2, 3, 0, 1, 1, 0, 0, 1])
type_per_edge = torch.LongTensor([1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
node_type_offset = torch.LongTensor([0, 2, 5])
return gb.from_csc(
indptr,
indices,
node_type_offset=node_type_offset,
type_per_edge=type_per_edge,
metadata=metadata,
) | [
19,
7271,
303
] |
def METHOD_NAME(
staff_api_client, permission_manage_orders, draft_order
):
# given
order = draft_order
order.status = OrderStatus.DRAFT
order.save(update_fields=["status"])
line = order.lines.first()
product = line.variant.product
product.channel_listings.update(available_for_purchase_at=None)
order_id = graphene.Node.to_global_id("Order", order.id)
variables = {"id": order_id}
staff_api_client.user.user_permissions.add(permission_manage_orders)
# when
response = staff_api_client.post_graphql(ORDER_CAN_FINALIZE_QUERY, variables)
content = get_graphql_content(response)
# then
assert content["data"]["order"]["canFinalize"] is False
errors = content["data"]["order"]["errors"]
assert len(errors) == 1
assert errors[0]["code"] == OrderErrorCode.PRODUCT_UNAVAILABLE_FOR_PURCHASE.name
assert errors[0]["field"] == "lines"
assert errors[0]["orderLines"] == [graphene.Node.to_global_id("OrderLine", line.pk)] | [
9,
1046,
977,
852,
1188,
1731,
43
] |
def METHOD_NAME(x, distribution1, distribution2):
if distribution1.n_dimensions != distribution2.n_dimensions:
raise NameError("mapping_dx: The two distributions are not compatible")
return distribution2.ppf_dx(distribution1.cdf(x)) * distribution1.cdf_dx(x) | [
445,
5710
] |
def METHOD_NAME(license_pools, identifier):
"""Find and return a LicensePool object with the specified identifier.
:param license_pools: List of LicensePool objects
:type license_pools: List[core.model.licensing.LicensePool]
:param identifier: Identifier to look for
:type identifier: str
:return: LicensePool object with the specified identifier, None otherwise
:rtype: Optional[core.model.licensing.LicensePool]
"""
for license_pool in license_pools:
if license_pool.identifier.identifier == identifier:
return license_pool
return None | [
19,
6226,
604,
769
] |
def METHOD_NAME(self):
steps, hit_addrs, finished = arch_data["mips64"]
self._emulate("mips64", "test_arrays", False, steps, hit_addrs, finished) | [
9,
7613
] |
def METHOD_NAME(config_path, bin_length, measurements, classifiers):
calculator = TimeBinsClfCalculator(config_path=config_path,
bin_length=bin_length,
measurements=measurements,
classifiers=classifiers)
calculator.run()
assert os.path.isfile(calculator.save_path)
os.remove(calculator.save_path) | [
9,
104,
4704,
7310,
8688,
1080,
331
] |
def METHOD_NAME(self, bcs=None):
"""Return a null cell to node map."""
if bcs is not None:
raise RuntimeError("Can't apply boundary conditions to a Constant")
return None | [
118,
1716,
422
] |
def METHOD_NAME(self, geometry_seq) -> List:
return self._jvm.PythonConverter.translateGeometrySeqToPython(geometry_seq) | [
711,
1525,
5355,
24,
440
] |
def METHOD_NAME(context):
"""
verify that the subscription is stored in mongo
:param context: It’s a clever place where you and behave can store information to share around. It runs at three levels, automatically managed by behave.
"""
props_mongo = properties_class.read_properties()[MONGO_ENV] # mongo properties dict
__logger__.debug(" >> verifying that subscription is stored in mongo")
mongo = Mongo(host=props_mongo["MONGO_HOST"], port=props_mongo["MONGO_PORT"], user=props_mongo["MONGO_USER"],
password=props_mongo["MONGO_PASS"])
ngsi = NGSI()
ngsi.verify_subscription_stored_in_mongo(mongo, context.cb.get_subscription_context(), context.cb.get_headers(), context.resp)
__logger__.info(" >> verified that subscription is stored in mongo") | [
1162,
3800,
983,
835,
137,
5125,
623
] |
def METHOD_NAME() -> None:
"""Write git attributes in ~/.config/git/attributes."""
git_config = home / ".config" / "git"
git_config.mkdir(exist_ok=True, parents=True)
line_to_add = "*.gds diff=gdsdiff\n"
# Specify the path to the .config/git/attributes file
dirpath = home / ".config" / "git"
dirpath.mkdir(exist_ok=True, parents=True)
file_path = dirpath / "attributes"
# Read the file to check if the line already exists
file_content = file_path.read_text() if file_path.exists() else ""
# Add the line only if it doesn't exist
if line_to_add not in file_content:
with open(file_path, "a") as file:
file.write(line_to_add) | [
77,
1493,
177
] |
def METHOD_NAME(self):
path = 'path'
issue2 = self.issue.relative_to(path)
self.issue.origin.relative_to.assert_called_with(path)
self.assertIs(self.issue.message, issue2.message)
self.assertIs(self.issue.severity, issue2.severity)
self.assertIs(self.issue.kind, issue2.kind)
self.assertIs(self.issue.origin.relative_to(path), issue2.origin) | [
9,
1821,
24,
41,
1788
] |
def METHOD_NAME(
script: PipTestEnvironment,
) -> None:
"""
Test search exit status code for no matches
"""
result = script.pip("search", "nonexistentpackage", expect_error=True)
assert result.returncode == NO_MATCHES_FOUND, result.returncode | [
9,
1070,
538,
452,
544,
1646,
8818
] |
def METHOD_NAME():
with FluxExecutor() as executor:
executor.start()
future = executor.submit(bad_foo, {})
with pytest.raises(ValueError, match=ERRMSG):
future.result()
assert isinstance(future.exception(), ValueError) | [
9,
1366
] |