text (string, lengths 15 to 7.82k) | ids (sequence, lengths 1 to 7)
---|---
def METHOD_NAME(self):
"""Get default Frida Scripts."""
combined_script = []
header = []
if not self.defaults:
return header
def_scripts = os.path.join(self.frida_dir, 'default')
files = glob.glob(def_scripts + '**/*.js', recursive=True)
for item in files:
script = Path(item)
if script.stem in self.defaults:
header.append('send("Loaded Frida Script - {}");'.format(
script.stem))
combined_script.append(script.read_text())
return header + combined_script | [
19,
235,
2942
] |
def METHOD_NAME(self):
return self.FAN_NUM_1_IDX | [
19,
2667,
1337,
447
] |
def METHOD_NAME(self, docnames: Iterable[str], otherdata: dict[str, Any]) -> None:
for labelid, (doc, eqno) in otherdata['objects'].items():
if doc in docnames:
self.equations[labelid] = (doc, eqno)
for docname in docnames:
self.data['has_equations'][docname] = otherdata['has_equations'][docname] | [
411,
-1
] |
def METHOD_NAME(product_ids: List[int]):
products = Product.objects.filter(pk__in=product_ids)
update_products_discounted_prices(products) | [
86,
4866,
5600,
2357,
758
] |
def METHOD_NAME(scope):
from PYME.Acquire.Hardware.Simulator import METHOD_NAME
scope.METHOD_NAME = METHOD_NAME.SimController(scope)
scope.METHOD_NAME.gen_fluors_wormlike() | [
-1
] |
def METHOD_NAME(
self, dataset_name: str, feature_name: str, global_mean: float, global_count: float
) -> float:
df = self.data[dataset_name]
tmp = (df[feature_name] - global_mean) * (df[feature_name] - global_mean)
variance = tmp.sum() / (global_count - 1)
return variance.item() | [
2873,
41,
314
] |
def METHOD_NAME(three_experiments_family_same_name, monkeypatch, capsys):
"""Test that two experiments with the same name and different versions are correctly printed
even when one of them has a child.
"""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(["list"])
captured = capsys.readouterr().out
assert (
captured
== """\
┌test_single_exp-v2
test_single_exp-v1┤
└test_single_exp_child-v1 | [
9,
2962,
6498,
1101,
156
] |
def METHOD_NAME(self, viewer, event):
scale_factor = event.scale
self.stretch_colormap(scale_factor) | [
-1,
905
] |
def METHOD_NAME(self, *vargs, **kwargs):
"""
No-op logging. Only level needed for now.
"""
pass | [
290
] |
def METHOD_NAME():
class TestOp(aesara.graph.op.Op):
itypes = [dvector, dvector, dvector]
otypes = [dvector]
def perform(self, node, inputs, outputs):
pass
msg = r"^Invalid input types for Op.*"
with pytest.raises(TypeError, match=msg):
TestOp()(dvector(), dscalar(), dvector()) | [
9,
441,
532,
362,
119
] |
def METHOD_NAME(self, config):
desc = super().METHOD_NAME(config)
desc.extend(self.describe_hw_config(config))
return desc | [
567,
1066,
830,
1067
] |
def METHOD_NAME(
configuration: Dict[str, Any],
block_id: int,
previous_block_output: int,
) -> Tuple[int, int]:
"""Validate block arguments.
Args:
configuration: Architecture configuration.
block_id: Block ID.
previous_block_output: Previous block output size.
Returns:
input_size: Block input size.
output_size: Block output size.
"""
block_type = configuration.get("block_type")
if block_type is None:
raise ValueError(
"Block %d in encoder doesn't have a type assigned. " % block_id
)
if block_type in ["branchformer", "conformer", "ebranchformer"]:
if configuration.get("linear_size") is None:
raise ValueError(
"Missing 'linear_size' argument for X-former block (ID: %d)" % block_id
)
if configuration.get("conv_mod_kernel_size") is None:
raise ValueError(
"Missing 'conv_mod_kernel_size' argument for X-former block (ID: %d)"
% block_id
)
input_size = configuration.get("hidden_size")
output_size = configuration.get("hidden_size")
elif block_type == "conv1d":
output_size = configuration.get("output_size")
if output_size is None:
raise ValueError(
"Missing 'output_size' argument for Conv1d block (ID: %d)" % block_id
)
if configuration.get("kernel_size") is None:
raise ValueError(
"Missing 'kernel_size' argument for Conv1d block (ID: %d)" % block_id
)
input_size = configuration["input_size"] = previous_block_output
else:
raise ValueError("Block type: %s is not supported." % block_type)
return input_size, output_size | [
187,
573,
134
] |
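A hedged usage sketch for the block validator above; the configuration keys mirror the checks in the snippet, and the sizes are illustrative only.
conformer_cfg = {"block_type": "conformer", "hidden_size": 256, "linear_size": 1024, "conv_mod_kernel_size": 31}
in_size, out_size = METHOD_NAME(conformer_cfg, block_id=1, previous_block_output=256)
# in_size == 256 and out_size == 256 (both read from "hidden_size")
conv_cfg = {"block_type": "conv1d", "output_size": 512, "kernel_size": 3}
in_size, out_size = METHOD_NAME(conv_cfg, block_id=2, previous_block_output=256)
# in_size == 256 (the previous block output) and out_size == 512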
def METHOD_NAME(vertices):
return _dist(vertices, np.roll(vertices, -1, axis=-2)).sum(-1) | [
9229,
-1
] |
def METHOD_NAME(self, content):
a_content = get_active_lines(content)
if not a_content:
raise ParseException("mongod.conf is empty or all lines are comments")
self.is_yaml = self._file_type_is_yaml(a_content)
try:
if self.is_yaml:
self.data = yaml.safe_load('\n'.join(content))
else:
self.data = split_kv_pairs(content, use_partition=True)
except Exception as e:
raise ParseException('mongod conf parse failed: %s', e) | [
214,
459
] |
def METHOD_NAME() -> Tuple[Callable, Callable]:
buffer = []
def __print(*args):
for arg in args:
buffer.append(arg)
def __show():
nonlocal buffer
print("".join(buffer), flush=True, end="")
buffer = []
return __print, __show | [
4360,
38
] |
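A hedged usage sketch for the buffered printer above; output only reaches stdout once the returned show callable runs.
_print, _show = METHOD_NAME()
_print("step 1 done\n", "step 2 done\n")  # buffered, nothing is printed yet
_show()  # flushes "step 1 done\nstep 2 done\n" to stdout and clears the buffer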
def METHOD_NAME(self, k=6): # pragma: no cover
"""Computes a tensor with the eigenvectors for the Hamiltonian.
Args:
k (int): Number of eigenvalues to calculate if the Hamiltonian
was created using a sparse matrix. This argument is ignored
if the Hamiltonian was created using a dense matrix.
See :meth:`qibo.backends.abstract.AbstractBackend.eigh` for
more details.
"""
raise_error(NotImplementedError) | [
12245
] |
def METHOD_NAME(x):
return log(x)/log(2.0) | [
5280
] |
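A hedged sketch of the change-of-base formula above, assuming log is math.log as the snippet implies.
METHOD_NAME(8.0)  # 3.0, i.e. the base-2 logarithm of 8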
def METHOD_NAME():
_assert_aie(
"[-__-'] Girls und Panzer OVA Anzio-sen [BD 1080p FLAC] [231FDA45].mkv",
name="Girls und Panzer OVA Anzio-sen",
subberTag="-__-'",
extension="mkv",
resolution="1080p",
releaseSource=["BD"],
audioType=["FLAC"],
hash="231FDA45",
) | [
9,
17531,
17203,
61,
654,
-1
] |
def METHOD_NAME(self):
"""If bad year range definition, a UserError is raised.
Range origin must be expressed on 4 digits.
"""
with self.assertRaises(ui.UserError):
self._setup_config(bucket_year=['62-64']) | [
9,
1068,
842,
661,
2483,
-1
] |
def METHOD_NAME(
tiles: np.ndarray,
tile_shape: Tuple[int, ...],
sample_shape: Optional[Tuple[int, ...]],
dtype: Optional[Union[str, np.dtype]] = None,
) -> np.ndarray:
"""Coalesce tiles into a single array of shape `sample_shape`.
Args:
tiles (np.ndarray): numpy object array of tiles.
tile_shape (Tuple[int, ...]): Tile shape. Corner tiles may be smaller than this.
sample_shape (Optional, Tuple[int, ...]): Shape of the output array. The sum of all actual tile shapes are expected to be equal to this.
dtype (Optional, Union[str, np.dtype]): Dtype of the output array. Should match dtype of tiles.
Raises:
TypeError: If `tiles` is not deserialized.
Returns:
np.ndarray: Sample array from tiles.
"""
if dtype is None:
dtype = next(iter(tiles.flat)).dtype
ndim = tiles.ndim
sample_shape = sample_shape or tuple( # Infer sample shape from tile shapes
sum(
tile.shape[i]
for tile in tiles[tuple(slice(None) if j == i else 0 for j in range(ndim))] # type: ignore
)
for i in range(ndim)
)
sample = np.empty(sample_shape, dtype=dtype)
if tiles.size <= 0:
return sample
for tile_coords, tile in np.ndenumerate(tiles):
low = np.multiply(tile_coords, tile_shape)
high = low + tile.shape
idx = tuple(slice(l, h) for l, h in zip(low, high))
view = sample[idx]
view[:] = tile
return sample | [
9515,
299
] |
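A hedged usage sketch for the tile-coalescing helper above: four 2x2 tiles stitched into a 4x4 sample, assuming numpy is imported as np as in the snippet.
tiles = np.empty((2, 2), dtype=object)
tiles[0, 0] = np.zeros((2, 2))
tiles[0, 1] = np.ones((2, 2))
tiles[1, 0] = np.full((2, 2), 2.0)
tiles[1, 1] = np.full((2, 2), 3.0)
sample = METHOD_NAME(tiles, tile_shape=(2, 2), sample_shape=(4, 4))
# sample is a 4x4 float64 array whose quadrants hold 0, 1, 2 and 3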
def METHOD_NAME(self) -> Optional[str]:
"""
Tenant ID.
"""
return pulumi.get(self, "tenant_id") | [
4154,
147
] |
def METHOD_NAME(var):
val = os.environ.get(var)
if val is None:
print("ERROR: Env var ", var, " not set.")
sys.exit(1)
return val | [
19,
485,
486
] |
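A hedged usage sketch for the environment lookup above; the variable name is illustrative, and os and sys are assumed imported as in the snippet.
os.environ["API_TOKEN"] = "secret"  # illustrative variable, set here only for the demo
token = METHOD_NAME("API_TOKEN")  # returns "secret"
# METHOD_NAME("SOME_UNSET_VAR") would print an error and exit with status 1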
def METHOD_NAME(self):
"""reads header from self.input_file"""
pass | [
203,
572
] |
def METHOD_NAME(path: Path, pretend=False):
"""Create the virtual environment with the first technique available.
(``virtualenv`` is preferred because it is faster).
"""
for creator in (create_with_virtualenv, create_with_stdlib):
with suppress(ImportError):
creator(path, pretend)
break
else:
# no break statement found, so no creator function executed correctly
raise NotInstalled() | [
129
] |
def METHOD_NAME(context: Context) -> None:
"""Check that the response contains the measurements."""
assert_in("measurements", context.response.json()) | [
250,
3378,
4512,
6529
] |
def METHOD_NAME(context, timeout_s):
timeout_s = int(timeout_s or 30)
timeout = time.monotonic() + timeout_s
ready = False
while not ready and time.monotonic() < timeout:
try:
log = slurp_file('naemon.log')
# When we see this line in the log, we'll wait 1 more second and
# then Naemon should be ready, with signal handlers setup so that a
# test can SIGTERM it.
if 'Successfully launched command file worker with pid' in log:
ready = True
time.sleep(1)
break
except OSError:
pass
time.sleep(1)
assert ready, "Naemon did not start within %d seconds" % timeout_s | [
6726,
3550,
61,
1338
] |
def METHOD_NAME(func, to_json=True):
try:
if to_json:
bottle.response.content_type = "application/json"
if bottle.request.method in ["DELETE", "POST", "PUT"]:
if bottle.request.content_type == "application/json":
params = bottle.request.json
else:
params = bottle.request.forms
else:
# bottle.request.query doesn't decode unicode properly, so do it ourselves
params = {}
if bottle.request.query_string != "":
for item in bottle.request.query_string.split("&"):
key, value = item.split("=", 1)
# Replaces "+" with " " and then unquote
params[key] = unquote_plus(value)
start_time = time.time()
result = func(params)
end_time = time.time()
result = json.dumps(result) if to_json else result
hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
return result
except Exception as e:
import traceback
if not isinstance(e, ValueError):
traceback.print_exc()
exc_type, exc_value, exc_traceback = sys.exc_info()
error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
return json.dumps({"error": error_str}) if to_json else error_str | [
1209,
128
] |
def METHOD_NAME(self) -> int: | [
724
] |
def METHOD_NAME(self):
if self.settings.os == "Windows":
self.cpp_info.libs = ["pdcurses"]
elif self.settings.os in ("FreeBSD", "Linux"):
self.cpp_info.includedirs.append(os.path.join("include", "xcurses"))
self.cpp_info.libs = ["XCurses"] | [
360,
100
] |
def METHOD_NAME(
context: Context, data_dict: Optional[DataDict] = None
) -> Activity:
try:
return context["activity"]
except KeyError:
if not data_dict:
data_dict = {}
id = data_dict.get("id", None)
if not id:
raise tk.ValidationError(
{"message": "Missing id, can not get Activity object"}
)
obj = Activity.get(id)
if not obj:
raise tk.ObjectNotFound()
# Save in case we need this again during the request
context["activity"] = obj
return obj | [
19,
809,
279
] |
def METHOD_NAME(self, m):
user = StaffUserFactory.create()
url = furl(reverse("api:iotypen-list"))
url.args["zgw_api_group"] = "INVALID"
self.client.force_login(user)
response = self.client.get(url.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), []) | [
9,
527,
41,
532,
49
] |
def METHOD_NAME(index):
logging.info(f"Selecting index {index}")
proc = subprocess.Popen(['btmgmt', '-i', str(index), 'power', 'off'],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate() | [
-1,
828,
656
] |
def METHOD_NAME(_alias, **options):
"""Lookup and instantiate a formatter by alias.
Raises ClassNotFound if not found.
"""
cls = find_formatter_class(_alias)
if cls is None:
raise ClassNotFound("no formatter found for name %r" % _alias)
return cls(**options) | [
19,
2931,
604,
156
] |
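A hedged usage sketch for the formatter lookup above; the alias and option are illustrative, and find_formatter_class is assumed to resolve Pygments-style aliases as the snippet implies.
formatter = METHOD_NAME("html", linenos=True)  # instantiates the class registered under "html"
# an unknown alias raises ClassNotFound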
def METHOD_NAME(self, field, index=None, metadata=None):
'''
:param field: Output field name of this analysis
:type field: string
:param index: If the referenced field is an array, optionally specify an index (starting from 0) to indicate a particular member of the array
:type index: int
:param metadata: If the referenced field is of a data object class, a string indicating the metadata that should be read, e.g. "name", "properties.propkey", "details.refgenome"
:type metadata: string
Returns a dict containing a valid reference to an output of this analysis.
'''
link = {"$dnanexus_link": {"analysis": self._dxid, "field": field}}
if index is not None:
link["$dnanexus_link"]["index"] = index
if metadata is not None:
link["$dnanexus_link"]["metadata"] = metadata
return link | [
19,
146,
2360
] |
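A hedged sketch of the reference dict the method above returns; the analysis id and field name are illustrative only.
link = analysis.METHOD_NAME("mappings", index=0, metadata="name")
# {"$dnanexus_link": {"analysis": "analysis-xxxx", "field": "mappings", "index": 0, "metadata": "name"}}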
def METHOD_NAME(_: WIDParams):
"""
Please configure IUT's OOB data flag with 'No remote OOB data present'
TODO: This is done by default but we should set it explicitly
"""
return True | [
5794,
5795,
-1
] |
def METHOD_NAME(value):
return 1 if value else 0 | [
137,
1111
] |
def METHOD_NAME(rave_robot, q, qd, qdd, forceslist=None, returncomponents=True):
"""Inverse dynamics equation.
Simple wrapper around OpenRAVE's ComputeInverseDynamics
function. Return the numerical values of the components of the
inverse dynamics equation.
M(q) qdd + C(q, qd) qd + g(q)
= t1 + t2 + t3
Parameters
----------
rave_robot : OpenRAVE.robot
q : (_N, ) ndarray
Joint position.
qd : (_N, ) ndarray
Joint velocity.
qdd : (_N, ) ndarray
Joint acceleration.
returncomponents : Bool
If True, return the list [t1, t2, t3]
If False, return t1 + t2 + t3
Returns
-------
res : (3, ) List, or ndarray
See returncomponents parameter.
"""
if np.isscalar(q): # Scalar case
_q = [q]
_qd = [qd]
_qdd = [qdd]
else:
_q = q
_qd = qd
_qdd = qdd
# Temporary remove kinematic Limits
vlim = rave_robot.GetDOFVelocityLimits()
alim = rave_robot.GetDOFAccelerationLimits()
rave_robot.SetDOFVelocityLimits(100 * vlim)
rave_robot.SetDOFAccelerationLimits(100 * alim)
# Do computation
with rave_robot:
rave_robot.SetDOFValues(_q)
rave_robot.SetDOFVelocities(_qd)
res = rave_robot.ComputeInverseDynamics(
_qdd, forceslist, returncomponents=returncomponents
)
# Restore kinematic limits
rave_robot.SetDOFVelocityLimits(vlim)
rave_robot.SetDOFAccelerationLimits(alim)
return res | [
5862,
2297
] |
async def METHOD_NAME(self):
dataflow = create_dataflow(
extract_tar_archive,
{
"input_file_path": self.test_file_pth,
"output_directory_path": self.test_dir_pth,
},
)
m_open = mock_open()
with patch("builtins.open", m_open), patch(
"tarfile.TarFile.extractall"
), patch("tarfile.TarInfo.fromtarfile", m_open):
async for _, _ in run(dataflow):
m_open.assert_any_call("test/path/to/tar_file.tar", "rb") | [
9,
297,
754,
441
] |
def METHOD_NAME(self):
"Test ML.TimeObject constructor"
timeObject = ML.TimeObject()
self.assertEqual(timeObject.GetTime(), 0.0) | [
9,
2821
] |
def METHOD_NAME(time_of_interest: datetime):
""" Returns the start and end datetimes for hourly readings based on given time of interest """
# by default, we want the past 5 days, and if available the next 10 days.
start_time_stamp = time_of_interest - timedelta(days=5)
# the UI is interested in hourly reading before and after the time of interest.
end_time_stamp = time_of_interest + timedelta(days=10)
return start_time_stamp, end_time_stamp | [
19,
104,
3223
] |
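A hedged usage sketch for the time-window helper above: a 5-day lookback and a 10-day lookahead around the time of interest, assuming datetime and timedelta are imported as in the snippet.
start, end = METHOD_NAME(datetime(2023, 6, 15, 12, 0))
# start == datetime(2023, 6, 10, 12, 0) and end == datetime(2023, 6, 25, 12, 0)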
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request | [
123,
377
] |
def METHOD_NAME(
data: dataBlob,
system: System,
instrument_code: str,
lower_position: float,
upper_position: float,
) -> bufferedOptimalPositions:
diag_contracts = dataContracts(data)
reference_price = system.rawdata.get_daily_prices(instrument_code).iloc[-1]
reference_contract = diag_contracts.get_priced_contract_id(instrument_code)
position_entry = bufferedOptimalPositions(
date=datetime.datetime.now(),
lower_position=lower_position,
upper_position=upper_position,
reference_price=reference_price,
reference_contract=reference_contract,
)
return position_entry | [
363,
195,
475
] |
def METHOD_NAME(font_size, is_bold=False):
"""creates a QFont"""
font = QFont()
font.setPointSize(font_size)
if is_bold:
font.setBold(is_bold)
return font | [
93,
2584
] |
def METHOD_NAME(graph):
"""Test that the Markov condition holds for each PGM graph."""
for node in graph.nodes:
parents = set(graph.predecessors(node))
non_descendants = graph.nodes - nx.descendants(graph, node) - {node} - parents
assert nx.d_separated(graph, {node}, non_descendants, parents) | [
9,
7173,
405
] |
def METHOD_NAME(self):
tc = CMakeToolchain(self)
tc.variables["BUILD_TESTING"] = False
tc.variables["BUILD_UTILITIES"] = False
tc.variables["ENABLE_TESTS"] = False
tc.variables["ENABLE_FILTER_TESTING"] = False
tc.variables["ENABLE_NETCDF_4"] = self.options.netcdf4
tc.variables["ENABLE_CDF5"] = self.options.cdf5
tc.variables["ENABLE_DAP"] = self.options.dap
tc.variables["ENABLE_BYTERANGE"] = self.options.byterange
tc.variables["USE_HDF5"] = self.options.with_hdf5
tc.variables["NC_FIND_SHARED_LIBS"] = self.options.with_hdf5 and self.dependencies["hdf5"].options.shared
# Honor BUILD_SHARED_LIBS from conan_toolchain (see https://github.com/conan-io/conan/issues/11840)
tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0077"] = "NEW"
tc.METHOD_NAME()
tc = CMakeDeps(self)
tc.METHOD_NAME() | [
567
] |
def METHOD_NAME(name: str) -> Generator[CheckResult, None, None]:
with checker.METHOD_NAME(name) as subcheck:
yield subcheck | [
250
] |
def METHOD_NAME(self) -> None:
for table_fqn in self.fqn_dbt_tables():
table: Table = self.openmetadata.get_by_name(
entity=Table, fqn=table_fqn, fields=["*"]
)
data_model = table.dataModel
self.assertTrue(len(data_model.columns) > 0)
self.assertIsNotNone(data_model.rawSql)
self.assertIsNotNone(data_model.sql)
self.assertIsNotNone(data_model.upstream)
self.assertIsNotNone(data_model.description)
self.assertIsNotNone(table.description)
self.assertIsNotNone(data_model.owner)
self.assertIsNotNone(table.owner)
self.assertTrue(len(data_model.tags) > 0)
self.assertTrue(len(table.tags) > 0) | [
9,
5399
] |
def METHOD_NAME():
"""Returns the symbolic positive infinity
Returns
----------
neg_inf : Var
A symbolic var that indicates negative infinity
"""
return _ffi_api.NegInf() | [
1961,
1962
] |
def METHOD_NAME(self, env_name, num_workers):
"""These envs only have a single action and might cause unique problems with 0-D vs 1-D tensors."""
self._run_test_env(env=env_name, num_workers=num_workers, batched_sampling=True) | [
9,
97,
1006,
6,
3781
] |
def METHOD_NAME(
vertices: AbstractSet[str], edges: Dict[str, AbstractSet[str]]
) -> Iterator[AbstractSet[str]]:
"""Compute Strongly Connected Components of a directed graph.
Args:
vertices: the labels for the vertices
edges: for each vertex, gives the target vertices of its outgoing edges
Returns:
An iterator yielding strongly connected components, each
represented as a set of vertices. Each input vertex will occur
exactly once; vertices not part of a SCC are returned as
singleton sets.
From http://code.activestate.com/recipes/578507/.
"""
identified: Set[str] = set()
stack: List[str] = []
index: Dict[str, int] = {}
boundaries: List[int] = []
def dfs(v: str) -> Iterator[Set[str]]:
index[v] = len(stack)
stack.append(v)
boundaries.append(index[v])
for w in edges[v]:
if w not in index:
yield from dfs(w)
elif w not in identified:
while index[w] < boundaries[-1]:
boundaries.pop()
if boundaries[-1] == index[v]:
boundaries.pop()
scc = set(stack[index[v] :])
del stack[index[v] :]
identified.update(scc)
yield scc
for v in vertices:
if v not in index:
yield from dfs(v) | [
8486,
2261,
811
] |
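A hedged usage sketch for the strongly-connected-components helper above; every vertex needs an entry in the edges mapping.
vertices = {"a", "b", "c", "d", "e"}
edges = {"a": {"b"}, "b": {"a"}, "c": {"d"}, "d": {"c"}, "e": set()}
sccs = list(METHOD_NAME(vertices, edges))
# yields {"a", "b"}, {"c", "d"} and the singleton {"e"}; ordering follows the iteration order of vertices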
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}",
**self.url_parameters
) | [
274
] |
def METHOD_NAME(self):
form = FormFactory.create(
generate_minimal_setup=True,
formstep__form_definition__configuration={
"components": [{"type": "textfield", "key": "test"}]
},
)
variable = form.formvariable_set.get(key="test")
self.assertEqual("string", variable.data_type) | [
9,
1007,
1210,
365,
44,
5837,
0
] |
def METHOD_NAME(manifest_node) -> bool:
"""
Check if the manifest node is an ephemeral node
"""
if (
hasattr(manifest_node, "config")
and manifest_node.config
and hasattr(manifest_node.config, "materialized")
and manifest_node.config.materialized == "ephemeral"
):
return True
return False | [
250,
7850,
1716
] |
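A hedged usage sketch for the ephemeral-node check above, using SimpleNamespace objects as stand-ins for dbt manifest nodes.
from types import SimpleNamespace
node = SimpleNamespace(config=SimpleNamespace(materialized="ephemeral"))
assert METHOD_NAME(node) is True
assert METHOD_NAME(SimpleNamespace(config=None)) is False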
def METHOD_NAME(cls, part, source_params, creator, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
native_sysroot):
"""
Called before do_prepare_partition(), typically used to create
custom configuration files for a partition, for example
syslinux or grub config files.
"""
logger.debug("SourcePlugin: do_configure_partition: part: %s", part) | [
74,
111,
2312
] |
def METHOD_NAME(self, proposal):
if proposal.value is not None:
import warnings
warnings.warn("Layout properties overflow_x and overflow_y have been deprecated and will be dropped in a future release. Please use the overflow shorthand property instead", DeprecationWarning)
return proposal.value | [
187,
6130
] |
def METHOD_NAME(self, msg_params):
global_model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
# client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX)
average_weight_dict = msg_params.get(MyMessage.MSG_ARG_KEY_AVG_WEIGHTS)
client_schedule = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_SCHEDULE)
client_indexes = client_schedule[self.worker_id]
self.round_idx = 0
self.__train(global_model_params, client_indexes, average_weight_dict) | [
276,
277,
176
] |
def METHOD_NAME(self):
"""Get the supported interval by a reader.
Returns:
IntervalSet(): set of supported intervals.
""" | [
19,
9623
] |
def METHOD_NAME(
decoy: Decoy,
subject: TemperatureModuleCore,
mock_engine_client: EngineClient,
) -> None:
"""Should verify EngineClient call to deactivate temp module."""
subject.deactivate()
decoy.verify(
mock_engine_client.temperature_module_deactivate(module_id="1234"), times=1
) | [
9,
931
] |
def METHOD_NAME(self):
path = "Phylip/two.dat"
# derived from http://atgc.lirmm.fr/phyml/usersguide.html
with open(path) as handle:
list2 = list(PhylipIterator(handle))
self.assertEqual(len(list2), 1)
self.assertEqual(len(list2[0]), 5)
path = "Phylip/three.dat"
with open(path) as handle:
list3 = list(PhylipIterator(handle))
self.assertEqual(len(list3), 1)
self.assertEqual(len(list3[0]), 5)
for i in range(0, 5):
self.assertEqual(list2[0][i].id, list3[0][i].id)
self.assertEqual(list2[0][i].seq, list3[0][i].seq) | [
9,
1603,
61,
2756
] |
def METHOD_NAME(self,name):
if ("fiwalk" in self.in_element) and ("creator" in self.in_element) and ("version" in self.in_element):
raise XMLDone(self.cdata)
if ("fiwalk" in self.in_element) and ("fiwalk_version" in self.in_element):
raise XMLDone(self.cdata)
if ("version" in self.in_element) and ("dfxml" in self.in_element) and ("creator" in self.in_element):
raise XMLDone(self.cdata)
self.in_element.pop()
self.cdata = "" | [
1798,
669
] |
def METHOD_NAME(self):
ecs_client = mock.MagicMock
ecs_client.task_definitions = []
with mock.patch(
"prowler.providers.aws.services.ecs.ecs_service.ECS",
ecs_client,
):
from prowler.providers.aws.services.ecs.ecs_task_definitions_no_environment_secrets.ecs_task_definitions_no_environment_secrets import (
ecs_task_definitions_no_environment_secrets,
)
check = ecs_task_definitions_no_environment_secrets()
result = check.execute()
assert len(result) == 0 | [
9,
654,
758,
2706
] |
def METHOD_NAME(self, other_archive: Archiver) -> bool:
"""Replace the current zip with one copied from another archive"""
try:
with py7zr.SevenZipFile(self.path, "w") as zout:
for filename in other_archive.get_filename_list():
data = other_archive.read_file(
filename
) # This will be very inefficient if other_archive is a 7z file
if data is not None:
zout.writestr(data, filename)
except Exception as e:
logger.error("Error while copying to 7zip archive [%s]: from %s to %s", e, other_archive.path, self.path)
return False
else:
return True | [
215,
280,
1622
] |
def METHOD_NAME(self):
set_parallel_chunksize(0) | [
0,
1
] |
def METHOD_NAME(self):
self.invalid_key_message = ""
pn_fake_key_config = pnconf_copy()
pn_fake_key_config.publish_key = "fake"
PubNub(pn_fake_key_config).publish() \
.channel("ch1") \
.message("hey") \
.pn_async(self.callback)
self.event.wait()
assert self.status.is_error()
assert self.status.category is PNStatusCategory.PNBadRequestCategory
assert self.status.original_response[0] == 0
assert self.status.original_response[1] == 'Invalid Key'
assert "HTTP Client Error (400):" in str(self.status.error_data.exception)
assert "Invalid Key" in str(self.status.error_data.exception) | [
9,
532,
59
] |
def METHOD_NAME():
# Disable validation of envs
old_get_env = ENV_OPTS["tid"].getter
ENV_OPTS["tid"].getter = get_environment
yield
ENV_OPTS["tid"].getter = old_get_env | [
654,
16509,
250
] |
def METHOD_NAME(s: str, width: int, sep: str = '\n') -> str:
"""Fill paragraphs with newlines (or custom separator)."""
return sep.join(fill(p, width) for p in s.split(sep)) | [
1917,
1759
] |
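A hedged usage sketch for the paragraph filler above, assuming fill is textwrap.fill as the snippet implies.
text = "first paragraph that is fairly long\nsecond paragraph"
print(METHOD_NAME(text, width=20))
# first paragraph that
# is fairly long
# second paragraph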
def METHOD_NAME(authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListNamespaceKeysResult:
"""
Primary and secondary connection strings to the namespace.
Azure REST API version: 2021-11-01.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:relay:listNamespaceKeys', __args__, opts=opts, typ=ListNamespaceKeysResult).value
return AwaitableListNamespaceKeysResult(
key_name=pulumi.get(__ret__, 'key_name'),
primary_connection_string=pulumi.get(__ret__, 'primary_connection_string'),
primary_key=pulumi.get(__ret__, 'primary_key'),
secondary_connection_string=pulumi.get(__ret__, 'secondary_connection_string'),
secondary_key=pulumi.get(__ret__, 'secondary_key')) | [
245,
1194,
219
] |
def METHOD_NAME(cls, data, header_fields, sheet):
data_map = defaultdict(list)
for i in data:
row = cls.create_row(i, header_fields)
if sheet not in data_map:
data_map[sheet].append(list(row.keys()))
data_map[sheet].append(list(row.values()))
return data_map | [
238,
1346
] |
def METHOD_NAME(cmd, pin):
conn.sendall(compose(MsgType.HW_SYNC, cmd, pin))
while True:
data = receive(conn, hdr.size)
if not data:
log.warning("Data read timeout")
sys.exit(1)
msg_type, msg_id, msg_len = hdr.unpack(data)
if msg_type == MsgType.RSP:
log.debug(" > %2d,%2d : status %2d", msg_type, msg_id, msg_len)
elif msg_type == MsgType.HW or msg_type == MsgType.BRIDGE:
data = receive(conn, msg_len).decode('utf-8').split("\0")
log.debug(" > %2d,%2d,%2d : %s", msg_type, msg_id, msg_len, "=".join(data))
if data[0] == cmd[0]+'w' and data[1] == pin:
data = data[2:]
if len(data) > 1:
print(data)
else:
print(data[0])
break | [
74,
203
] |
def METHOD_NAME(editors_demoid):
"""
Return the list of editors
"""
resp = post('demo_get_editors_list', {"demo_id": editors_demoid})
response = resp.json()
if not response['status'] == 'OK':
print("ERROR: get_editors returned KO")
return
editors_list = {}
# print(response['editor_list'][0]['name'])
for editor in response['editor_list']:
editors_list[editor["mail"]] = editor["name"]
return editors_list | [
19,
7498
] |
def METHOD_NAME(self):
if not self.print_routes:
return
fib.FibRoutesInstalledCmd(self.cli_opts).run([]) | [
38,
6016,
3968,
1255
] |
def METHOD_NAME(step: ColorStep) -> AddLightActionRequest:
"""Helper to translate steps into their corresponding messages."""
return AddLightActionRequest(
transition_time=UInt16Field(step.transition_time_ms),
transition_type=LightTransitionTypeField(step.transition_type),
red=UInt8Field(step.color.r),
green=UInt8Field(step.color.g),
blue=UInt8Field(step.color.b),
white=UInt8Field(step.color.w),
) | [
4322,
1006,
280,
367
] |
def METHOD_NAME(proc):
logger.info('Try kill process')
times = 1
while proc.poll() is None:
proc.kill()
time.sleep(5)
if proc.poll() is not None:
logger.info('Process has been killed')
break
logger.info('Trial %d failed', times)
times += 1
if times > 3:
raise SystemExit() | [
1365,
643
] |
def METHOD_NAME(parent, info, page):
return Letters(parent) | [
6503,
717
] |
def METHOD_NAME():
return np.arange(5, 10) | [
5376
] |
def METHOD_NAME(time):
if not time:
return ''
ago = defaultfilters.METHOD_NAME(time)
# L10n: relative time in the past, like '4 days ago'
return gettext('{0} ago').format(ago) | [
-1
] |
def METHOD_NAME(self):
with self.app.app_context():
submissions = db.engine.execute(text("SELECT * FROM submissions")).fetchall()
# Submissions without a source should be deleted
assert len(submissions) == 1
for submission in submissions:
assert submission.source_id == self.valid_source_id
replies = db.engine.execute(text("SELECT * FROM replies")).fetchall()
# Replies without a source should be deleted
assert len(replies) == 1
for reply in replies:
assert reply.source_id == self.valid_source_id | [
250,
738
] |
def METHOD_NAME(self):
from corehq.messaging.scheduling.tasks import delete_alert_schedule_instances
with transaction.atomic():
self.deleted = True
self.deleted_on = datetime.utcnow()
self.save()
self.schedule.deleted = True
self.schedule.deleted_on = datetime.utcnow()
self.schedule.save()
delete_alert_schedule_instances.delay(self.schedule_id.hex) | [
1587,
34
] |
def METHOD_NAME(self, separator):
if separator and self._separator:
self._writer.write(self._separator) | [
77,
4509
] |
def METHOD_NAME(self, instance, text_color):
if not self.use_text_color:
self.use_text_color = True
return
self.text_color = get_hex_from_color(text_color)
self.use_text_color = False
self.foreground_color = (1, 1, 1, .999)
self._trigger_refresh_text() | [
69,
9451,
36
] |
def METHOD_NAME(self, add_data=True):
"""
Calculate the moment map
Parameters
----------
add_data : bool
Whether to add the resulting data object to the app according to ``add_results``.
"""
# Retrieve the data cube and slice out desired region, if specified
cube = self.dataset.get_object(cls=Spectrum1D, statistic=None)
spec_min, spec_max = self.spectral_subset.selected_min_max(cube)
slab = manipulation.spectral_slab(cube, spec_min, spec_max)
# Calculate the moment and convert to CCDData to add to the viewers
try:
n_moment = int(self.n_moment)
if n_moment < 0:
raise ValueError("Moment must be a positive integer")
except ValueError:
raise ValueError("Moment must be a positive integer")
# Need transpose to align JWST mirror shape: this is because specutils
# arranges the array shape as (nx, ny, nz) but 2D visualization
# assumes (ny, nx) as per row-major convention.
data_wcs = getattr(cube.wcs, 'celestial', None)
if data_wcs:
data_wcs = data_wcs.swapaxes(0, 1) # We also transpose WCS to match.
self.moment = CCDData(analysis.moment(slab, order=n_moment).T, wcs=data_wcs)
fname_label = self.dataset_selected.replace("[", "_").replace("]", "")
self.filename = f"moment{n_moment}_{fname_label}.fits"
if add_data:
self.add_results.add_results_from_plugin(self.moment)
msg = SnackbarMessage("{} added to data collection".format(self.results_label),
sender=self, color="success")
self.hub.broadcast(msg)
self.moment_available = True
return self.moment | [
1593,
6158
] |
def METHOD_NAME(self):
if not self._datum_index:
self._datum_col.create_index('datum_id', unique=True)
self._datum_col.create_index('resource')
self._datum_index = True | [
129,
2030,
724
] |
def METHOD_NAME(dtype: dt.Array, value, name):
value_type = dtype.value_type
try:
bigquery_type = BigQueryType.from_ibis(value_type)
except NotImplementedError:
raise com.UnsupportedBackendType(dtype)
else:
if isinstance(value_type, dt.Struct):
query_value = [
bigquery_param(dtype.value_type, struct, f"element_{i:d}")
for i, struct in enumerate(value)
]
bigquery_type = "STRUCT"
elif isinstance(value_type, dt.Array):
raise TypeError("ARRAY<ARRAY<T>> is not supported in BigQuery")
else:
query_value = value
result = bq.ArrayQueryParameter(name, bigquery_type, query_value)
return result | [
3578,
49,
877
] |
def METHOD_NAME(self) -> SemanticManifestLookup: # noqa: D
if self._semantic_manifest_lookup is None:
self._build_semantic_manifest_lookup()
assert self._semantic_manifest_lookup is not None
return self._semantic_manifest_lookup | [
6123,
1220,
1906
] |
def METHOD_NAME(self) -> list[Ref]:
result = []
for entry in self._playlists_dir.iterdir():
if entry.suffix not in [".m3u", ".m3u8"]:
continue
if not entry.is_file():
continue
playlist_path = entry.relative_to(self._playlists_dir)
result.append(translator.path_to_ref(playlist_path))
result.sort(key=operator.attrgetter("name"))
return result | [
947,
245
] |
def METHOD_NAME(request, namespace, metric_dict) -> Dict:
"""Get pod metrics and write them to a json"""
metrics = get_pod_metrics(request, namespace)
metric_dict[f"{request.node.name}+{time.time()}"] = metrics
write_to_json(f"pod-metrics-{get_test_file_name(request.node.fspath)}.json", metric_dict)
return metrics | [
1444,
1097
] |
def METHOD_NAME(func_name: str, *args):
"""
Call function from functions.py to avoid circular imports.
:param func_name: Name of function.
:return: Result of function call.
"""
from nncf.experimental.tensor import functions
fn = getattr(functions, func_name)
return fn(*args) | [
128,
559
] |
def METHOD_NAME(self) -> None:
seed = self.config.training.seed
if seed is None:
return
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = self.config.training.cudnn_benchmark | [
111,
484
] |
def METHOD_NAME(self):
input_words = [" this is unicode Юникод"]
correct_words = ["this is unicode юникод"]
users_state = self._post_words(input_words)
assert ''.join({content['status'] for (_, content) in users_state.items()}) == 'success'
for user in self.users:
self.assertListEqual(
list(users_state[user.username]['student_words'].keys()),
correct_words) | [
9,
774
] |
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags") | [
114
] |
def METHOD_NAME(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationEntityListResult"]
"""Lists all of the available Storage Sync Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationEntityListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storagesync.models.OperationEntityListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationEntityListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.METHOD_NAME.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationEntityListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.StorageSyncError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
) | [
245
] |
def METHOD_NAME(request, container):
return request.getfixturevalue('centers_' + container['shape']) | [
2683
] |
def METHOD_NAME(self) -> bool:
p, o, e = Popen_safe(self.make + ['--version'])
if p.returncode == 0 and ('GNU Make' in o or 'waf' in o):
return True
return False | [
1466,
494,
584
] |
def METHOD_NAME(self, start_lcm=1): | [
8951,
47,
-1
] |
def METHOD_NAME(self):
"""Test plot_shift()."""
x = np.random.normal(5.5, 2, 50)
y = np.random.normal(6, 1.5, 50)
plot_shift(x, y)
plot_shift(
x, y, n_boot=100, percentiles=[5, 55, 95], show_median=False, seed=456, violin=False
)
plot_shift(x, y, paired=True, n_boot=100, percentiles=[25, 75], confidence=0.90)
plt.close("all") | [
9,
1288,
929
] |
def METHOD_NAME(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = T.float32(0)
for k in range(128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] | [
1496
] |
def METHOD_NAME(self):
"""Measure performance with batch_size=1024, gpu=2 and
distribution_strategy='mirrored'
"""
batch_size = 1024
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
num_gpus=2,
distribution_strategy="mirrored",
optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"hierarchical_rnn", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
) | [
1668,
14528,
1697,
8202,
12859,
1667,
988
] |
def METHOD_NAME(self, volume):
"""Attach node to volunme
Args
name (str|list|tuple): Volume name(s)
"""
# Get node object using id
node = self.cloud.get_node_by_id(self.id)
# Attach volumes to node
volumes = volume if type(volume) in (list, tuple) else (volume,)
for v in volumes:
self.cloud.METHOD_NAME(node, self.cloud.get_volume_by_name(v))
return True | [
645,
2276
] |
def METHOD_NAME():
"""List fact names set on ansible rules."""
return list(sorted(collect_all_fact_names())) | [
19,
2985,
83
] |
def METHOD_NAME(self, subclassed): | [
9,
107,
362,
1195
] |