text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7) |
---|---|
async def METHOD_NAME(
self,
property_type_id: str,
*,
actor_id: UUID,
) -> PropertyTypeSchema:
"""Load an external property type."""
with with_actor(self.inner, actor_id):
await self.inner.load_external_property_type(
URL(property_type_id),
)
return await self.get_property_type(
property_type_id,
actor_id=actor_id,
is_after_load=True,
) | [
557,
1042,
44
] |
def METHOD_NAME(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp) | [
215
] |
def METHOD_NAME(A: dace.int64[5, 5], B: dace.int64):
return A <= B | [
9,
-1
] |
def METHOD_NAME(self):
L = self.tc.get('/SatoTateGroup/')
    data = L.get_data(as_text=True)
    assert 'Browse' in data and 'U(1)' in data and 'U(1)_2' in data and 'SU(2)' in data and 'Rational' in data | [
9,
57
] |
def METHOD_NAME(self) -> List[Instance]:
"""Get all passivbot instances running on this machine"""
signature = f"^{' '.join(INSTANCE_SIGNATURE_BASE)}"
pids = ProcessManager.get_pid(signature, all_matches=True)
if len(pids) == 0:
return []
instances_cmds = [ProcessManager.info(pid) for pid in pids]
    instances = []
for cmd in instances_cmds:
args = cmd.split(" ")
if len(args) <= 3:
continue
args = args[3:]
user = args[0]
symbol = args[1]
config = args[2]
flags = {}
if len(args[3:]) > 0:
it = iter(args[3:])
flags = dict(zip(it, it))
instance = Instance({
"user": user,
"symbol": symbol,
"config": config,
"flags": flags
})
if instance.is_running():
            instances.append(instance)
    return instances | [
416,
13692,
2553
] |
def METHOD_NAME(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="[") | [
2100,
5111
] |
def METHOD_NAME(self, waypoint: FlightWaypoint) -> bool:
if waypoint in self.nav_to_drop_off:
self.nav_to_drop_off.remove(waypoint)
return True
elif super().METHOD_NAME(waypoint):
return True
return False | [
34,
4599
] |
def METHOD_NAME(build_node, namespace):
node = build_node("foo.bar")
with namespace.enter_scope():
namespace["foo"] = AddressT()
with pytest.raises(UnknownAttribute):
get_possible_types_from_node(node) | [
9,
309,
46
] |
def METHOD_NAME(self, sql, rows=None):
"""Execute insert query
Args
------------
sql : string
sql insert query in INSERT INTO TABLE VALUES (%s,...) format
rows : list
list of tuples as table rows
Returns
------------
: list
inserted ids
"""
ids = []
    for row in rows or []:  # tolerate rows=None instead of raising TypeError
self._curs.execute(sql, row)
ids.append(self._curs.fetchone()[0])
return ids | [
408,
1413
] |
def METHOD_NAME(arr):
"""Compute the inverse of the non-zero elements of arr, not changing 0.
Parameters
----------
arr : ndarray
Returns
-------
arr_inv : ndarray
Array containing the inverse of the non-zero elements of arr, and
zero elsewhere.
"""
arr_inv = arr.copy()
nz = np.nonzero(arr)
arr_inv[nz] = 1 / arr[nz]
return arr_inv | [
3220,
7211
] |
async def METHOD_NAME(self, ctx, ven_dev = None): | [
8323
] |
def METHOD_NAME(self, expr):
'''
Parse a pythonic expression into symboliks state.
Example:
vw = vivisect.VivWorkspace()
vw.setMeta('Architecture','i386')
p = SymbolikExpressionParser(defwidth=vw.psize)
# If all var/imm elements are the "default width":
s = p.parseExpression('x + 30')
# Specifying index notation to override width:
s = p.parseExpression('x[2] + 30')
'''
a = ast.parse(expr)
return self.astToSymboliks(a.body[0]) | [
214,
1120
] |
def METHOD_NAME(value: int, num: int) -> bool:
"""Check if a variable is divisible by a number."""
return value % num == 0 | [
9,
11178
] |
def METHOD_NAME(each_version):
return Checker(each_version, False) | [
432,
130,
623,
1739
] |
def METHOD_NAME(client, project_id, cluster_id):
"""获取集群信息"""
context = copy.deepcopy(client.context)
# 原始集群ID
context["source_cluster_id"] = cluster_id
# 内部版bcs返回server_address
if "server_address" in context:
server_address_path = urlparse(context["server_address"]).path
else:
# 社区版直接bcs_api返回server_address_path
server_address_path = context["server_address_path"]
# API调用地址, 可以为http/https
server_address = f"{client._bcs_server_host}{server_address_path}"
context["server_address"] = server_address.rstrip("/")
# Kubectl Config地址, 必须为https
https_server_address = f"{client._bcs_https_server_host}{server_address_path}"
context["https_server_address"] = https_server_address.rstrip("/")
return context | [
19,
3761,
2059,
198
] |
def METHOD_NAME(
self,
rule_name: str,
variables: Optional[ParameterContainer] = None,
runtime_configuration: Optional[dict] = None,
) -> List[Domain]:
"""Obtains and returns Domain object, whose domain_kwargs consists of "column_list" (order-non-preserving).
Args:
rule_name: name of Rule object, for which "Domain" objects are obtained.
variables: Optional variables to substitute when evaluating.
runtime_configuration: Additional run-time settings (see "Validator.DEFAULT_RUNTIME_CONFIGURATION").
Returns:
List of domains that match the desired tolerance limits.
"""
batch_ids: List[str] = self.get_batch_ids(variables=variables) # type: ignore[assignment] # could be None
validator: Validator = self.get_validator(variables=variables) # type: ignore[assignment] # could be None
effective_column_names: List[str] = self.get_effective_column_names(
batch_ids=batch_ids,
validator=validator,
variables=variables,
)
if not (self.include_column_names and effective_column_names):
raise gx_exceptions.ProfilerExecutionError(
message=f'Error: "column_list" in {self.__class__.__name__} must not be empty.'
)
column_name: str
semantic_types_by_column_name: Dict[str, SemanticDomainTypes] = {
column_name: self.semantic_type_filter.table_column_name_to_inferred_semantic_domain_type_map[ # type: ignore[union-attr] # could be None
column_name
]
for column_name in effective_column_names
}
domains: List[Domain] = [
Domain(
domain_type=self.domain_type,
domain_kwargs={
"column_list": effective_column_names,
},
details={
INFERRED_SEMANTIC_TYPE_KEY: semantic_types_by_column_name,
},
rule_name=rule_name,
),
]
return domains | [
19,
3902
] |
def METHOD_NAME(args: List[str]) -> dict:
"""Executes the Selenium Manager Binary.
:Args:
- args: the components of the command being executed.
    :Returns: The parsed JSON "result" object, which contains the driver location.
"""
if logger.getEffectiveLevel() == logging.DEBUG:
args.append("--debug")
args.append("--output")
args.append("json")
command = " ".join(args)
logger.debug(f"Executing process: {command}")
try:
if sys.platform == "win32":
completed_proc = subprocess.METHOD_NAME(args, capture_output=True, creationflags=subprocess.CREATE_NO_WINDOW)
else:
completed_proc = subprocess.METHOD_NAME(args, capture_output=True)
stdout = completed_proc.stdout.decode("utf-8").rstrip("\n")
stderr = completed_proc.stderr.decode("utf-8").rstrip("\n")
output = json.loads(stdout)
result = output["result"]
except Exception as err:
raise WebDriverException(f"Unsuccessful command executed: {command}") from err
for item in output["logs"]:
if item["level"] == "WARN":
logger.warning(item["message"])
if item["level"] == "DEBUG" or item["level"] == "INFO":
logger.debug(item["message"])
if completed_proc.returncode:
raise WebDriverException(f"Unsuccessful command executed: {command}.\n{result}{stderr}")
return result | [
22
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Resources represents the SyncSets configuration.
"""
return pulumi.get(self, "resources") | [
1614
] |
def METHOD_NAME(self):
copy(self, "LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) | [
360
] |
def METHOD_NAME(self):
self.io = StringIO() | [
0,
1
] |
def METHOD_NAME():
conf_path = os.getenv("ZOPE_CONF_PATH", "parts/instance/zope.conf")
if conf_path is None or not os.path.exists(conf_path):
raise Exception('Could not find zope.conf at {}'.format(conf_path))
from Zope2 import configure
configure(conf_path)
import Zope2
app = Zope2.app()
from Testing.ZopeTestCase.utils import makerequest
app = makerequest(app)
app.REQUEST['PARENTS'] = [app]
from zope.globalrequest import setRequest
setRequest(app.REQUEST)
from AccessControl.SpecialUsers import system as user
from AccessControl.SecurityManagement import newSecurityManager
newSecurityManager(None, user)
run(app) | [
102,
61,
22
] |
def METHOD_NAME(self, model_name, model):
self.models[model_name] = model | [
238,
578
] |
def METHOD_NAME(self):
request = self.request_factory.get("/test/", SERVER_NAME="press.org")
request.session = {}
user = Account.objects.get(email='[email protected]')
user.preferred_timezone = "Europe/London"
user.save()
request.user = user
response = self.middleware.process_request(request)
self.assertEqual(request.timezone.zone, user.preferred_timezone) | [
9,
21,
3216,
331
] |
def METHOD_NAME(test):
cleanup(test)
config_path = os.path.join(test.testcase_path, 'assignment_config', 'complete_config.json')
try:
test.validate_complete_config(config_path)
except Exception:
traceback.print_exc()
raise | [
135,
437
] |
def METHOD_NAME(self, nqubits_length):
length = int(nqubits_length.split(",")[1])
for i in range(length):
decompose_clifford(self.random_clifford[i]) | [
104,
7426
] |
def METHOD_NAME(spec, state):
pow_chain = prepare_random_pow_chain(spec, 2)
pow_chain.head(-1).total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY + uint256(1)
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
run_validate_merge_block(spec, pow_chain, block, valid=False) | [
9,
187,
411,
573,
180,
1887,
1019
] |
def METHOD_NAME(self):
"""
`test_regresson_test_three_way_suggestion_merging` above
catches some of this bug, but not all of it.
Turns out the _order_ of the merging matters.
We need to set up:
A -> B
A -> C
B -> C
And then merge A -> B
"""
person_a_id = 35348
person_b_id = 75618
person_c_id = 108860
PersonA = PersonFactory(pk=person_a_id, name="A")
PersonB = PersonFactory(pk=person_b_id, name="B")
PersonC = PersonFactory(pk=person_c_id, name="C")
DuplicateSuggestion.objects.create(
person=PersonA,
other_person=PersonB,
user=self.user,
)
DuplicateSuggestion.objects.create(
person=PersonA,
other_person=PersonC,
user=self.user,
)
DuplicateSuggestion.objects.create(
person=PersonB,
other_person=PersonC,
user=self.user,
)
self.assertEqual(DuplicateSuggestion.objects.count(), 3)
PersonMerger(PersonA, PersonB).merge()
qs = DuplicateSuggestion.objects.all()
self.assertEqual(qs.count(), 1)
self.assertEqual(qs.get().person_id, person_a_id)
self.assertEqual(qs.get().other_person_id, person_c_id) | [
9,
2756,
3504,
1119,
75,
3868
] |
def METHOD_NAME(timeout_seconds: int) -> None:
"""Background thread which will kill the current process if the timeout elapses."""
time.sleep(timeout_seconds)
os.kill(os.getpid(), signal.SIGUSR1) | [
659,
8413
] |
def METHOD_NAME():
frappe.db.add_unique("Bin", ["item_code", "warehouse"], constraint_name="unique_item_warehouse") | [
69,
4284,
86
] |
def METHOD_NAME(self):
return self._meta.verbose_name + ' ' + str(self.id) | [
2893
] |
def METHOD_NAME():
target = lief.ELF.parse(get_sample('ELF/main.relr.elf'))
assert target.get(lief.ELF.DYNAMIC_TAGS.RELA) is not None
assert target.get(lief.ELF.DYNAMIC_TAGS.RELRSZ) is not None
assert target.get(lief.ELF.DYNAMIC_TAGS.RELRENT) is not None | [
9,
946,
-1
] |
def METHOD_NAME(self, source: CommandSource): | [
697,
3216,
245
] |
def METHOD_NAME(a, b):
assert type(a) == type(b)
if isinstance(a, pd.DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, pd.Series):
assert_series_equal(a, b)
elif isinstance(a, pd.Index):
assert_index_equal(a, b)
else:
assert a == b | [
638,
926
] |
def METHOD_NAME(self, item):
if 'poi_type' in item and item['poi_type']['id'] == POI_TYPE_ID:
provider = self._find_provider(item)
if provider:
item['stands'] = provider.get_informations(item)
return provider
return None | [
276,
3189
] |
def METHOD_NAME(self):
"""Test without any invitation, continue_flow_without_invitation is set."""
self.stage.continue_flow_without_invitation = True
self.stage.save()
plan = FlowPlan(flow_pk=self.flow.pk.hex, bindings=[self.binding], markers=[StageMarker()])
plan.context[PLAN_CONTEXT_PENDING_USER] = self.user
plan.context[PLAN_CONTEXT_AUTHENTICATION_BACKEND] = BACKEND_INBUILT
session = self.client.session
session[SESSION_KEY_PLAN] = plan
session.save()
response = self.client.get(
reverse("authentik_api:flow-executor", kwargs={"flow_slug": self.flow.slug})
)
self.assertEqual(response.status_code, 200)
self.assertStageRedirects(response, reverse("authentik_core:root-redirect"))
self.stage.continue_flow_without_invitation = False
self.stage.save() | [
9,
529,
4568,
6241
] |
def METHOD_NAME(dut, interface_name=None, stc_type=None, **kwargs):
"""
API to show storm control configuration
Author : Chaitanya Vella ([email protected])
:param dut:
:param interface_name:
:param stc_type:
:param bits_per_sec:
:return:
"""
cli_type = st.get_ui_type(dut, **kwargs)
interface_data = utils.get_interface_number_from_name(interface_name)
if cli_type == 'click':
if not interface_name:
command = "show storm-control all"
else:
command = "show storm-control interface {}".format(
interface_name)
return st.METHOD_NAME(dut, command, type=cli_type)
elif cli_type == 'klish':
if not interface_name:
command = "show storm-control"
else:
command = "show storm-control interface {} {}".format(
interface_data["type"], interface_data["number"])
return st.METHOD_NAME(dut, command, type=cli_type)
elif cli_type in ['rest-put', 'rest-patch']:
rest_urls = st.get_datastore(dut, "rest_urls")
if stc_type == "unknown-multicast":
stc_type = "unknown_multicast"
if stc_type == "unknown-unicast":
stc_type = "unknown_unicast"
url = rest_urls['config_stormcontrol'].format(interface_name, stc_type.upper())
rest_get_output = get_rest(dut, rest_url=url)
actual_data = rest_get_output['output']['openconfig-if-ethernet-ext:config']
temp = {}
output = []
temp['interface'] = actual_data['ifname']
temp['rate'] = actual_data['kbps']
stc_type = (actual_data['storm-type'].lower())
if stc_type == "unknown_multicast":
stc_type = "unknown-multicast"
if stc_type == "unknown_unicast":
stc_type = "unknown-unicast"
temp['type'] = str(stc_type)
output.append(temp)
return output
else:
st.log("invalid cli type")
return False | [
697
] |
def METHOD_NAME(
p: Gaussian, kernel: kernels.Sum, _: None, __: None, ___: None, nghp: None = None
) -> tf.Tensor:
r"""
Compute the expectation:
<\Sum_i diag(Ki_{X, X})>_p(X)
- \Sum_i Ki_{.,.} :: Sum kernel
:return: N
"""
exps = [expectation(p, k, nghp=nghp) for k in kernel.kernels]
return reduce(tf.add, exps) | [
2908,
4008,
912
] |
def METHOD_NAME():
import annotations.annotatedFunctions as m
assert m.wrongTypeInside() == 0
assert len(m.wrongTypeInside.__annotations__) == 0 | [
9,
909,
44,
623,
559
] |
def METHOD_NAME(self, uid: Union[UUID, str]) -> ResourceType:
"""Get a particular element of the collection."""
if uid is None:
raise ValueError("Cannot get when uid=None. Are you using a registered resource?")
path = self._get_path(uid)
data = self.session.get_resource(path, version=self._api_version)
data = data[self._individual_key] if self._individual_key else data
return self.build(data) | [
19
] |
def METHOD_NAME(a, b):
if isinstance(a, BitVec):
return a.urem(b)
elif isinstance(b, BitVec):
return b.rurem(a)
return a % b | [
-1
] |
def METHOD_NAME(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app()) | [
276
] |
def METHOD_NAME(self, address, funcName, args):
"""Execute a synchronous function held by a smart contract"""
print(f"##{self.moduleName}.execSyncFunction()")
command = args['method']['command']
print(f"##execSyncFunction : args['args']['args'] : {args['args']['args']}") | [
1005,
164,
559
] |
def METHOD_NAME(self):
methods = ["OPTIONS", "HEAD", "TRACE", "GET", "PUT", "PATCH", "DELETE"]
url = reverse("autoemails:email_response", args=[1])
for method in methods:
with self.subTest(method=method):
response = self.client.generic(method, path=url)
self.assertEqual(response.status_code, 405)
response = self.client.generic("POST", path=url)
self.assertEqual(response.status_code, 302) # redirect to log in | [
9,
377,
103
] |
def METHOD_NAME(metafunc):
"""Hooks into pytest to add platform and board fixtures to tests that
require them. To make sure that "platform" and "board" are treated as
parameters for the appropriate tests (and included in the test names),
we add them as function level parametrizations. This prevents data
from being overwritten in Junit XML files if multiple platforms
or boards are tested."""
for argument in ["platform", "board"]:
if argument in metafunc.fixturenames:
value = metafunc.config.getoption(f"--{argument}", default=None)
if not value:
raise ValueError(
f"Test {metafunc.function.__name__} in module {metafunc.module.__name__} "
f"requires a --{argument} argument, but none was given."
)
metafunc.parametrize(argument, [metafunc.config.getoption(f"--{argument}")]) | [
2595,
567,
450
] |
def METHOD_NAME(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
if not predictions:
raise ValueError("No predictions provided")
elif not references:
raise ValueError("No references provided")
predictions = dict(map(self._convert_pred_to_entry, predictions))
references = dict(map(self._convert_ref_to_entry, references))
# TODO: parameterize
skip_missing_example_ids = False
long_non_null_threshold = 2
short_non_null_threshold = 2
long_answer_stats, short_answer_stats = score_answers(
gold_annotation_dict=references, pred_dict=predictions,
skip_missing_example_ids=skip_missing_example_ids,
long_non_null_threshold=long_non_null_threshold,
short_non_null_threshold=short_non_null_threshold)
metrics = pretty_print(long_answer_stats=long_answer_stats, short_answer_stats=short_answer_stats)
return metrics | [
226
] |
def METHOD_NAME(
location, # type: str
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-10-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/vmSizes") # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
) | [
56,
245,
377
] |
def METHOD_NAME(self, tir):
jir = self._to_java_table_ir(tir)
return ttable._from_java(jir.typ()) | [
410,
44
] |
def METHOD_NAME(graph, nodes, samples):
assert not graph.is_tensor(), "You must call Graph.numpy() first."
graph_list = []
for max_deg in samples:
start_nodes = copy.deepcopy(nodes)
edges = []
if max_deg == -1:
pred_nodes = graph.predecessor(start_nodes)
else:
pred_nodes = graph.sample_predecessor(
start_nodes, max_degree=max_deg)
for dst_node, src_nodes in zip(start_nodes, pred_nodes):
for src_node in src_nodes:
edges.append((src_node, dst_node))
neigh_nodes = [start_nodes, pred_nodes]
neigh_nodes = flat_node_and_edge(neigh_nodes)
from_reindex = {x: i for i, x in enumerate(neigh_nodes)}
sub_node_index = graph_kernel.map_nodes(nodes, from_reindex)
sg = subgraph(
graph,
nodes=neigh_nodes,
edges=edges,
with_node_feat=False,
with_edge_feat=False)
# sg = add_self_loop(sg, sub_node_index)
sg = add_self_loop(sg, sub_node_index)
graph_list.append((sg, neigh_nodes, sub_node_index))
nodes = neigh_nodes
graph_list = graph_list[::-1]
return graph_list | [
6478,
734
] |
def METHOD_NAME(func):
"""
Wrapper around run() methods that reads the *slow* flag to decide whether to wait some seconds
for illustrative purposes. This is very straight forward, so no need for ``functools.wraps`` or
``law.decorator.factory`` here.
"""
def wrapper(self, *args, **kwargs):
if self.slow:
time.sleep(random.randint(5, 15))
return func(self, *args, **kwargs)
return wrapper | [
2946,
618
] |
def METHOD_NAME():
class TestCase(HasTraits):
value = TypedTuple(trait=Int(), default_value=(1, 2, 'foobar'))
with pytest.raises(TraitError):
obj = TestCase()
a = obj.value # a read might be needed to trigger default validation | [
9,
3499,
1815,
1068,
235
] |
def METHOD_NAME(self, name):
"""
Returns the total size, in bytes, of the file referenced by name.
"""
try:
properties = self._get_properties(name)
return int(properties['content-length'])
except AzureMissingResourceHttpError:
pass | [
1318
] |
def METHOD_NAME(a):
    """1.0/a (inplace on a)"""
    a[...] = 1.0 / a  # assumed completion: elementwise reciprocal, in place
    return a | [
11981,
5920
] |
def METHOD_NAME(*args, **kwargs):
parser.METHOD_NAME() | [
38,
40
] |
def METHOD_NAME(self, indent):
return indent + (' ' * self.INDENT_SIZE) | [
2978,
4
] |
def METHOD_NAME(required_use):
o = ebuild(None, "dev-util/diffball-0.1-r1")
object.__setattr__(o, "eapi", get_eapi("8", suppress_unsupported=True))
object.__setattr__(o, "data", {"REQUIRED_USE": required_use})
return o.required_use | [
214
] |
def METHOD_NAME(name, **kwargs):
try:
return gcloud("compute", "instance-groups", "managed", "create", name, **kwargs)
except subprocess.CalledProcessError as e:
raise Exception('"{}" returned unexpected error:\n{}'.format(e.cmd, e.stderr)) | [
129,
89,
846
] |
def METHOD_NAME():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version',
version='%(prog)s ' + sfepy.__version__)
parser.add_argument('--debug',
action='store_true', dest='debug',
default=False, help=helps['debug'])
parser.add_argument('-o', metavar='filename',
action='store', dest='output_filename_trunk',
default=None, help=helps['filename'])
parser.add_argument('-d', '--dump', action='store_true', dest='dump',
default=False, help=helps['dump'])
parser.add_argument('--same-dir', action='store_true', dest='same_dir',
default=False, help=helps['same_dir'])
parser.add_argument('-l', '--linearization', metavar='options',
action='store', dest='linearization',
default=None, help=helps['linearization'])
parser.add_argument('--times', action='store_true', dest='times',
default=False, help=helps['times'])
parser.add_argument('-f', '--from', type=int, metavar='ii',
action='store', dest='step_from',
default=0, help=helps['from'])
parser.add_argument('-t', '--to', type=int, metavar='ii',
action='store', dest='step_to',
default=None, help=helps['to'])
parser.add_argument('-s', '--step', type=int, metavar='ii',
action='store', dest='step_by',
default=1, help=helps['step'])
parser.add_argument('-e', '--extract', metavar='list',
action='store', dest='extract',
default=None, help=helps['extract'])
parser.add_argument('-a', '--average', action='store_true',
dest='average', default=False, help=helps['average'])
parser.add_argument('input_file', nargs='?', default=None)
parser.add_argument('results_file')
options = parser.parse_args()
if options.debug:
from sfepy.base.base import debug_on_error; debug_on_error()
filename_in = options.input_file
filename_results = options.results_file
if filename_in is None:
linearize = False
else:
linearize = True
options.dump = True
if options.times:
steps, times, nts, dts = th.extract_times(filename_results)
for ii, time in enumerate(times):
step = steps[ii]
print('%d %e %e %e' % (step, time, nts[ii], dts[ii]))
if options.dump:
trunk = get_default(options.output_filename_trunk,
get_trunk(filename_results))
if options.same_dir:
trunk = os.path.join(os.path.dirname(filename_results),
os.path.basename(trunk))
args = {}
if linearize:
problem = create_problem(filename_in)
linearization = Struct(kind='adaptive', min_level=0,
max_level=2, eps=1e-2)
aux = problem.conf.options.get('linearization', None)
linearization.update(aux)
if options.linearization is not None:
aux = parse_linearization(options.linearization)
linearization.update(aux)
args.update({'fields' : problem.fields,
'linearization' : linearization})
if options.step_to is None:
args.update({'step0' : options.step_from})
else:
args.update({'steps' : nm.arange(options.step_from,
options.step_to + 1,
options.step_by, dtype=nm.int32)})
th.dump_to_vtk(filename_results, output_filename_trunk=trunk, **args)
if options.extract:
ths, ts = th.extract_time_history(filename_results, options.extract)
if options.average:
ths = th.average_vertex_var_in_cells(ths)
if options.output_filename_trunk:
th.save_time_history(ths, ts, options.output_filename_trunk + '.h5')
else:
print(dict_to_struct(ths, flag=(1, 1, 1)).str_all()) | [
57
] |
def METHOD_NAME(self, name, cat=None):
try:
mem = self.get_member(name, cat=cat)
return True
except self.NoSuchMember:
return False | [
220,
1823
] |
def METHOD_NAME(self):
""" Get the current connection object. """
return self._getCurrentContext()[CXT_CONNECTION] | [
19,
550
] |
def METHOD_NAME(
file_path: str, target_name: str
) -> Tuple[np.ndarray, np.ndarray, list, str]:
"""Reads data from CSV file and prepares it for model fitting.
Args:
file_path (str): relative path to CSV file
target_name (str): name of dependent variable in the dataset
Returns:
Tuple[np.ndarray, np.ndarray, list, str]:
1. n-dim array of independent variables
2. 1-dim array of dependent variable
3. List of feature names
4. Name of dependent variable (updated in case of demo dataset)
"""
# read data from file as pandas dataframe
try:
data = pd.read_csv(file_path)
except FileNotFoundError:
sys.exit("Invalid path provided.")
# separate features and target
if target_name:
y = np.array(data.pop(target_name))
else:
target_name = data.columns[-1]
y = np.array(data.pop(data.columns[-1]))
features = list(data.columns)
X = np.array(data)
# add bias term in the form of constant column
X = np.column_stack((X, np.ones(len(X))))
features.append("constant")
return X, y, features, target_name | [
123,
365
] |
def METHOD_NAME(request, app: str = None):
if request.body and app == "video": # we develop only video statement
body_unicode = request.body.decode("utf-8")
body = json.loads(body_unicode)
METHOD_NAME = {
"actor": {
"name": "%s" % get_actor_name(request),
"mbox": "mailto:%s" % get_actor_mail(request),
},
"id": str(uuid.uuid4()),
}
for key, value in body.items():
METHOD_NAME[key] = value
if validate_statement(METHOD_NAME) and XAPI_LRS_URL != "":
send_xapi_statement_task.delay(METHOD_NAME)
return JsonResponse(METHOD_NAME, safe=False)
raise SuspiciousOperation(
"none post data was sent and app parameter has to be equals to video"
) | [
925
] |
def METHOD_NAME(self) -> None:
cr = bahc.CodeRunner("# test", "/foo/__init__.py", [])
package = cr.new_module()
cr = bahc.CodeRunner("# test", "path", [], package=package)
m = cr.new_module()
assert isinstance(m, ModuleType)
assert m.__dict__['__name__'].startswith('bokeh_app_')
assert m.__dict__['__file__'] == abspath("path")
assert m.__dict__['__package__'] == package.__dict__["__name__"] | [
9,
80,
298,
360
] |
def METHOD_NAME(self) -> None:
self._client.METHOD_NAME() | [
1462
] |
def METHOD_NAME(self, request):
return self.location_cache.can_use_multiple_write_locations_for_request(request) | [
1046,
1080,
107,
77,
1081
] |
def METHOD_NAME(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-09-02-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"location": _SERIALIZER.url("location", location, "str", min_length=1),
}
_url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) | [
56,
245,
377
] |
def METHOD_NAME(self, step): # pylint: disable=redefined-outer-name
assert step not in self._stack
self._stack.append(step)
step.parent = self.get_current()
step.level = len(self._stack) | [
1013
] |
def METHOD_NAME(self, width, height):
# determine new height and width
scale_height = self.__height / height
scale_width = self.__width / width
if self.__keep_aspect_ratio:
if self.__resize_method == "lower_bound":
# scale such that output size is lower bound
if scale_width > scale_height:
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
elif self.__resize_method == "upper_bound":
# scale such that output size is upper bound
if scale_width < scale_height:
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
elif self.__resize_method == "minimal":
            # scale as little as possible
if abs(1 - scale_width) < abs(1 - scale_height):
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
else:
raise ValueError(
f"resize_method {self.__resize_method} not implemented"
)
if self.__resize_method == "lower_bound":
new_height = self.constrain_to_multiple_of(
scale_height * height, min_val=self.__height
)
new_width = self.constrain_to_multiple_of(
scale_width * width, min_val=self.__width
)
elif self.__resize_method == "upper_bound":
new_height = self.constrain_to_multiple_of(
scale_height * height, max_val=self.__height
)
new_width = self.constrain_to_multiple_of(
scale_width * width, max_val=self.__width
)
elif self.__resize_method == "minimal":
new_height = self.constrain_to_multiple_of(scale_height * height)
new_width = self.constrain_to_multiple_of(scale_width * width)
else:
raise ValueError(f"resize_method {self.__resize_method} not implemented")
return (new_width, new_height) | [
19,
1318
] |
def METHOD_NAME(self):
"""Does it support encrypted key files."""
return False | [
1186,
2897
] |
def METHOD_NAME(storage_id: str) -> None:
assert storage_id not in os.listdir(manager._base_path) | [
72,
34,
905
] |
def METHOD_NAME(path: Path, payload: str) -> None:
with open(path, "w", encoding="utf-8") as fd:
fd.write(payload) | [
77,
171
] |
def METHOD_NAME(self) -> Optional['outputs.JobStepOutputResponse']:
"""
Output destination properties of the job step.
"""
return pulumi.get(self, "output") | [
146
] |
def METHOD_NAME(self, squadron: Squadron) -> None:
self.squadrons[squadron.aircraft].append(squadron) | [
238,
12854
] |
def METHOD_NAME(self):
self.dumps = cPickle.dumps
self.loads = cPickle.loads | [
0,
1
] |
def METHOD_NAME(self, tmp_path: Path) -> None:
"""Test parse_params."""
params = ParametersDataModel(
src=str(tmp_path),
dest="s3://dest/",
dir_op=True,
exclude=["exclude/*"],
include=["include/*"],
)
result = Filter.parse_params(params)
assert isinstance(result, Filter)
assert result.patterns == [
FilterPattern("exclude", f"{tmp_path}{os.sep}exclude/*"),
FilterPattern("include", f"{tmp_path}{os.sep}include/*"),
]
assert result.dest_patterns == [
FilterPattern("exclude", "dest/exclude/*"),
FilterPattern("include", "dest/include/*"),
] | [
9,
214,
434
] |
def METHOD_NAME(self, variables, key):
if key in self.provision_config_instance.variables["origin"]["docker"]:
variables[key] = self.provision_config_instance.variables["origin"]["docker"][key] | [
238,
217,
973,
43,
6203,
200,
89
] |
def METHOD_NAME(self):
response = milestones_helpers.get_course_content_milestones(
str(self.course.id),
'i4x://doesnt/matter/for/this/test',
'requires'
)
assert len(response) == 0 | [
9,
19,
1122,
459,
8059,
610,
98
] |
def METHOD_NAME(self, org, role=None):
org_id = org.org_id
try:
settings = self.get(org_id=org_id)
except OrgSettings.DoesNotExist:
settings = self.model(org_id=org_id)
if role is not None:
if role in get_available_roles():
settings.role = role
else:
logger.warning('Role %s is not valid' % role)
settings.save(using=self._db)
return settings | [
238,
894,
86
] |
def METHOD_NAME(self, component_size: list[int], u_node: int, v_node: int) -> None:
"""Union finds the roots of components for two nodes, compares the components
in terms of size, and attaches the smaller one to the larger one to form
single component"""
if component_size[u_node] <= component_size[v_node]:
self.m_component[u_node] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(u_node)
elif component_size[u_node] >= component_size[v_node]:
self.m_component[v_node] = self.find_component(u_node)
component_size[u_node] += component_size[v_node]
self.set_component(v_node) | [
3006
] |
def METHOD_NAME(self):
sync_with_settings()
self.assertEqual(Schedule.objects.filter(schedule_type=Schedule.CRON).count(), 3) | [
9,
164,
41,
35,
1267
] |
def METHOD_NAME(move_requests, accept, reason=''):
"""Send email notifications when a move request is accepted/rejected.
:param move_requests: List of `EventMoveRequest` that were accepted/rejected.
:param accept: Whether the requests were accepted.
:param reason: Optional reason for rejection.
"""
move_requests = sorted(move_requests, key=attrgetter('requestor.id', 'category.id'))
for (requestor, category), requests in itertools.groupby(move_requests, attrgetter('requestor', 'category')):
events = [rq.event for rq in requests]
with requestor.force_user_locale():
template = get_template_module('events/emails/move_request_closure.txt',
events=events, target_category=category, accept=accept, reason=reason)
email = make_email(to_list=requestor.email, template=template)
send_email(email) | [
959,
132,
377,
1145
] |
def METHOD_NAME(self):
with pytest.raises(TypeError):
self.encoder.default(None)
with pytest.raises(TypeError):
self.encoder.default({}) | [
9,
13955
] |
def METHOD_NAME(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(domain):
"""
Start a defined domain
CLI Example:
.. code-block:: bash
salt '*' virt.start <domain>
"""
if domain in list_active_vms():
raise CommandExecutionError("The specified vm is already running")
__salt__["vmadm.start"](domain)
return domain in list_active_vms() | [
447
] |
def METHOD_NAME(self, structure_tstep, aero_tstep):
# called by DynamicCoupled
if self.settings['update_grid']:
self.data.aero.generate_zeta_timestep_info(structure_tstep,
aero_tstep,
self.data.structure,
self.data.aero.aero_settings,
dt=None) | [
86,
343,
753
] |
def METHOD_NAME(self):
# Create some observations.
obs = list([])
obs2 = list([])
obs_total = list([])
obs.append(Vector3d(1.0, 1.0, 0.0))
obs.append(Vector3d(1.1, 1.0, 0.0))
obs.append(Vector3d(1.2, 1.0, 0.0))
obs.append(Vector3d(1.3, 1.0, 0.0))
obs.append(Vector3d(1.4, 1.0, 0.0))
obs2.append(Vector3d(5.0, 1.0, 0.0))
obs2.append(Vector3d(5.1, 1.0, 0.0))
obs2.append(Vector3d(5.2, 1.0, 0.0))
obs2.append(Vector3d(5.3, 1.0, 0.0))
obs2.append(Vector3d(5.4, 1.0, 0.0))
for elem in obs:
obs_total.append(elem)
for elem in obs2:
obs_total.append(elem)
# Initialize Kmeans with two partitions.
kmeans = Kmeans(obs)
kmeans.append_observations(obs2)
obs_copy = kmeans.observations()
for i in range(len(obs_copy)):
self.assertEqual(obs_total[i], obs_copy[i])
# Append an empty vector.
emptyVector = []
self.assertFalse(kmeans.append_observations(emptyVector)) | [
9,
7667,
1459
] |
def METHOD_NAME(df, geom_col="geometry", inplace=False, _func=None, **kwargs):
"""Atomic operation internal function."""
outval = df[geom_col].apply(lambda x: _func(x, **kwargs))
outcol = "shape_{}".format(_func.__name__)
if not inplace:
new = df.copy()
new[outcol] = outval
return new
df[outcol] = outval | [
956,
441
] |
def METHOD_NAME():
return "Device-Control", "Newly-Controller" | [
8681
] |
def METHOD_NAME(self, image_uri: str) -> bool:
"""Check if image exists in container registry.
Args:
image_uri (str): Image URI to check.
Returns:
bool: True if image exists, False otherwise.
"""
credential = self.environment.get_credentials()
registry, repository, tag = self.parse_azurecr_uri(image_uri)
client = ContainerRegistryClient(f"https://{registry}.azurecr.io", credential)
try:
client.get_manifest_properties(repository, tag)
return True
except ResourceNotFoundError:
return False
except Exception as e:
raise LaunchError(
f"Unable to check if image exists in Azure Container Registry: {e}"
) from e | [
250,
660,
954
] |
def METHOD_NAME(store: Union[str, MutableMapping],
insert_index: int,
time_slice: xr.Dataset,
mode: str,
chunk_sizes: Dict[str, int] = None):
"""
    Update an existing zarr dataset with a new time slice.
:param store: A zarr store.
:param insert_index: Time index
:param time_slice: Time slice to insert
:param mode: Update mode, 'insert' or 'replace'
:param chunk_sizes: desired chunk sizes
"""
if mode not in ('insert', 'replace'):
raise ValueError(f'illegal mode value: {mode!r}')
insert_mode = mode == 'insert'
time_var_names = []
encoding = {}
with xr.open_zarr(store) as cube:
for var_name in cube.variables:
var = cube[var_name]
if var.ndim >= 1 and 'time' in var.dims:
if var.dims[0] != 'time':
raise ValueError(f"dimension 'time' of variable {var_name!r} must be first dimension")
time_var_names.append(var_name)
enc = dict(cube[var_name].encoding)
# xarray 0.17+ supports engine preferred chunks if exposed by the backend
# zarr does that, but when we use the new 'preferred_chunks' when writing to zarr
# it raises and says, 'preferred_chunks' is an unsupported encoding
if 'preferred_chunks' in enc:
del enc['preferred_chunks']
encoding[var_name] = enc
if chunk_sizes:
time_slice = chunk_dataset(time_slice, chunk_sizes, format_name='zarr')
temp_dir = tempfile.TemporaryDirectory(prefix='xcube-time-slice-', suffix='.zarr')
time_slice.to_zarr(temp_dir.name, encoding=encoding)
slice_root_group = zarr.open(temp_dir.name, mode='r')
slice_arrays = dict(slice_root_group.arrays())
cube_root_group = zarr.open(store, mode='r+')
for var_name, var_array in cube_root_group.arrays():
if var_name in time_var_names:
slice_array = slice_arrays[var_name]
if insert_mode:
# Add one empty time step
empty = zarr.creation.empty(slice_array.shape, dtype=var_array.dtype)
var_array.append(empty, axis=0)
# Shift contents
var_array[insert_index + 1:, ...] = var_array[insert_index:-1, ...]
# Replace slice
var_array[insert_index, ...] = slice_array[0]
unchunk_dataset(store, coords_only=True) | [
86,
104,
55
] |
def METHOD_NAME(self):
args = parse_date_args("55.1345678", "%S")
self.assertEqual(55, next(args))
self.assertEqual(134567800, next(args))
self.assertIsNone(next(args, None)) | [
9,
214,
153,
335,
-1
] |
def METHOD_NAME(self): | [
19,
199
] |
def METHOD_NAME(self):
# create a certificate with a device id that contains all valid characters
sub1 = self.startProcess(
command=self.sudo,
arguments=[
self.tedge,
"cert",
"create",
"--device-id",
"'?=()*@!%,-.123ThinEdgeDevice-id",
],
stdouterr="cert_create",
)
# upload the certificate
cert_upload = self.startProcess(
environs={"C8YPASS": self.project.c8ypass},
command=self.sudo,
arguments=[
"-E",
self.tedge,
"cert",
"upload",
"c8y",
"--user",
self.project.c8yusername,
],
stdouterr="cert_upload",
)
time.sleep(1)
# connect to the c8y cloud
self.tedge_connect_c8y()
# test connect to the c8y cloud
self.tedge_connect_c8y_test() | [
750
] |
def METHOD_NAME(prop_name):
def fun(self):
if prop_name in self.other_props:
return getattr(self.phases[self.other_props[prop_name]], prop_name)()
if self.thermal is not None:
return getattr(self.phases[self.thermal], prop_name)()
raise ValueError("No method specified")
return fun | [
-1
] |
def METHOD_NAME(request) -> bool:
"""
Check if the request is a browser visiting this
as the main url, rather than requesting an image.
"""
accepted_headers = request.headers.get("Accept")
logger.info(f"accepted_headers - {accepted_headers}")
if accepted_headers:
return "text/html" in accepted_headers or "*/*" in accepted_headers | [
250,
43,
2073,
716
] |
def METHOD_NAME(setting_inheritance_manager, mocked_stack):
# Setting 1 doesn't have a user state, so it can't have an override
assert not setting_inheritance_manager._settingIsOverwritingInheritance("setting_1", mocked_stack) | [
9,
1333,
137,
13320,
380,
654,
21
] |
async def METHOD_NAME():
async with AutoRestRequiredOptionalTestService(
"required_path",
"required_query",
) as METHOD_NAME:
METHOD_NAME._config.required_global_path = None
METHOD_NAME._config.required_global_query = None
await yield_(METHOD_NAME) | [
340
] |
def METHOD_NAME(self) -> bool:
try:
return self.conf.get(ElasticsearchBaseExtractor.ELASTICSEARCH_EXTRACT_TECHNICAL_DETAILS)
except Exception:
return False | [
297,
15425,
2051
] |
def METHOD_NAME(make_napari_viewer):
"""Test basic add_dock_widget functionality"""
viewer = make_napari_viewer()
widg = QPushButton('button')
dwidg = viewer.window.add_dock_widget(widg, name='test', area='bottom')
assert not dwidg.is_vertical
assert viewer.window._qt_window.findChild(QDockWidget, 'test')
assert dwidg.widget() == widg
dwidg._on_visibility_changed(True) # smoke test
widg2 = QPushButton('button')
dwidg2 = viewer.window.add_dock_widget(widg2, name='test2', area='right')
assert dwidg2.is_vertical
assert viewer.window._qt_window.findChild(QDockWidget, 'test2')
assert dwidg2.widget() == widg2
dwidg2._on_visibility_changed(True) # smoke test
with pytest.raises(ValueError):
# 'under' is not a valid area
viewer.window.add_dock_widget(widg2, name='test2', area='under')
with pytest.raises(ValueError):
# 'under' is not a valid area
viewer.window.add_dock_widget(
widg2, name='test2', allowed_areas=['under']
)
with pytest.raises(TypeError):
# allowed_areas must be a list
viewer.window.add_dock_widget(
widg2, name='test2', allowed_areas='under'
) | [
9,
238,
5134,
706
] |
async def METHOD_NAME(self, request: web.Request):
await self.delete_object(request)
return web.HTTPNoContent() | [
34,
507
] |
def METHOD_NAME() -> None:
"""Load data, create and start CIFAR-10/100 client."""
args = parse_args()
client_setting = get_client_setting(args.setting, args.cid)
# Configure logger
configure(identifier=f"client:{client_setting.cid}", host=args.log_host)
log(INFO, "Starting client, settings: %s", client_setting)
# Load model
model = resnet50v2(input_shape=(32, 32, 3), num_classes=NUM_CLASSES, seed=SEED)
# Load local data partition
(xy_train_partitions, xy_test_partitions), _ = tf_cifar_partitioned.load_data(
iid_fraction=client_setting.iid_fraction,
num_partitions=client_setting.num_clients,
cifar100=False,
)
x_train, y_train = xy_train_partitions[client_setting.partition]
x_test, y_test = xy_test_partitions[client_setting.partition]
if client_setting.dry_run:
x_train = x_train[0:100]
y_train = y_train[0:100]
x_test = x_test[0:50]
y_test = y_test[0:50]
# Start client
client = VisionClassificationClient(
client_setting.cid,
model,
(x_train, y_train),
(x_test, y_test),
client_setting.delay_factor,
NUM_CLASSES,
augment=True,
augment_horizontal_flip=True,
augment_offset=2,
)
fl.client.start_client(args.server_address, client) | [
57
] |