text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7) |
---|---|
def METHOD_NAME(self, use_naive_parallel_network, reward_dim,
use_n_step_td, min_critic_by_critic_mean):
num_env = 1
config = TrainerConfig(
root_dir="dummy",
unroll_length=1,
mini_batch_length=4,
mini_batch_size=64,
initial_collect_steps=500,
whole_replay_buffer_training=False,
clear_replay_buffer=False)
env_class = PolicyUnittestEnv
steps_per_episode = 13
env = env_class(
num_env,
steps_per_episode,
action_type=ActionType.Continuous,
reward_dim=reward_dim)
eval_env = env_class(
100,
steps_per_episode,
action_type=ActionType.Continuous,
reward_dim=reward_dim)
obs_spec = env._observation_spec
action_spec = env._action_spec
reward_spec = env._reward_spec
fc_layer_params = (10, 10)
continuous_projection_net_ctor = partial(
alf.nn.NormalProjectionNetwork,
state_dependent_std=True,
scale_distribution=True,
std_transform=clipped_exp)
actor_network = partial(
alf.nn.ActorDistributionNetwork,
fc_layer_params=fc_layer_params,
continuous_projection_net_ctor=continuous_projection_net_ctor)
num_quantiles = 50
critic_network = partial(
alf.nn.CriticNetwork,
output_tensor_spec=TensorSpec((num_quantiles, )),
joint_fc_layer_params=fc_layer_params,
use_naive_parallel_network=use_naive_parallel_network)
if use_n_step_td:
td_qr_loss_ctor = TDQRLoss
else:
td_qr_loss_ctor = OneStepTDQRLoss
critic_loss = partial(td_qr_loss_ctor, num_quantiles=num_quantiles)
alg = QrsacAlgorithm(
observation_spec=obs_spec,
action_spec=action_spec,
reward_spec=reward_spec,
actor_network_cls=actor_network,
critic_network_cls=critic_network,
critic_loss_ctor=critic_loss,
min_critic_by_critic_mean=min_critic_by_critic_mean,
use_entropy_reward=reward_dim == 1,
env=env,
config=config,
actor_optimizer=alf.optimizers.Adam(lr=1e-2),
critic_optimizer=alf.optimizers.Adam(lr=1e-2),
alpha_optimizer=alf.optimizers.Adam(lr=1e-2),
debug_summaries=False,
name="MyQRSAC")
eval_env.reset()
for i in range(700):
alg.train_iter()
if i < config.initial_collect_steps:
continue
eval_env.reset()
eval_time_step = unroll(eval_env, alg, steps_per_episode - 1)
logging.log_every_n_seconds(
logging.INFO,
"%d reward=%f" % (i, float(eval_time_step.reward.mean())),
n_seconds=1)
self.assertAlmostEqual(
1.0, float(eval_time_step.reward.mean()), delta=0.3) | [
9,
-1,
4089
] |
def METHOD_NAME(seconds):
""" Report on system up-time in days, hours and minutes """
days = int(seconds / (60 * 60 * 24))
minutes = int(seconds / 60)
hours = int(minutes / 60)
hours = int(hours % 24)
minutes = int(minutes % 60)
result = " up"
if days > 1:
result += " %d days," % days
elif days != 0:
result += " 1 day,"
if hours != 0:
result += ' %2d:%02d,' % (hours, minutes)
else:
result += ' %d min,' % minutes
return result | [
38,
12975
] |
def METHOD_NAME(self): | [
9,
2111,
1268,
5820,
104,
3901
] |
def METHOD_NAME(self):
""" Return total time-range all records span across. """
return TimeRange(min(record.time.start for record in self if record.time.start is not None),
max(record.time.end for record in self if record.time.end is not None)) | [
104,
661
] |
def METHOD_NAME(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2023-04-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/features") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
) | [
56,
245,
377
] |
def METHOD_NAME(
webhook: WebhookModels, data: typing.Mapping, webhook_type: WebhookType
):
try:
res = _call_webhook(webhook, data)
except requests.exceptions.RequestException as exc:
send_failure_email(
webhook,
data,
webhook_type,
f"N/A ({exc.__class__.__name__})",
)
return
if res.status_code != 200:
send_failure_email(webhook, data, webhook_type, res.status_code) | [
128,
12,
487,
69,
168
] |
def METHOD_NAME(self):
"""Test json serialization."""
as_json = splits.from_raw(self.raw).to_json()
assert isinstance(as_json, dict)
assert as_json['changeNumber'] == 123
assert as_json['trafficTypeName'] == 'user'
assert as_json['name'] == 'some_name'
assert as_json['trafficAllocation'] == 100
assert as_json['trafficAllocationSeed'] == 123456
assert as_json['seed'] == 321654
assert as_json['status'] == 'ACTIVE'
assert as_json['killed'] is False
assert as_json['defaultTreatment'] == 'off'
assert as_json['algo'] == 2
assert len(as_json['conditions']) == 2 | [
9,
24,
763
] |
def METHOD_NAME(self):
tc = TestCase()
assert_equal(tc.setup.config(name='S').id, 't1-k1')
assert_equal(tc.teardown.config(name='T').id, 't1-k2')
tc.body = [BodyItem(), BodyItem(), BodyItem()]
assert_equal([item.id for item in tc.body], ['t1-k2', 't1-k3', 't1-k4'])
assert_equal(tc.setup.id, 't1-k1')
assert_equal(tc.teardown.id, 't1-k5') | [
9,
147,
41,
935,
15177,
102,
61
] |
def METHOD_NAME(value):
if isinstance(value, list):
value = [METHOD_NAME(v) for v in value]
elif isinstance(value, set):
value = set([METHOD_NAME(v) for v in value])
elif isinstance(value, str):
value = escape(value)
return value | [
4748,
99
] |
def METHOD_NAME(self, env):
if self.spec.satisfies("%gcc@10:"):
env.append_flags("CFLAGS", "-fcommon")
env.append_flags("FFLAGS", "-fcommon") | [
102,
56,
1027
] |
async def METHOD_NAME(self, **kwargs):
"""Turn on the vacuum cleaner."""
if self._power_dps:
await self._power_dps.async_set_value(self._device, True) | [
958,
6553,
69
] |
def METHOD_NAME(qw, qx, qy, qz):
'''
Translates from Quaternion to Pitch.
@param qw,qx,qy,qz: Quaternion values
@type qw,qx,qy,qz: float
@return Pitch value translated from Quaternion
'''
rotateYa0=-2.0*(qx*qz - qw*qy)
rotateY=0.0
if(rotateYa0 >= 1.0):
rotateY = pi/2.0
elif(rotateYa0 <= -1.0):
rotateY = -pi/2.0
else:
rotateY = asin(rotateYa0)
return rotateY | [
2729,
2495
] |
def METHOD_NAME(in_dict, hold_dict, hold_cycles, sim=None):
# TODO: write param and return descriptions
""" Simulation of a circuit that takes multiple cycles to complete.
:param in_dict:
:param hold_dict:
:param hold_cycles:
:param sim:
:return:
"""
if sim is None:
sim = pyrtl.Simulation(tracer=pyrtl.SimulationTrace())
sim.step(in_dict)
for i in range(hold_cycles):
sim.step(hold_dict)
return sim.tracer.trace[-1] | [
1919,
-1
] |
def METHOD_NAME(self):
if self.args.action == 'create':
self.create_template()
else:
raise ValueError('The parameter of action must be in [create]') | [
750
] |
def METHOD_NAME(
self, namespace: str, key: str, user: Optional[User]
) -> StorageDestination:
return StorageDestination(
uri=f"sematic:///api/v1/storage/{namespace}/{key}/local",
request_headers=_make_headers(user),
) | [
19,
77,
3836
] |
def METHOD_NAME(dep):
date = dep.get("DA")
time = dep.get("TI")
# cannot use DateTime because the data needs to be json serializable
departure_datetime_utc = make_datetime_json_serializable(date, time)
value = {
"departureDatetimeUtc": departure_datetime_utc,
"departurePort": dep.get("PO"),
"anticipatedActivity": dep.get("AA"),
}
children = tagged_children(dep)
if "GEA" in children:
gear = [parse_gea(gea) for gea in children["GEA"]]
value["gearOnboard"] = gear
if "SPE" in children:
species_onboard = [parse_spe(spe) for spe in children["SPE"]]
value["speciesOnboard"] = species_onboard
data = {"log_type": "DEP", "value": value}
return data | [
214,
161
] |
def METHOD_NAME():
a: i32
a = sign(-3)
assert a == -1
assert sign(0) == 0
f: f32
f = -f32(3.0)
assert sign(f) == -f32(1.0)
f = f32(235.4142135623730951)
assert sign(f) == f32(1.0)
a2: i64
a2 = sign(i64(3))
assert a2 == i64(1)
f2: f64
f2 = -3.0
assert sign(f2) == -1.0 | [
9,
2452
] |
def METHOD_NAME(self, in_timestamp: Optional[OpenTimestamp]) -> int:
"""Returns the real timestamp that should be used to open an object."""
if in_timestamp is not None:
return to_timestamp_ms(in_timestamp)
if self.timestamp_ms is not None:
return self.timestamp_ms
return int(time.time() * 1000) | [
1452,
2722,
3665
] |
def METHOD_NAME(lyrics):
from lxml import etree
xml = etree.XML(u'<lyrics></lyrics>')
etree.SubElement(xml, "artist").text = lyrics.artist
etree.SubElement(xml, "album").text = lyrics.album
etree.SubElement(xml, "title").text = lyrics.title
etree.SubElement(xml, "syncronized").text = 'True' if __syncronized__ else 'False'
etree.SubElement(xml, "grabber").text = lyrics.source
lines = lyrics.lyrics.splitlines()
for line in lines:
etree.SubElement(xml, "lyric").text = line
utilities.log(True, utilities.convert_etree(etree.tostring(xml, encoding='UTF-8',
pretty_print=True, xml_declaration=True))) | [
56,
10429
] |
def METHOD_NAME(self, text, split=True): ... | [
77,
2152,
5659
] |
def METHOD_NAME():
return 2 | [
1413,
1603
] |
def METHOD_NAME(self):
A = AccumulatorIntegrator(increment = np.array([0.1, 0.5, 0.9]))
A()
A()
A()
val = A()
np.testing.assert_allclose([[0.4, 2., 3.6]], val) | [
9,
3464,
3375,
2978,
1636
] |
def METHOD_NAME(config):
model_data = MODELS[config[CONF_MODEL]]
presets = model_data[MODEL_PRESETS]
for key, value in presets.items():
if key not in config:
if key.endswith("pin"):
# All pins are output.
value = pins.gpio_output_pin_schema(value)
config[key] = value
if model_data[REQUIRE_PS] and CONF_POWER_SUPPLY not in config:
raise cv.Invalid(
f'{CONF_POWER_SUPPLY} must be specified when {CONF_MODEL} is {config[CONF_MODEL]}"'
)
if (
CONF_OFFSET_WIDTH not in config
or CONF_OFFSET_HEIGHT not in config
or CONF_HEIGHT not in config
or CONF_WIDTH not in config
):
raise cv.Invalid(
f"{CONF_HEIGHT}, {CONF_WIDTH}, {CONF_OFFSET_HEIGHT} and {CONF_OFFSET_WIDTH} must all be specified"
)
if CONF_DC_PIN not in config or CONF_RESET_PIN not in config:
raise cv.Invalid(f"both {CONF_DC_PIN} and {CONF_RESET_PIN} must be specified")
return config | [
187,
-1
] |
def METHOD_NAME(self, l, h, r, move=None):
t = self.thickness
s = self.spacing
tw, th = l/2+2.5*t+3*s, h+1.5*t+3*s
if self.move(tw, th, move, True):
return
self.moveTo(2.5*t+s, 0)
self.polyline(l/2-r, (90, r+t), h-r, 90, t, 90, h-r, (-90, r), l/2-r, 90, t, 90)
self.moveTo(-t-s, t+s)
self.polyline(l/2-r, (90, r+t), h-r, 90, t, 90, h-r, (-90, r), l/2-r, 90, t, 90)
self.moveTo(+t-s, t+s)
self.polyline(l/2-r, (90, r-1.5*t), h-r, 90, t, 90, h-r, (-90, r-2.5*t), l/2-r, 90, t, 90)
self.moveTo(-t-s, t+s)
self.polyline(l/2-r, (90, r-1.5*t), h-r, 90, t, 90, h-r, (-90, r-2.5*t), l/2-r, 90, t, 90)
self.move(tw, th, move) | [
12592
] |
def METHOD_NAME(self):
self.assertNotIn("name1", self.registry)
self.assertNotIn("name2", self.registry)
self.n1_called = False
self.n2_called = False
@self.registry("name1")
def some_registration1():
self.n1_called = True
@self.registry("name2")
def some_registration2():
self.n2_called = True
self.assertIn("name1", self.registry)
self.assertEqual(some_registration1, self.registry["name1"])
self.assertIn("name2", self.registry)
self.assertEqual(some_registration2, self.registry["name2"])
self.registry["name1"]()
self.assertTrue(self.n1_called)
self.registry["name2"]()
self.assertTrue(self.n2_called) | [
9,
14376
] |
def METHOD_NAME():
parser = argparse.ArgumentParser(description="Go NGCF")
parser.add_argument(
'--batch_size',
type=int,
default=1024,
help="the batch size for bpr loss training procedure")
parser.add_argument(
'--recdim', type=int, default=64, help="the embedding size of NGCF")
parser.add_argument(
'--n_layers', type=int, default=3, help="the layer num of NGCF")
parser.add_argument(
'--lr', type=float, default=0.0001, help="the learning rate")
parser.add_argument(
'--decay',
type=float,
default=1e-5,
help="the weight decay for l2 normalizaton")
parser.add_argument(
'--test_batch_size',
type=int,
default=1024,
help="the batch size of users for testing")
parser.add_argument(
'--dataset',
type=str,
default='gowalla',
help="available datasets: [lastfm, gowalla, yelp2018, amazon-book]")
parser.add_argument(
'--path',
type=str,
default="./checkpoints",
help="path to save weights")
parser.add_argument(
'--topks', nargs='?', default="[20]", help="@k test list")
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--seed', type=int, default=2020, help='random seed')
return parser.METHOD_NAME() | [
214,
335
] |
async def METHOD_NAME(
self,
credential_definition: dict,
credential_data: dict,
credential_request_metadata: dict,
credential_attr_mime_types=None,
credential_id: str = None,
rev_reg_def: dict = None,
):
"""Store a credential in the wallet.
Args:
credential_definition: Credential definition for this credential
credential_data: Credential data generated by the issuer
credential_request_metadata: credential request metadata generated
by the issuer
credential_attr_mime_types: dict mapping attribute names to (optional)
MIME types to store as non-secret record, if specified
credential_id: optionally override the stored credential id
rev_reg_def: revocation registry definition in json
Returns:
the ID of the stored credential
""" | [
1308,
2540
] |
def METHOD_NAME(self, request):
return request | [
365,
157,
335
] |
def METHOD_NAME(model):
predictions = []
references = []
for batch in librispeech_test_clean:
audio = batch["audio"]
input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features
reference = processor.tokenizer._normalize(batch['text'])
references.append(reference)
with torch.no_grad():
predicted_ids = model.generate(input_features)[0]
transcription = processor.decode(predicted_ids)
prediction = processor.tokenizer._normalize(transcription)
predictions.append(prediction)
wer_result = wer.compute(references=references, predictions=predictions)
print(f"Result wer: {wer_result * 100}")
accuracy = 1 - wer_result
print("Accuracy: %.5f" % accuracy)
return accuracy | [
1171,
717
] |
def METHOD_NAME(das, filename):
"""
Saves a series of spectra
das (list of DataArray): data to save
filename (str)
"""
exporter = dataio.find_fittest_converter(filename)
if os.path.exists(filename):
# mostly to warn if multiple ypos/xpos are rounded to the same value
logging.warning("Overwriting file '%s'.", filename)
else:
logging.info("Saving file '%s", filename)
exporter.export(filename, das) | [
73,
365
] |
def METHOD_NAME():
x, y = te.var("x"), te.var("y")
equations = [
tvm.tir.EQ(x + y, 20),
tvm.tir.EQ(x - y, 10),
]
solution = arith.solve_linear_equations(equations)
assert len(solution.src_to_dst) == 0
assert len(solution.dst_to_src) == 0
assert len(solution.src.variables) == 0
assert len(solution.src.ranges) == 0
assert ir.structural_equal(solution.src.relations, equations)
assert ir.structural_equal(solution.src, solution.dst) | [
9,
35,
486,
24,
283
] |
def METHOD_NAME(self):
with self.assertRaisesRegex(NotImplementedError, NOT_IMPLEMENTED_MSG):
self.user.get_username(User("foobar")) | [
9,
19,
2072
] |
def METHOD_NAME(self):
response_content = b'HTTP/1.1 404 Not Found\r\nContent-Length: 105\r\nConnection: keep-alive\r\n' \
b'Content-Encoding: gzip\r\nCache-Control: no-cache\r\n' \
b'Content-Type: application/json; charset=utf-8\r\nOData-Version: 4.0\r\n' \
b'\r\n\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x0b\x15\xc71\n\x800\x0c\x05\xd0\xab|\xb2t\x11' \
b'\x07\x17\xc5Sx\x05M\xa3\x14\xdaD\xda:\x94\xe2\xdd\xc5\xb7\xbdN\x92\xb3eZ;\xb1y\xa1\x95' \
b'\xa6y\xa1\x81\x92\x94\xb2_\xff\x9d\x7fRj\x0e\xbc+\xd4*\x0e\xc1i\x8fz\x04\x05[\x8c\xc25' \
b'\x98\xc2N\xd4v\x0b\xdc\x96\x8d\xa5\x147\xd2\xfb~Od\xf8E^\x00\x00\x00'
response = RestService.build_response_from_raw_bytes(response_content)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.headers.get("Content-Length"), "105")
self.assertEqual(response.headers.get("Connection"), "keep-alive")
self.assertEqual(response.headers.get("Content-Encoding"), "gzip")
self.assertEqual(response.headers.get("Cache-Control"), "no-cache")
self.assertEqual(response.headers.get("Content-Type"), "application/json; charset=utf-8")
self.assertEqual(response.headers.get("OData-Version"), "4.0")
self.assertEqual(
response.text,
"{\"error\":{\"code\":\"278\",\"message\":\"\'dummy\' "
"can not be found in collection of type \'Process\'.\"}}") | [
9,
56,
17,
280,
958,
17,
130
] |
def METHOD_NAME(self):
age = app.config.get("HARVESTER_ZOMBIE_AGE")
since = dates.format(dates.before_now(age))
actives = models.BackgroundJob.active(self.__action__, since=since)
if self.background_job.id in [a.id for a in actives] and len(actives) == 1:
return True
if len(actives) == 0:
return True
return False | [
246,
1273
] |
async def METHOD_NAME(self):
assert task_run.name is None | [
9,
156,
137,
98,
1646,
130,
0
] |
def METHOD_NAME(self):
map = PortMappings()
map.add(port=[123, 1337], protocol="tcp")
map.add(port=[124, 1338], protocol="tcp")
self.assertEqual("-p 123-1338:123-1338", map.to_str()) | [
9,
237,
1844,
97,
234,
661
] |
def METHOD_NAME(self): | [
59,
2786
] |
def METHOD_NAME(self, dbcon):
count_obj = len(list(dbcon.database[self.TEST_PROJECT_NAME].find({})))
assert 15 == count_obj | [
9,
635,
190
] |
def METHOD_NAME(self):
return self.get_origin_role_members('user') | [
3467
] |
def METHOD_NAME(url:str, *, httpsify:bool=False) -> urllib.parse.ParseResult:
url_parsed = _replace_urls(url)
if httpsify:
url_parsed = _httpsify_and_remove_tracking_urls(url)
else:
url_parsed = urllib.parse.urlparse(url)
return url_parsed | [
2549,
17736
] |
def METHOD_NAME() -> BackendConfig:
"""
Return the `BackendConfig` for PyTorch's native QNNPACK backend.
"""
conv_dtype_configs = [
qnnpack_weighted_op_qint8_symmetric_dtype_config,
qnnpack_weighted_op_quint8_dtype_config,
]
linear_dtype_configs = [
qnnpack_weighted_op_qint8_symmetric_dtype_config,
qnnpack_weighted_op_quint8_dtype_config,
qnnpack_default_dynamic_int8_dtype_config,
qnnpack_default_dynamic_float16_dtype_config,
]
binary_op_dtype_configs = [
qnnpack_default_op_qint8_symmetric_dtype_config,
qnnpack_default_op_quint8_dtype_config,
]
default_op_dtype_configs = [
qnnpack_default_op_qint8_symmetric_dtype_config,
qnnpack_default_op_quint8_dtype_config,
]
fixed_qparams_op_dtype_configs = [
qnnpack_default_op_qint8_symmetric_dtype_config,
qnnpack_default_op_quint8_dtype_config,
]
share_qparams_op_dtype_configs = [
qnnpack_default_op_qint8_symmetric_dtype_config,
qnnpack_default_op_quint8_dtype_config,
]
rnn_op_dtype_configs = [
qnnpack_default_dynamic_int8_dtype_config,
qnnpack_default_dynamic_float16_dtype_config,
]
embedding_op_dtype_configs = [
qnnpack_weight_only_quint8_dtype_config,
qnnpack_weight_only_quint4x2_dtype_config,
]
return BackendConfig("qnnpack") \
.set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
.set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
.set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
.set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
.set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
.set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
.set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
.set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
.set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
.set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs)) | [
19,
-1,
3127,
200
] |
def METHOD_NAME(self):
"""
Create a urllib opener.
@return: An opener.
@rtype: I{OpenerDirector}
"""
if self.urlopener is None:
return u2.build_opener(*self.u2handlers())
else:
return self.urlopener | [
-1
] |
def METHOD_NAME(run_component):
run_component([
"--input", input_data_path,
"--output", "foo.h5mu",
"--modality", "rna",
"--top_n_vars", "10,20,90",
"--output_compression", "gzip"
])
assert Path("foo.h5mu").is_file()
data_with_qc = md.read("foo.h5mu")
var, obs = data_with_qc.mod['rna'].var, data_with_qc.mod['rna'].obs
for top_n_vars in ("10", "20", "90"):
assert f"pct_of_counts_in_top_{top_n_vars}_vars" in obs
assert "total_counts" in obs
assert "num_nonzero_vars" in obs
assert "pct_dropout" in var
assert "num_nonzero_obs" in var
assert "obs_mean" in var
assert "total_counts" in var | [
9,
238,
7646
] |
def METHOD_NAME(self):
config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
self.assertIsInstance(config, RobertaConfig) | [
9,
200,
578,
44,
280,
578,
769
] |
def METHOD_NAME(self):
archive = IArchive( 'cube.abc' )
self.assertEqual(archive.getMaxNumSamplesForTimeSamplingIndex(0), 1)
self.assertEqual(archive.getMaxNumSamplesForTimeSamplingIndex(1), 3)
top = archive.getTop()
self.assertEqual(top.getNumChildren(), 1)
# IXform Object
xform = top.getChild( 0 )
self.assertEqual(xform.getName(), 'cube1')
# IXform Properties
xformTopCP = xform.getProperties()
xformCP = xformTopCP.getProperty( 0 );
inherits = xformCP.getProperty( '.inherits' )
ops = xformCP.getProperty( '.ops' )
vals = xformCP.getProperty( '.vals' )
self.assertEqual(inherits.getNumSamples(), numSamplesPerCycle)
self.assertEqual(ops.getNumSamples(), numSamplesPerCycle)
self.assertEqual(vals.getNumSamples(), numSamplesPerCycle)
# Get the time sampling associated with vals.
tSamp = vals.getTimeSampling()
# Access index at a given time
index0 = tSamp.getNearIndex( tvec[0] + 0.1, numSamplesPerCycle )
index1 = tSamp.getCeilIndex( tvec[0] + 0.1, numSamplesPerCycle )
index2 = tSamp.getFloorIndex( tvec[2] + 0.1, numSamplesPerCycle )
self.assertEqual(index0, 0)
self.assertEqual(index1, 1)
self.assertEqual(index2, 2)
val0 = vals.samples[index0]
val1 = vals.samples[index1]
val2 = vals.samples[index2]
self.assertEqual(val0, xformvec[0])
self.assertEqual(val1, xformvec[1])
self.assertEqual(val2, xformvec[2])
# IPolyMesh Object
mesh = xform.getChild( 'cube1Shape' )
meshTopCP = mesh.getProperties()
meshCP = meshTopCP.getProperty(0)
# IPolyMesh Properties
counts = meshCP.getProperty( '.faceCounts' )
indices = meshCP.getProperty( '.faceIndices' )
p = meshCP.getProperty( 'P' )
bnds = meshCP.getProperty( '.selfBnds' )
self.assertEqual(counts.samples[0], faceCounts)
self.assertEqual(indices.samples[0], faceIndices)
self.assertEqual(p.samples[0], points)
self.assertEqual(bnds.samples[0], selfBnds) | [
9,
512,
2387,
6438
] |
def METHOD_NAME(matchobj):
prefix = matchobj.group(1)
name = matchobj.group(2)
decoder = DECODERS.get(name)
method = DECODER_METHODS.get(name)
if decoder:
return f'{prefix}{ref(name)}'
elif method:
return f'{prefix}{methodref(name)}'
else:
# Return the fully matched text (aka don't replace)
return matchobj.group(0) | [
369,
41,
548
] |
async def METHOD_NAME(pipeline_response):
deserialized = self._deserialize('EndpointServicesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem) | [
297,
365
] |
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"managedInstanceName", self.ctx.args.managed_instance_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters | [
274,
386
] |
def METHOD_NAME(regexes, the_list):
return all([re.match(regex, item) for regex, item in zip(regexes, the_list)]) | [
10524,
590
] |
def METHOD_NAME():
r"""
a b c
|╳| |
e d |
| \|
g f
"""
def fd(a, b):
pass
def fe(b, a):
pass
def ff(c, d):
pass
return scgraph.Graph({'d': fd, 'e': fe, 'f': ff, 'g': 'e'}) | [
303,
1506
] |
def METHOD_NAME(self):
self.run_test("""
def function_alias0():
def p(): return 0
g = p
return g()""",
function_alias0=[]) | [
9,
559,
-1
] |
def METHOD_NAME(self, userList):
"""
Delete profiles for a list of users
"""
credDict = self.getRemoteCredentials()
requesterUserName = credDict["username"]
admin = Properties.SERVICE_ADMINISTRATOR in credDict["properties"]
for entry in userList:
userName = entry
if admin or userName == requesterUserName:
result = self.upDB.deleteUserProfile(userName)
if not result["OK"]:
return result
return S_OK() | [
294,
34,
1348
] |
def METHOD_NAME(self):
if self.debugNP.is_hidden():
self.debugNP.show()
else:
self.debugNP.hide() | [
766,
290
] |
def METHOD_NAME(self, txn: Cursor) -> None:
"""Gets called when setting up a brand new database. This allows us to
apply stricter checks on new databases versus existing database.
""" | [
250,
80,
463
] |
def METHOD_NAME(self, wallet_manager):
if wallet_manager is None:
return []
METHOD_NAME = []
for wallet in wallet_manager.METHOD_NAME.values():
if self in wallet.devices:
METHOD_NAME.append(wallet)
return METHOD_NAME | [
5509
] |
def METHOD_NAME(args: argparse.Namespace, work_dir: Path):
config = get_config(args.config)
run_scripts = generate_test_scripts(config, work_dir)
dump_test_scripts(run_scripts, work_dir)
# clone the examples repo
Repo.clone_from(EXAMPLE_URL, work_dir.joinpath("examples"))
return run_scripts | [
123,
586,
450
] |
def METHOD_NAME(self):
date_span = self.root.xpath(".//h5[2]")[0].text_content()
start_year, end_year = re.search(r"(\d{4}).+(\d{4})", date_span).groups()
table_rows = self.root.xpath(".//tbody//tr")
agenda_items_list = []
for row_item in table_rows:
columns = row_item.xpath("./td")
columns_content = [x.text_content().strip() for x in columns]
if len(columns_content) > 7:
columns_content = columns_content[:7]
part_date, bill, agenda_time, com, sub_com, loc, descr = columns_content
# Example Column:
# bill part_date com sub_com loc descr
# HB 1111 | 12/08 2:00 PM | Joint Approps | | 327E | Funding bill
full_bill_name_match = re.search(r"[A-Z]{1,4}\s+\d+", bill)
if full_bill_name_match:
bill_name = full_bill_name_match.group()
else:
partial_bill_name_match = re.search(r"\d+", bill)
if partial_bill_name_match:
match_in_descr = re.search(r"[A-Z]{1,4}\s{0,3}\d+", descr)
if match_in_descr:
bill_name = match_in_descr.group()
else:
bill_link = columns[1].xpath("./a")[0].get("href")
bill_name_scraper = BillNameScraper(bill_link)
bill_name = bill_name_scraper.get_bill_name()
else:
bill_name = ""
date_time_parts = part_date.split()
month = int(re.search(r"\d+", date_time_parts[0]).group())
if month == 1 and not start_year == end_year:
event_year = end_year
else:
event_year = start_year
date_time_parts[0] += f"/{event_year}"
date_time = " ".join(date_time_parts)
agenda_item = {
"bill_name": bill_name,
"date_time": date_time,
"agenda_time": agenda_time,
"committee": com,
"sub_com": sub_com,
"location": loc,
"description": descr,
}
agenda_items_list.append(agenda_item)
events = EventConsolidator(agenda_items_list, self.source.url)
yield from events.consolidate() | [
356,
1174
] |
def METHOD_NAME(self) -> Optional['outputs.ServicesResourceResponseIdentity']:
"""
Setting indicating whether the service has a managed identity associated with it.
"""
return pulumi.get(self, "identity") | [
2989
] |
def METHOD_NAME(self, project):
config = f"""
projects_and_groups:
{project.path_with_namespace}:
badges:
pipeline-status:
name: "Project Badge"
link_url: "https://gitlab.example.com/%{{project_path}}/-/commits/%{{default_branch}}/foo"
image_url: "https://gitlab.example.com/%{{project_path}}/badges/%{{default_branch}}/pipeline.svg"
"""
run_gitlabform(config, project)
badges = get_project_badges(project)
assert len(badges) == 1
assert badges[0].name == "Project Badge"
config = f"""
projects_and_groups:
{project.path_with_namespace}:
badges:
pipeline-status:
name: "Project Badge"
delete: true
"""
run_gitlabform(config, project)
badges = get_project_badges(project)
assert len(badges) == 0 | [
9,
3233,
34
] |
def METHOD_NAME(self, event):
pass | [
69,
1100
] |
def METHOD_NAME(self, opt, steps=5): | [
231,
968
] |
def METHOD_NAME(ts):
if ts[-1] != 'Z':
raise Exception("missing time zone marker Z")
ts = ts[:-1]
parts = ts.split(".")
ts = datetime.strptime(parts[0], "%Y-%m-%dT%H:%M:%S")
if len(parts[1]) > 6:
parts[1] = parts[1][:6] # ensure we always parse microseconds
return ts + timedelta(microseconds=datetime.strptime(parts[1], "%f").microsecond) | [
214,
2722
] |
def METHOD_NAME(
self, node_type, node_id, cpu, memory, gpu_stats=[]
):
node = self._job_nodes[node_type][node_id]
node.update_resource_usage(cpu, memory, gpu_stats) | [
86,
1716,
191,
558
] |
def METHOD_NAME(self):
ts = self.read_files()
# to modify the number of data
g = [("d=%s" % str(i+1)) for i, _ in enumerate(ts)]
x = []
gs = []
bi = []
for its, pts in enumerate(ts):
for ets in pts:
gs.append(g[its])
x.append(float(ets))
binom = poisson(lam=its, size=29289)
for b in binom:
bi.append(b)
df = pd.DataFrame({"x": x[:len(bi)], "b": bi, "g": gs[:len(bi)]})
# df_poisson = pd.DataFrame(dict(x=x, g=data_binom))
# bi = sns.FacetGrid(,
# aspect=15,
# height=2)
g = sns.FacetGrid(df,
row="g",
hue="g",
aspect=15,
height=2)
# g.map(sns.distplot, "x", data_binom, ked=False)
# bi.map(sns.kdeplot)
g.map(sns.distplot, "b", kde=True)
g.map(sns.distplot, "x", kde=False)
# g.map(sns.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
g.map(plt.axhline, y=0, lw=2, clip_on=True)
g.map(self.label, "x")
# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-.25)
# Remove axes details that don't play well with overlap
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
plt.show() | [
3701,
17897
] |
def METHOD_NAME(self):
if self.settings.os == "Windows":
del self.options.fPIC
if not self._has_neon_support:
del self.options.neon
if not self._has_msa_support:
del self.options.msa
if not self._has_sse_support:
del self.options.sse
if not self._has_vsx_support:
del self.options.vsx | [
200,
1881
] |
def METHOD_NAME(instance_profile_name, aws_auth_cred):
"""
Check whether the given IAM instance profile already exists in the AWS Account
Args:
instance_profile_name (str): Instance profile name
aws_auth_cred (dict): AWS Auth details with region information
Returns:
Boolean: True if env exists else False
"""
iam_client = get_iam_client(aws_auth_cred)
try:
profile = iam_client.get_instance_profile(InstanceProfileName=instance_profile_name)
return True if profile else False
except:
return False | [
250,
89,
337,
954
] |
def METHOD_NAME(private_key: Union[str, bytes]) -> str:
try:
if isinstance(private_key, str):
key = SigningKey(bytes.fromhex(private_key))
elif isinstance(private_key, bytes):
key = SigningKey(private_key)
str_key = key_to_str(key)
if str_key == private_key:
return str_key
except Exception:
pass
raise Exception(f"{NODE_PRIVATE_KEY} is invalid") | [
187,
547,
59
] |
def METHOD_NAME(filepath: Union[str, Optional[Path]], strict=False) -> Path:
return Path(filepath).expanduser().absolute().resolve(strict=strict) | [
1014,
4653,
157
] |
def METHOD_NAME(
mocked_refund,
staff_api_client,
permission_group_manage_orders,
order_with_lines,
payment_dummy,
count_queries,
):
query = """
mutation OrderFulfillmentRefundProducts(
$order: ID!, $input: OrderRefundProductsInput!
) {
orderFulfillmentRefundProducts(
order: $order,
input: $input
) {
fulfillment{
id
status
lines{
id
quantity
orderLine{
id
}
}
}
errors {
field
code
message
warehouse
orderLines
}
}
}
"""
permission_group_manage_orders.user_set.add(staff_api_client.user)
payment_dummy.total = order_with_lines.total_gross_amount
payment_dummy.captured_amount = payment_dummy.total
payment_dummy.charge_status = ChargeStatus.FULLY_CHARGED
payment_dummy.save()
order_with_lines.payments.add(payment_dummy)
line_to_refund = order_with_lines.lines.first()
order_id = graphene.Node.to_global_id("Order", order_with_lines.pk)
line_id = graphene.Node.to_global_id("OrderLine", line_to_refund.pk)
variables = {
"order": order_id,
"input": {"orderLines": [{"orderLineId": line_id, "quantity": 2}]},
}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["orderFulfillmentRefundProducts"]["fulfillment"] | [
9,
14626,
7564,
4866,
852,
513
] |
def METHOD_NAME(self):
self.assertEqual({
'level_seed': TensorSpec(shape=(), dtype=torch.int32),
'prev_level_complete': TensorSpec(shape=(), dtype=torch.uint8),
'prev_level_seed': TensorSpec(shape=(), dtype=torch.int32)
}, self._env.env_info_spec())
self.assertEqual({
'rgb':
BoundedTensorSpec(
shape=(3, 64, 64),
dtype=torch.uint8,
minimum=np.array(0),
maximum=np.array(255))
}, self._env.observation_spec())
self.assertEqual(
BoundedTensorSpec(
shape=(),
dtype=torch.int32,
minimum=np.array(0),
maximum=np.array(14)), self._env.action_spec()) | [
9,
15012,
2016
] |
def METHOD_NAME(rst, references=None, destination=sys.stdout):
"""
Convert *rst* to a lazy string.
If *destination* is a file-like object connected to a terminal,
enrich text with suitable ANSI escapes. Otherwise return plain text.
"""
if is_terminal(destination):
rst_state_hook = ansi_escapes
else:
rst_state_hook = None
return RstToTextLazy(rst, rst_state_hook, references) | [
1320,
24,
1019
] |
def METHOD_NAME(pace_str):
if pace_str.count(':') != 1:
return 0.0
else:
_prts = pace_str.split(':')
try:
_p_min = int(_prts[0])
_p_sec = int(_prts[1])
return float( _p_min + (_p_sec/60.0))
except:
return 0.0 | [
-1
] |
def METHOD_NAME(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros') | [
56
] |
def METHOD_NAME(self):
append_only_input_table1 = input_table(col_defs={"MyStr": dht.string})
append_only_input_table2 = input_table(col_defs={"MyInt": dht.int32})
def table_generator_function():
my_str = append_only_input_table1.last_by().j_table.getColumnSource('MyStr').get(0)
my_int = append_only_input_table2.last_by().j_table.getColumnSource('MyInt').getInt(0)
return new_table([
string_col('ResultStr', [my_str]),
int_col('ResultInt', [my_int]),
])
result_table = function_generated_table(table_generator_function,
source_tables=[append_only_input_table1, append_only_input_table2])
self.assertEqual(result_table.size, 1)
result_str = result_table.j_table.getColumnSource("ResultStr").get(0)
result_int = result_table.j_table.getColumnSource("ResultInt").get(0)
self.assertEqual(result_str, None)
self.assertEqual(result_int, None)
append_only_input_table1.add(new_table([string_col(name='MyStr', data=['test string'])]))
self.wait_ticking_table_update(append_only_input_table1, row_count=1, timeout=30)
self.assertEqual(result_table.size, 1)
result_str = result_table.j_table.getColumnSource("ResultStr").get(0)
result_int = result_table.j_table.getColumnSource("ResultInt").get(0)
self.assertEqual(result_str, 'test string')
self.assertEqual(result_int, None)
append_only_input_table2.add(new_table([int_col(name='MyInt', data=[12345])]))
self.wait_ticking_table_update(append_only_input_table2, row_count=1, timeout=30)
self.assertEqual(result_table.size, 1)
result_str = result_table.j_table.getColumnSource("ResultStr").get(0)
result_int = result_table.j_table.getColumnSource("ResultInt").get(0)
self.assertEqual(result_str, 'test string')
self.assertEqual(result_int, 12345)
with update_graph.exclusive_lock(self.test_update_graph):
append_only_input_table1.add(new_table([string_col(name='MyStr', data=['test string 2'])]))
append_only_input_table2.add(new_table([int_col(name='MyInt', data=[54321])]))
self.wait_ticking_table_update(append_only_input_table1, row_count=2, timeout=30)
self.wait_ticking_table_update(append_only_input_table2, row_count=2, timeout=30)
self.assertEqual(result_table.size, 1)
result_str = result_table.j_table.getColumnSource("ResultStr").get(0)
result_int = result_table.j_table.getColumnSource("ResultInt").get(0)
self.assertEqual(result_str, 'test string 2')
self.assertEqual(result_int, 54321) | [
9,
4207,
410,
-1
] |
def METHOD_NAME():
if(len(sys.argv) == 2):
if(sys.argv[1] == "app"):
print("Copying resources ...")
dst = os.environ["TARGET_BUILD_DIR"] + "/" + os.environ["UNLOCALIZED_RESOURCES_FOLDER_PATH"]
if os.path.exists(projectpath + "/resources/img/"):
imgs = os.listdir(projectpath + "/resources/img/")
for img in imgs:
print("copying " + img + " to " + dst)
shutil.copy(projectpath + "/resources/img/" + img, dst)
if os.path.exists(projectpath + "/resources/fonts/"):
fonts = os.listdir(projectpath + "/resources/fonts/")
for font in fonts:
print("copying " + font + " to " + dst)
shutil.copy(projectpath + "/resources/fonts/" + font, dst)
config = parse_config(projectpath)
xcconfig = parse_xcconfig(os.path.join(os.getcwd(), IPLUG2_ROOT + '/common-ios.xcconfig'))
CFBundleGetInfoString = config['BUNDLE_NAME'] + " v" + config['FULL_VER_STR'] + " " + config['PLUG_COPYRIGHT_STR']
CFBundleVersion = config['FULL_VER_STR']
CFBundlePackageType = "BNDL"
CSResourcesFileMapped = True
LSMinimumSystemVersion = xcconfig['DEPLOYMENT_TARGET']
print("Processing Info.plist files...") | [
57
] |
def METHOD_NAME(self):
self.setup_step(subunit.SubunitShellCommand(command='test'))
self.expect_commands(
ExpectShell(workdir='wkdir',
command="test")
.exit(0)
)
self.expect_outcome(result=SUCCESS,
state_string="shell no tests run")
return self.run_step() | [
9,
35
] |
def METHOD_NAME(self, word, distance):
"""Find the best match with {word} in a trie.
:arg str word: Query word.
:arg int distance: Maximum allowed distance.
:returns str: Best match with {word}.
"""
if self.get(word):
return word
for i in range(1, distance + 1):
result = self.hamming(word, i)
if result is not None:
return result
return None | [
2192,
2771
] |
def METHOD_NAME(notebooks, output_notebook, kernel_name):
notebook_path = notebooks["data_split"]
pm.execute_notebook(notebook_path, output_notebook, kernel_name=kernel_name) | [
9,
365,
265,
420
] |
def METHOD_NAME(self):
requestBuilder = RequestMockBuilder(
{
"zoo.animals.insert": (
None,
'{"data": {"foo": "bar"}}',
'{"data": {"foo": "bar"}}',
)
}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
zoo.animals().insert(body='{"data": {"foo": "blah"}}').execute()
self.fail("UnexpectedBodyError should have been raised")
except UnexpectedBodyError:
pass | [
9,
53,
909,
2829
] |
def METHOD_NAME(self):
n = 10
k = 3
G = random_uniform_k_out_graph(n, k, with_replacement=True)
assert G.is_multigraph()
assert all(d == k for v, d in G.out_degree())
n = 10
k = 9
G = random_uniform_k_out_graph(n, k, with_replacement=False, self_loops=False)
assert nx.number_of_selfloops(G) == 0
assert all(d == k for v, d in G.out_degree()) | [
9,
41,
3729
] |
def METHOD_NAME(self, client: MLClient):
# load_component
score_func = load_component("./tests/test_configs/pipeline_jobs/job_with_registry_model_as_input/score.yml")
pipeline_score_model = Input(
type="custom_model", path="azureml://registries/sdk-test/models/iris_model/versions/1"
)
assert_pipeline_job_cancel(client, score_func, pipeline_score_model, self.test_data) | [
9,
1148,
41,
125,
1007,
61,
510
] |
def METHOD_NAME(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state") | [
1994,
551
] |
def METHOD_NAME() -> None:
vgp0, vgp1, cvgp = _prepare_models()
pred_ydensity0 = vgp0.predict_log_density((Datum.Xtest, Datum.Ytest))
pred_ydensity_c0 = cvgp.predict_log_density((Datum.Xtest_augmented0, Datum.Ytest_augmented0))
assert_allclose(pred_ydensity0, pred_ydensity_c0, atol=1e-2)
pred_ydensity1 = vgp1.predict_log_density((Datum.Xtest, Datum.Ytest))
pred_ydensity_c1 = cvgp.predict_log_density((Datum.Xtest_augmented1, Datum.Ytest_augmented1))
assert_allclose(pred_ydensity1, pred_ydensity_c1, atol=1e-2) | [
9,
2103,
390,
2915
] |
def METHOD_NAME(self):
key = "0123456789abcdef"
db_token = Token("motp002", tokentype="motp")
db_token.save()
token = MotpTokenClass(db_token)
token.update({"otpkey": key,
"motppin": "6666",
"pin": "test"})
self.assertTrue(token.token.tokentype == "motp", token.token.tokentype)
self.assertTrue(token.type == "motp", token)
class_prefix = token.get_class_prefix()
self.assertTrue(class_prefix == "PIMO", class_prefix)
self.assertTrue(token.get_class_type() == "motp", token)
# Correct OTP value
r = token.check_otp("6ed4e4", options={"initTime": 129612120})
self.assertTrue(r == 129612120, r)
# Check the same value again
r = token.check_otp("6ed4e4", options={"initTime": 129612120})
self.assertTrue(r == -1, r) | [
9,
11496,
1772,
3595,
99
] |
def METHOD_NAME(request, group_slug, project_slug):
EnvironmentFormSet = forms.inlineformset_factory(Project, Environment, form=EnvironmentForm, extra=1)
if request.method == "POST":
form = EnvironmentFormSet(request.POST, instance=request.project)
if form.is_valid():
form.save()
return redirect(request.path)
else:
form = EnvironmentFormSet(instance=request.project)
context = {
'group': request.group,
'project': request.project,
'form': form,
}
return render(request, 'squad/project_settings/environments.jinja2', context) | [
4793
] |
def METHOD_NAME(self):
# This test is complicated because we can't import
# ckanext.datastore.plugin before running it. If we did so, the
# DatastorePlugin class would be parsed which breaks the reason of our
# test.
p.load("sample_datastore_plugin")
thrown_exception = None
try:
p.load("datastore")
except Exception as e:
thrown_exception = e
idatastores = [
x.__class__.__name__
for x in p.PluginImplementations(interfaces.IDatastore)
]
p.unload("sample_datastore_plugin")
assert thrown_exception is not None, (
'Loading "datastore" after another IDatastore plugin was'
"loaded should raise DatastoreException"
)
assert (
thrown_exception.__class__.__name__
== plugin.DatastoreException.__name__
)
assert plugin.DatastorePlugin.__name__ not in idatastores, (
'You shouldn\'t be able to load the "datastore" plugin after'
"another IDatastore plugin was loaded"
) | [
9,
2309,
914,
679,
1993,
3160
] |
def METHOD_NAME(self):
with open_file("rsa_2048_key_cert.pfx") as rsa_2048_key_cert_pfx:
data = rsa_2048_key_cert_pfx.read()
key, certs = _parse_pkcs12(data, None)
self.assertEqual(1, len(certs)) | [
9,
214,
4341
] |
def METHOD_NAME(fmt):
""" Return the converter corresponding to a format name
:param fmt: (string) the format name
:returns: (module) the converter
:raises ValueError: in case no exporter can be found
"""
# Look dynamically which format is available
for module_name in _iomodules:
try:
converter = importlib.import_module("." + module_name, "odemis.dataio")
except (ValueError, TypeError, ImportError):
logging.info("Import of converter %s failed", module_name, exc_info=True)
continue # module cannot be loaded
if fmt == converter.FORMAT:
return converter
raise ValueError("No converter for format %s found" % fmt) | [
19,
1252
] |
def METHOD_NAME(tasks, engine_number_map):
"""Plot usage stats"""
plt.figure(figsize=(8, 8))
for engine_uuid, task_list in tasks.items():
engine_number = engine_number_map[engine_uuid]
number_list = [engine_number] * len(task_list)
start_list = [task['started'] for task in task_list]
completed_list = [task['completed'] for task in task_list]
plt.plot(
[number_list, number_list],
[start_list, completed_list], linewidth=10,
solid_capstyle="butt")
plt.xlim(min(engine_number_map.values()) - 1,
max(engine_number_map.values()) + 1)
plt.xlabel('Compute engine number')
plt.ylabel('Compute time')
idle_time, idle_perc = calculate_unused_compute(tasks)
plt.title(
'Cumulative idle time: %s, perc: %.2f %%' %
(idle_time, idle_perc)) | [
1288,
558
] |
async def METHOD_NAME(self): | [
631
] |
def METHOD_NAME(self):
self._removeLeft()
self._removeRight()
self.setX(0)
self.setY(0)
self._v_goleft = True
self._v_goright = True | [
1828,
3217,
1179
] |
def METHOD_NAME(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = (
(2 / (np.sqrt(3 * width) * np.pi**0.25))
* (1 - (x - center) ** 2 / width**2)
* np.exp(-((x - center) ** 2) / (2 * width**2))
)
return x | [
11220,
559
] |
def METHOD_NAME(self):
super().METHOD_NAME()
# First set up the interesting contest, with a few copies
# of everything.
self.contest = self.add_contest()
self.participation = self.add_participation(contest=self.contest)
self.tasks = [
self.add_task(contest=self.contest),
self.add_task(contest=self.contest)
]
self.datasets = sum([[
self.add_dataset(task=task, autojudge=False),
self.add_dataset(task=task, autojudge=True),
self.add_dataset(task=task, autojudge=False),
] for task in self.tasks], [])
# Similarly to esoperationstest, the active dataset is not
# autojudged.
for task in self.tasks:
task.active_dataset = task.datasets[0]
self.testcases = sum([[
self.add_testcase(dataset),
self.add_testcase(dataset),
self.add_testcase(dataset),
] for dataset in self.datasets], [])
self.session.flush() | [
0,
1
] |
def METHOD_NAME(self):
return 5 | [
3362
] |
def METHOD_NAME():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.GET,
url="http://localhost/api/v4/bulk_imports/1/entities",
json=[entity_content],
content_type="application/json",
status=200,
)
yield rsps | [
3204,
245,
2278,
512,
5399
] |
def METHOD_NAME(self, pyramid_request, search_index, UserRenameService):
svc = service_factory(sentinel.context, pyramid_request)
UserRenameService.assert_called_once_with(
session=pyramid_request.db, search_index=search_index
)
assert svc == UserRenameService.return_value | [
9,
1807
] |
def METHOD_NAME(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsMakeOneOnComp
"""
return super().METHOD_NAME | [
1461
] |
def METHOD_NAME(fname, cuts):
with open(fname, 'rb') as fp:
doc = etree.parse(fp)
lines = doc.findall('.//{*}TextLine')
char_idx = 0
for line, line_cuts in zip(lines, cuts):
idx = 0
for el in line:
if el.tag.endswith('Shape'):
continue
elif el.tag.endswith('SP'):
idx += 1
elif el.tag.endswith('String'):
str_len = len(el.get('CONTENT'))
# clear out all
for chld in el:
if chld.tag.endswith('Glyph'):
el.remove(chld)
for char in line_cuts[idx:str_len]:
glyph = etree.SubElement(el, 'Glyph')
glyph.set('ID', f'char_{char_idx}')
char_idx += 1
glyph.set('CONTENT', char[0])
glyph.set('GC', str(char[2]))
pol = etree.SubElement(etree.SubElement(glyph, 'Shape'), 'Polygon')
pol.set('POINTS', ' '.join([str(coord) for pt in char[1] for coord in pt]))
idx += str_len
with open(f'{os.path.basename(fname)}_algn.xml', 'wb') as fp:
doc.write(fp, encoding='UTF-8', xml_declaration=True) | [
960,
9395
] |
def METHOD_NAME(fullname: str) -> str:
"""Return a C name usable for an exported definition.
This is like private_name(), but the output only depends on the
'fullname' argument, so the names are distinct across multiple
builds.
"""
# TODO: Support unicode
return fullname.replace("___", "___3_").replace(".", "___") | [
6662,
156
] |
def METHOD_NAME(catalog_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCatalogResult:
"""
Get a Catalog
Azure REST API version: 2022-09-01-preview.
:param str catalog_name: Name of catalog
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['catalogName'] = catalog_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:azuresphere:getCatalog', __args__, opts=opts, typ=GetCatalogResult).value
return AwaitableGetCatalogResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type')) | [
19,
2824
] |