text (string, length 15 to 7.82k) | ids (sequence, length 1 to 7)
---|---
def METHOD_NAME(value, css_class):
"""
http://stackoverflow.com/questions/4124220/django-adding-css-classes-when-rendering-form-fields-in-a-template
Inserts classes into template variables that contain HTML tags,
useful for modifying forms without needing to change the Form objects.
Usage:
{{ field.label_tag|add_class:"control-label" }}
In the case of REST Framework, the filter is used to add Bootstrap-specific
classes to the forms.
"""
html = str(value)
match = class_re.search(html)
if match:
m = re.search(r'^%s$|^%s\s|\s%s\s|\s%s$' % (css_class, css_class,
css_class, css_class),
match.group(1))
if not m:
return mark_safe(class_re.sub(match.group(1) + " " + css_class,
html))
else:
return mark_safe(html.replace('>', ' class="%s">' % css_class, 1))
return value | [
238,
2
] |
def METHOD_NAME(self):
view = ListViewFilter()
self.assertRaises(Exception, view.setModel, PyListModel()) | [
9,
0,
578
] |
def METHOD_NAME():
expected = np.array([1, 1, 1])
hexcode = '#FFFFFF'
res = colormap.hex_to_rgb(hexcode)
npt.assert_array_almost_equal(res, expected)
hashed_hexcode = 'FFFFFF'
res = colormap.hex_to_rgb(hashed_hexcode)
npt.assert_array_almost_equal(res, expected) | [
9,
696,
24,
2310
] |
def METHOD_NAME(model: gmsh.model, name: str) -> gmsh.model:
"""Create a Gmsh model of a ring-type geometry using hexahedral cells.
Args:
model: Gmsh model to add the mesh to.
name: Name (identifier) of the mesh to add.
Returns:
Gmsh model with a ring mesh added.
"""
model.add(name)
model.setCurrent(name)
# Recombine tetrahedra to hexahedra
gmsh.option.setNumber("Mesh.RecombinationAlgorithm", 2)
gmsh.option.setNumber("Mesh.RecombineAll", 2)
gmsh.option.setNumber("Mesh.CharacteristicLengthFactor", 1)
circle = model.occ.addDisk(0, 0, 0, 1, 1)
circle_inner = model.occ.addDisk(0, 0, 0, 0.5, 0.5)
cut = model.occ.cut([(2, circle)], [(2, circle_inner)])[0]
extruded_geometry = model.occ.extrude(cut, 0, 0, 0.5, numElements=[5], recombine=True)
model.occ.synchronize()
model.addPhysicalGroup(2, [cut[0][1]], tag=1)
model.setPhysicalName(2, 1, "2D cylinder")
boundary_entities = model.getEntities(2)
other_boundary_entities = []
for entity in boundary_entities:
if entity != cut[0][1]:
other_boundary_entities.append(entity[1])
model.addPhysicalGroup(2, other_boundary_entities, tag=3)
model.setPhysicalName(2, 3, "Remaining boundaries")
model.mesh.generate(3)
model.mesh.setOrder(2)
volume_entities = []
for entity in extruded_geometry:
if entity[0] == 3:
volume_entities.append(entity[1])
model.addPhysicalGroup(3, volume_entities, tag=1)
model.setPhysicalName(3, 1, "Mesh volume")
return model | [
11310,
5402
] |
def METHOD_NAME(self):
"""The SubDomain in which the Eq is defined."""
return self._subdomain | [
302
] |
def METHOD_NAME(key):
global paramDict
if key in paramDict:
value = paramDict[key]
else:
raise RuntimeError
return value | [
19,
511
] |
def METHOD_NAME(self, user: User | RpcUser, provider: ExternalProviders) -> bool:
return self.get_identities_for_user(user, provider).exists() | [
220,
2989
] |
def METHOD_NAME(bytes: bytes) -> bytes:
"""
Pack a PER octect stream with the alternate read length indicator
:param bytes: octet stream
Currently unused, implemented to match exactly what was sent by mstsc.exe
on the wire.
"""
length = len(bytes)
return Uint16BE.pack(length | 0x8000) + bytes | [
77,
10622,
919,
4969
] |
def METHOD_NAME(self):
manager = ModelBuilder.build_sync(
Manager, defaults={Manager.name: "Guido"}
)
queried_manager = (
Manager.objects()
.where(Manager.id == manager.id)
.first()
.run_sync()
)
self.assertEqual(queried_manager.name, "Guido") | [
9,
1205,
105
] |
def METHOD_NAME():
parser = argparse.ArgumentParser(
description="dump PCM files from a WAV scp file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--write-num-frames", type=str, help="Specify wspecifer for utt2num_frames"
)
parser.add_argument(
"--filetype",
type=str,
default="mat",
choices=["mat", "hdf5", "sound.hdf5", "sound"],
help="Specify the file format for output. "
'"mat" is the matrix format in kaldi',
)
parser.add_argument(
"--format",
type=str,
default=None,
help="The file format for output pcm. "
"This option is only valid "
'when "--filetype" is "sound.hdf5" or "sound"',
)
parser.add_argument(
"--compress", type=strtobool, default=False, help="Save in compressed format"
)
parser.add_argument(
"--compression-method",
type=int,
default=2,
help="Specify the method(if mat) or " "gzip-level(if hdf5)",
)
parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
parser.add_argument(
"--normalize",
choices=[1, 16, 24, 32],
type=int,
default=None,
help="Give the bit depth of the PCM, "
"then normalizes data to scale in [-1,1]",
)
parser.add_argument(
"--preprocess-conf",
type=str,
default=None,
help="The configuration file for the pre-processing",
)
parser.add_argument(
"--keep-length",
type=strtobool,
default=True,
help="Truncating or zero padding if the output length "
"is changed from the input by preprocessing",
)
parser.add_argument("rspecifier", type=str, help="WAV scp file")
parser.add_argument(
"--segments",
type=str,
help="segments-file format: each line is either"
"<segment-id> <recording-id> <start-time> <end-time>"
"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5",
)
parser.add_argument("wspecifier", type=str, help="Write specifier")
return parser | [
19,
1319
] |
def METHOD_NAME():
"""
>>> import ZODB.tests.util
>>> from persistent.wref import PersistentWeakKeyDictionary
>>> key = ZODB.tests.util.P("key")
>>> missing = ZODB.tests.util.P("missing")
>>> d = PersistentWeakKeyDictionary([(key, 1)])
>>> d.get(key)
1
>>> d.get(missing)
>>> d.get(missing, 12)
12
""" | [
9,
4545,
8029,
59,
2445,
19
] |
def METHOD_NAME(migrate_project_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMigrateProjectResult]:
"""
Migrate Project REST Resource.
:param str migrate_project_name: Name of the Azure Migrate project.
:param str resource_group_name: Name of the Azure Resource Group that migrate project is part of.
"""
... | [
19,
2744,
155,
146
] |
def METHOD_NAME(app):
class DummyView(HTTPMethodView):
def get(self, request, my_param_here):
return text("I am get method with %s" % my_param_here)
app.add_route(DummyView.as_view(), "/<my_param_here>")
request, response = app.test_client.get("/test123")
assert response.text == "I am get method with test123" | [
9,
1545,
528
] |
def METHOD_NAME(self):
"""Test integrity of the dges file."""
self.string.run()
edge_file = os.path.join(self.string_output_dir, "edges.tsv")
self.assertTrue(os.path.isfile(edge_file))
edge_df = pd.read_csv(edge_file, sep="\t", header=0)
self.assertEqual((9, 20), edge_df.shape)
self.assertEqual(
[
"subject",
"predicate",
"object",
"relation",
"provided_by",
"type",
"combined_score",
"neighborhood",
"neighborhood_transferred",
"fusion",
"cooccurence",
"homology",
"coexpression",
"coexpression_transferred",
"experiments",
"experiments_transferred",
"database",
"database_transferred",
"textmining",
"textmining_transferred",
],
list(edge_df.columns),
) | [
9,
491,
171
] |
def METHOD_NAME(
table_data: Iterable[Iterable[Any]], true_repr: str = "*", false_repr: str = "/"
) -> Iterator[Iterator[Any]]:
"""Replace boolean value with string repr for all table values.
This function is an implementation of :func:`bool_str_repr` for a
2D table, for easy usage with tabulate.
Parameters
----------
table_data : Iterable[Iterable[Any]]
Data of the table e.g. a list of lists.
true_repr : str
Desired repr for ``True``, by default "*"
false_repr : str
Desired repr for ``False``, by default "/"
Returns
-------
Iterator[Iterator[Any]]
``table_data`` with original values or desired repr for bool
See Also
--------
bool_str_repr
Examples
--------
>>> table_data = [["foo", True, False], ["bar", False, True]]
>>> print(tabulate(bool_table_repr(table_data)))
--- - -
foo * /
bar / *
--- - -
"""
bool_repr = partial(bool_str_repr, true_repr=true_repr, false_repr=false_repr)
return ((bool_repr(value) for value in values) for values in table_data) | [
863,
410,
92
] |
def METHOD_NAME(cores_in_mcpu: int) -> int:
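# Round a millicpu request up to the nearest power-of-two number of cores, with a floor of a quarter core (250 mCPU).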
cores_in_mcpu = max(1, cores_in_mcpu)
power = max(-2, math.ceil(math.log2(cores_in_mcpu / 1000)))
return int(2**power * 1000) | [
270,
4009,
43,
-1
] |
def METHOD_NAME(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
loss_evaluator = MaskRCNNLossComputation(
matcher, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION
)
return loss_evaluator | [
93,
65,
361,
1572,
8539
] |
def METHOD_NAME(value):
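# Interpret a string flag as a boolean; returns None when no value is present.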
if is_value_present(value):
return value in ['True', 'true']
return None | [
214,
201
] |
def METHOD_NAME(self, name: str):
parent_sv = layer_resource_subvol(__package__, name)
with TempSubvolumes(Path(sys.argv[0])) as temp_subvols:
# Cannot use `.snapshot()` since that doesn't handle mounts.
child_sv = temp_subvols.caller_will_create(name)
ParentLayerItem.get_phase_builder(
[ParentLayerItem(from_target="t", subvol=parent_sv)],
DUMMY_LAYER_OPTS,
)(child_sv)
yield child_sv | [
963,
191,
12438
] |
def METHOD_NAME(self): | [
-1,
600
] |
def METHOD_NAME(self, iotree):
"""
Insert a StringIOTree (and all of its contents) at this location.
Further writing to self appears after what is inserted.
"""
self.commit()
self.prepended_children.append(iotree) | [
408
] |
def METHOD_NAME(self, context, internal_response):
"""
Manage account linking and recovery
:type context: satosa.context.Context
:type internal_response: satosa.internal.InternalData
:rtype: satosa.response.Response
:param context:
:param internal_response:
:return: response
:
"""
status_code, message = self._get_uuid(context, internal_response.auth_info.issuer, internal_response.subject_id)
data = {
"issuer": internal_response.auth_info.issuer,
"redirect_endpoint": "%s/account_linking%s" % (self.base_url, self.endpoint)
}
# Store the issuer subject_id/sub because we'll need it in handle_al_response
internal_response.attributes['issuer_user_id'] = internal_response.subject_id
if status_code == 200:
msg = "issuer/id pair is linked in AL service"
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.info(logline)
internal_response.subject_id = message
data['user_id'] = message
if self.id_to_attr:
internal_response.attributes[self.id_to_attr] = [message]
else:
msg = "issuer/id pair is not linked in AL service. Got a ticket"
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.info(logline)
data['ticket'] = message
jws = JWS(json.dumps(data), alg=self.signing_key.alg).sign_compact([self.signing_key])
context.state[self.name] = internal_response.to_dict()
return Redirect("%s/%s" % (self.redirect_url, jws)) | [
356
] |
def METHOD_NAME(name):
'''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the
file `name' when the context manager scope ends and the context manager __exit__ is called. This
means writing out the complete file can be performed with less concern of corrupting the original file
if the process is interrupted by windows shutting down.
`name` must be unicode.
Usage:
with FaultTolerantFile("myFile.txt") as f:
f.write("This is a test")
This creates a temporary file, and the writes actually happen on this temp file. At the end of the
`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces "myFile.txt"
'''
if not isinstance(name, text_type):
raise TypeError("name must be an unicode string")
dirpath, filename = os.path.split(name)
with NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:
log.debug(f.name)
yield f
f.flush()
os.fsync(f)
f.close()
winKernel.moveFileEx(f.name, name, winKernel.MOVEFILE_REPLACE_EXISTING) | [
4492,
16095,
171
] |
def METHOD_NAME():
'''
Instance for facade proxy.
'''
return WinCPU() | [
89
] |
def METHOD_NAME(self):
"""
:avocado: tags=intel_iommu_intremap
"""
self.common_vm_setup(True)
self.vm.add_args('-device', 'intel-iommu,intremap=on')
self.vm.add_args('-machine', 'kernel_irqchip=split')
self.kernel_params = (self.distro.default_kernel_params +
' quiet intel_iommu=on')
self.run_and_check() | [
9,
9049,
9050
] |
def METHOD_NAME():
"""Test if dpnp works with numpy array (it shouldn't)"""
@dpjit
def func(x, fill_value):
y = dpnp.full_like(x, fill_value)
return y
a = numpy.ones(10)
with pytest.raises(Exception):
func(a, 7) | [
9,
5563,
324,
2307,
280,
2028
] |
def METHOD_NAME(self) -> float:
METHOD_NAME = float(self.other_kwargs["shrinkage_parameter"])
return METHOD_NAME | [
-1,
511
] |
def METHOD_NAME(self) -> None:
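# Drain queued log chunks, split them into complete lines, and ship them in batches once more than LOG_BATCH_MAX_SIZE messages accumulate.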
if len(self._logs) == 0:
return
msgs = []
while len(self._logs):
data = self._logs.popleft()
self._buf += data
while "\n" in self._buf:
idx = self._buf.index("\n") + 1
line = self._buf[:idx]
self._buf = self._buf[idx:]
msg = dict(self._logs_metadata)
msg["log"] = line
msgs.append(msg)
if len(msgs) > LOG_BATCH_MAX_SIZE:
self._ship(msgs)
msgs = []
if len(msgs) > 0:
self._ship(msgs) | [
3944
] |
def METHOD_NAME(self):
return True | [
1046,
22,
2437,
1537
] |
def METHOD_NAME(self) -> Optional[str]:
return pulumi.get(self, "accept_language") | [
1437,
2938
] |
def METHOD_NAME(self): | [
44,
1457
] |
def METHOD_NAME(obj, default=None, warn=False, fail=False):
"""Return a string representation of an object's units.
This function returns obj.attrs.get('units') with no processing, or it
converts the units of a Quantity object to a string and returns that. The
output should be suitable for saving to an HDF5 file."""
try:
return obj.attrs.get('units') #this works for things with attrs
except AttributeError:
try:
return str(obj.units) #this works for Quantities
except:
if warn:
print("Warning: no unit string found on " + str(obj))
if fail:
raise ValueError("No unit information was found on " + str(obj))
return default | [
19,
805,
144
] |
def METHOD_NAME(cls, *ids): | [
2656
] |
def METHOD_NAME():
m = pyo.ConcreteModel()
m.fs = idaes.core.FlowsheetBlock(dynamic=False)
m.fs.properties = iapws95.Iapws95ParameterBlock()
m.fs.unit1 = cmodels.Pump(property_package=m.fs.properties)
m.fs.unit2 = hmodels.HelmPump(property_package=m.fs.properties)
# set inputs
Fin = 1e4 # mol/s
hin = 4000 # J/mol
Pin = 101325 # Pa
Pout = 2 * Pin # Pa
eff = 0.7
m.fs.unit1.inlet.flow_mol[0].fix(Fin)
m.fs.unit2.inlet.flow_mol[0].fix(Fin)
m.fs.unit1.inlet.enth_mol[0].fix(hin)
m.fs.unit2.inlet.enth_mol[0].fix(hin)
m.fs.unit1.inlet.pressure[0].fix(Pin)
m.fs.unit2.inlet.pressure[0].fix(Pin)
m.fs.unit1.outlet.pressure[0].fix(Pout)
m.fs.unit2.outlet.pressure[0].fix(Pout)
m.fs.unit1.efficiency_pump.fix(eff)
m.fs.unit2.efficiency_pump.fix(eff)
m.fs.unit1.initialize()
m.fs.unit2.initialize()
assert pyo.value(m.fs.unit1.control_volume.work[0]) == pytest.approx(
pyo.value(m.fs.unit2.control_volume.work[0]), rel=1e-7
)
assert pyo.value(
m.fs.unit1.control_volume.properties_out[0].temperature
) == pytest.approx(
pyo.value(m.fs.unit2.control_volume.properties_out[0].temperature), rel=1e-7
) | [
9,
12525
] |
def METHOD_NAME(db_parameters):
"""Sets the session parameters in connection time."""
connection = snowflake.connector.connect(
protocol=db_parameters["protocol"],
account=db_parameters["account"],
user=db_parameters["user"],
password=db_parameters["password"],
host=db_parameters["host"],
port=db_parameters["port"],
database=db_parameters["database"],
schema=db_parameters["schema"],
session_parameters={"TIMEZONE": "UTC"},
)
ret = connection.cursor().execute("show parameters like 'TIMEZONE'").fetchone()
assert ret[1] == "UTC" | [
9,
240,
386
] |
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/onboardingStates/{sentinelOnboardingStateName}",
**self.url_parameters
) | [
274
] |
def METHOD_NAME(self):
"""
Get all data relevant to sync the session.
Returns:
syncdata (dict): All syncdata values, based on
the keys given by self._attrs_to_sync.
"""
return {
attr: getattr(self, attr) for attr in settings.SESSION_SYNC_ATTRS if hasattr(self, attr)
} | [
19,
164,
365
] |
def METHOD_NAME(paths, within):
return _path_selector(paths, within, 'free_percent') | [
1472,
759,
3712,
1597
] |
def METHOD_NAME(self, text, tokenizer):
clean_text = self._clean_text(text)
assert len(clean_text) > 0, f'Invalid input string: {text}'
tokenized_input = tokenizer(
list(clean_text), return_length=True, is_split_into_words=True)
_inputs = dict()
_inputs['input_ids'] = tokenized_input['input_ids']
_inputs['seg_ids'] = tokenized_input['token_type_ids']
_inputs['seq_len'] = tokenized_input['seq_len']
return _inputs | [
666
] |
def METHOD_NAME(self, node_configs):
self.started = True
return [TestMechanic.Node(f"rally-node-{n}") for n in range(len(node_configs))] | [
447
] |
def METHOD_NAME(network, inlets, outlets):
if np.array(inlets).dtype == bool:
inlets = np.where(inlets)[0]
if np.array(outlets).dtype == bool:
outlets = np.where(outlets)[0]
flag = simulations.METHOD_NAME(
conns=network.conns,
occupied=np.ones(network.Nt, dtype=bool),
inlets=inlets,
outlets=outlets
)
return flag | [
-1
] |
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data") | [
112,
365
] |
def METHOD_NAME(self, fmt: V20PresFormat.Format = None) -> dict:
"""Return attached presentation request item.
Args:
fmt: format of attachment in list to decode and return
"""
target_format = (
fmt
if fmt
else next(
filter(
lambda ff: ff,
[V20PresFormat.Format.get(f.format) for f in self.formats],
),
None,
)
)
return (
target_format.get_attachment_data(
self.formats,
self.request_presentations_attach,
)
if target_format
else None
) | [
70
] |
def METHOD_NAME(self, statement):
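# Rebuild each line's separators so every token starts at its expected aligned column.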
tokens = []
for line in statement.lines:
strip_line = [t for t in line if t.type not in (Token.SEPARATOR, Token.EOL)]
line_pos = 0
exp_pos = 0
widths = self.get_widths(statement)
for token, width in zip(strip_line, widths):
if self.min_width:
exp_pos += max(width + self.formatting_config.space_count, self.min_width)
else:
exp_pos += width + self.formatting_config.space_count
if self.test_without_eol:
self.test_without_eol = False
exp_pos -= self.test_name_len
tokens.append(Token(Token.SEPARATOR, (exp_pos - line_pos) * " "))
tokens.append(token)
line_pos += len(token.value) + exp_pos - line_pos
tokens.append(line[-1])
statement.tokens = tokens | [
66,
925
] |
def METHOD_NAME(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
fusion_metadata = self._search_json(r'Fusion\.globalContent\s*=', webpage, 'fusion metadata', display_id)
entries = []
for item in traverse_obj(fusion_metadata, 'content_elements') or []:
item_type = traverse_obj(item, 'subtype')
if item_type == 'video':
brightcove_config = traverse_obj(item, ('embed', 'config'))
brightcove_url = self.BRIGHTCOVE_URL_TEMPLATE % (
traverse_obj(brightcove_config, 'brightcoveAccount') or '963482464001',
traverse_obj(brightcove_config, 'brightcoveVideoId')
)
entries.append(self.url_result(brightcove_url, BrightcoveNewIE))
elif item_type == 'youtube':
video_id_or_url = traverse_obj(item, ('referent', 'id'), ('raw_oembed', '_id'))
if video_id_or_url:
entries.append(self.url_result(video_id_or_url, ie='Youtube'))
if not entries:
raise ExtractorError('This article does not have a video.', expected=True)
playlist_title = (
traverse_obj(fusion_metadata, ('headlines', 'basic'))
or self._generic_title('', webpage)
)
return self.playlist_result(entries, display_id, playlist_title) | [
1866,
297
] |
def METHOD_NAME(self):
key = "test"
rotate = RandRotate90d(keys=key)
for p in TEST_NDARRAYS_ALL:
rotate.set_random_state(1323)
im = {key: p(self.imt[0])}
call_param = {"data": im}
rotated = rotate(**call_param)
# test lazy
test_resampler_lazy(rotate, rotated, call_param=call_param, seed=1323, output_key=key)
rotate.lazy = False
test_local_inversion(rotate, rotated, im, key)
expected = [np.rot90(channel, 0, (0, 1)) for channel in self.imt[0]]
expected = np.stack(expected)
assert_allclose(rotated[key], p(expected), type_test="tensor")
set_track_meta(False)
rotated = rotate(im)[key]
self.assertNotIsInstance(rotated, MetaTensor)
self.assertIsInstance(rotated, torch.Tensor)
set_track_meta(True) | [
9,
235
] |
def METHOD_NAME(self):
table = Table(data)
port_ids = []
for i in range(10):
port_ids.append(table.make_port())
assert port_ids == list(range(1, 11))
port = random.randint(0, 10)
table.update(data, port_id=port)
assert table.size() == 8
assert table.view().to_dict() == {"a": [1, 2, 3, 4] * 2, "b": ["a", "b", "c", "d"] * 2, "c": [True, False, True, False] * 2} | [
9,
-1,
237,
682
] |
def METHOD_NAME(self):
config_file = 'configs/examples/configuration.py'
cfg = Config.from_file(config_file)
self.assertEqual(cfg.a, 1)
self.assertEqual(cfg.b, obj['b']) | [
9,
1739
] |
def METHOD_NAME(UpdaterClass, measurement_model, prediction, measurement, omega):
# Calculate evaluation variables
innov_cov = 1/(1-omega)*measurement_model.noise_covar + 1/omega*prediction.covar
eval_measurement_prediction = GaussianMeasurementPrediction(
measurement_model.matrix() @ prediction.mean,
innov_cov,
cross_covar=prediction.covar @ measurement_model.matrix().T)
posterior_cov = np.linalg.inv(omega*np.linalg.inv(measurement.covar) +
(1-omega)*np.linalg.inv(prediction.covar))
posterior_mean = posterior_cov@(omega*np.linalg.inv(measurement.covar) @
measurement.state_vector + (1-omega) *
np.linalg.inv(prediction.covar)@prediction.state_vector)
eval_posterior = GaussianState(
posterior_mean,
posterior_cov)
# Initialise a Chernoff updater
updater = UpdaterClass(measurement_model=measurement_model, omega=omega)
# Get and assert measurement prediction
measurement_prediction = updater.predict_measurement(prediction)
assert(np.allclose(measurement_prediction.mean,
eval_measurement_prediction.mean,
0, atol=1.e-14))
assert(np.allclose(measurement_prediction.covar,
eval_measurement_prediction.covar,
0, atol=1.e-14))
assert(np.allclose(measurement_prediction.cross_covar,
eval_measurement_prediction.cross_covar,
0, atol=1.e-14))
# Perform and assert state update (without measurement prediction)
posterior = updater.update(SingleHypothesis(
prediction=prediction,
measurement=measurement))
assert(np.allclose(posterior.mean, eval_posterior.mean, 0, atol=1.e-14))
assert(np.allclose(posterior.covar, eval_posterior.covar, 0, atol=1.e-14))
assert(np.array_equal(posterior.hypothesis.prediction, prediction))
assert (np.allclose(
posterior.hypothesis.measurement_prediction.state_vector,
measurement_prediction.state_vector, 0, atol=1.e-14))
assert (np.allclose(posterior.hypothesis.measurement_prediction.covar,
measurement_prediction.covar, 0, atol=1.e-14))
assert(np.array_equal(posterior.hypothesis.measurement, measurement))
assert(posterior.timestamp == prediction.timestamp)
# Perform and assert state update
posterior = updater.update(SingleHypothesis(
prediction=prediction,
measurement=measurement,
measurement_prediction=measurement_prediction))
assert(np.allclose(posterior.mean, eval_posterior.mean, 0, atol=1.e-14))
assert(np.allclose(posterior.covar, eval_posterior.covar, 0, atol=1.e-14))
assert(np.array_equal(posterior.hypothesis.prediction, prediction))
assert (np.allclose(
posterior.hypothesis.measurement_prediction.state_vector,
measurement_prediction.state_vector, 0, atol=1.e-14))
assert (np.allclose(posterior.hypothesis.measurement_prediction.covar,
measurement_prediction.covar, 0, atol=1.e-14))
assert(np.array_equal(posterior.hypothesis.measurement, measurement))
assert(posterior.timestamp == prediction.timestamp) | [
9,
-1
] |
def METHOD_NAME(fn, *iterables, **tqdm_kwargs):
"""
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ProcessPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ProcessPoolExecutor.__init__`.
[default: min(32, cpu_count() + 4)].
chunksize : int, optional
Size of chunks sent to worker processes; passed to
`concurrent.futures.ProcessPoolExecutor.map`. [default: 1].
lock_name : str, optional
Member of `tqdm_class.get_lock()` to use [default: mp_lock].
"""
from concurrent.futures import ProcessPoolExecutor
if iterables and "chunksize" not in tqdm_kwargs:
# default `chunksize=1` has poor performance for large iterables
# (most time spent dispatching items to workers).
longest_iterable_len = max(map(length_hint, iterables))
if longest_iterable_len > 1000:
from warnings import warn
warn("Iterable length %d > 1000 but `chunksize` is not set."
" This may seriously degrade multiprocess performance."
" Set `chunksize=1` or more." % longest_iterable_len,
TqdmWarning, stacklevel=2)
if "lock_name" not in tqdm_kwargs:
tqdm_kwargs = tqdm_kwargs.copy()
tqdm_kwargs["lock_name"] = "mp_lock"
return _executor_map(ProcessPoolExecutor, fn, *iterables, **tqdm_kwargs) | [
356,
422
] |
def METHOD_NAME(self):
# Arrange
equity = Money(100000, USD)
# Act
result = self.sizer.calculate(
entry=Price.from_str("3.00000"),
stop_loss=Price.from_str("1.00000"),
equity=equity,
risk=Decimal("0.01"), # 1%
unit_batch_size=Decimal(1000),
)
# Assert
assert result == Quantity.zero() | [
9,
1593,
97,
805,
1318,
1646,
1762
] |
def METHOD_NAME(data):
conn.sendall(data)
return len(data) | [
353
] |
def METHOD_NAME(self):
"""A json stream with bad data results in an empty dataset."""
stream = StringIO(DKMS2)
self.parser = DkmsInfoParser(stream)
result = DkmsInfoResult()
self.parser.run(result)
self.assertEqual(result.dkms_info, {}) | [
9,
10028,
763,
15085
] |
def METHOD_NAME(self):
u = url.make_url("mysql:///dbname?max_idle=1234")
kwargs = {"basedir": 'my-base-dir'}
u, kwargs, max_conns = enginestrategy.special_case_mysql(u, kwargs)
exp = self.mysql_kwargs.copy()
exp['pool_recycle'] = 1234
self.assertEqual([str(u), max_conns, self.filter_kwargs(kwargs)],
["mysql:///dbname?charset=utf8&use_unicode=True", None,
exp]) | [
9,
4001,
232,
1150
] |
def METHOD_NAME(self, sid):
sub = SubscriberData(sid=SIDUtils.to_pb(sid))
self._store.add_subscriber(sub)
return (sid, sub) | [
238,
2042
] |
def METHOD_NAME(*args, **kwargs):
if very_verbose:
logging.debug(*args, **kwargs) | [
-1
] |
def METHOD_NAME(country_fip: str):
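# Validate a two-letter country FIPS code against the non-empty FIPS codes known to geonamescache.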
geocache = geonamescache.GeonamesCache()
dict_of_countries = geocache.get_countries()
list_of_countries = [d for d in dict_of_countries.values()]
list_of_country_fips = [item["fips"] for item in list_of_countries]
cleaned_list_of_country_fips = [
string for string in list_of_country_fips if string.strip()
]
if len(country_fip) > 2:
return False
elif type(country_fip) != str: # noqa: E721
return False
elif country_fip in cleaned_list_of_country_fips:
return True
else:
return False | [
137,
1205,
1078,
-1
] |
async def METHOD_NAME():
wrapper = func()
self.assertIsInstance(wrapper, types._GeneratorWrapper)
return await wrapper | [
1803
] |
def METHOD_NAME(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak)) | [
1030
] |
def METHOD_NAME(seed, inshape, start, stop, step, ctx, fname):
x_data = np.random.rand(*inshape)
# Numpy
s = [slice(start[axis], stop[axis], step[axis])
for axis in range(len(start))]
x_data_key = ref_slice(x_data, start, stop, step)
# NNabla
with nn.context_scope(ctx):
x = nn.Variable.from_numpy_array(x_data)
x_key = F.slice(x, start, stop, step)
x_key.forward()
assert_allclose(x_data_key, x_key.d) | [
9,
55,
76,
341
] |
def METHOD_NAME(self, test, reason):
super(TextTestResult, self).METHOD_NAME(test, reason)
if self.showAll:
self.stream.writeln("skipped {0!r}".format(reason))
elif self.dots:
self.stream.write("s")
self.stream.flush() | [
238,
2423
] |
def METHOD_NAME(sagemaker_session):
sagemaker_session.wait_for_endpoint = Mock()
returned_name = sagemaker_session.create_endpoint(
endpoint_name=ENDPOINT_NAME, config_name=ENDPOINT_CONFIG_NAME
)
assert returned_name == ENDPOINT_NAME
sagemaker_session.sagemaker_client.create_endpoint.assert_called_once_with(
EndpointName=ENDPOINT_NAME, EndpointConfigName=ENDPOINT_CONFIG_NAME, Tags=[]
)
sagemaker_session.wait_for_endpoint.assert_called_once_with(ENDPOINT_NAME) | [
9,
129,
841,
618
] |
def METHOD_NAME(truth, pred, avg):
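# MSE-based skill score: 1 - MSE(truth, pred) normalized by the mean of MSE(truth, 0) and MSE(0, pred); the ratio is taken as 0 where that denominator is zero.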
denom = mse(truth, 0, avg) / 2 + mse(0, pred, avg) / 2
num = mse(truth, pred, avg)
return 1 - xr.where(denom != 0, num / denom, 0) | [
7260
] |
def METHOD_NAME(parent, loc, change_freq, lastmod=None):
""" create and attach url element to argument *parent*
"""
url_ele = etree.SubElement(parent, NS + "url")
create_simple_sub_element(url_ele, 'loc', loc)
if lastmod is not None:
create_simple_sub_element(url_ele, "lastmod", lastmod)
create_simple_sub_element(url_ele, "changefreq", change_freq)
return url_ele | [
129,
274,
669
] |
def METHOD_NAME():
set_seed(args.seed)
tic_time = time.time()
if not os.path.exists(args.doccano_file):
raise ValueError("Please input the correct path of doccano file.")
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if len(args.splits) != 0 and len(args.splits) != 3:
raise ValueError("Only []/ len(splits)==3 accepted for splits.")
def _check_sum(splits):
return Decimal(str(splits[0])) + Decimal(str(splits[1])) + Decimal(str(splits[2])) == Decimal("1")
if len(args.splits) == 3 and not _check_sum(args.splits):
raise ValueError("Please set correct splits, sum of elements in splits should be equal to 1.")
with open(args.doccano_file, "r", encoding="utf-8") as f:
raw_examples = f.readlines()
def _create_ext_examples(
examples,
negative_ratio,
prompt_prefix="ζ
ζεΎε",
options=["ζ£ε", "θ΄ε"],
separator="##",
shuffle=False,
is_train=True,
schema_lang="ch",
):
entities, relations, aspects = convert_ext_examples(
examples, negative_ratio, prompt_prefix, options, separator, is_train, schema_lang
)
examples = entities + relations + aspects
if shuffle:
indexes = np.random.permutation(len(examples))
examples = [examples[i] for i in indexes]
return examples
def _create_cls_examples(examples, prompt_prefix, options, shuffle=False):
examples = convert_cls_examples(examples, prompt_prefix, options)
if shuffle:
indexes = np.random.permutation(len(examples))
examples = [examples[i] for i in indexes]
return examples
def _save_examples(save_dir, file_name, examples):
count = 0
save_path = os.path.join(save_dir, file_name)
with open(save_path, "w", encoding="utf-8") as f:
for example in examples:
f.write(json.dumps(example, ensure_ascii=False) + "\n")
count += 1
logger.info("Save %d examples to %s." % (count, save_path))
if len(args.splits) == 0:
if args.task_type == "ext":
examples = _create_ext_examples(
raw_examples,
args.negative_ratio,
args.prompt_prefix,
args.options,
args.separator,
args.is_shuffle,
schema_lang=args.schema_lang,
)
else:
examples = _create_cls_examples(raw_examples, args.prompt_prefix, args.options, args.is_shuffle)
_save_examples(args.save_dir, "train.txt", examples)
else:
if args.is_shuffle:
indexes = np.random.permutation(len(raw_examples))
index_list = indexes.tolist()
raw_examples = [raw_examples[i] for i in indexes]
else:
index_list = list(range(len(raw_examples)))
i1, i2, _ = args.splits
p1 = int(len(raw_examples) * i1)
p2 = int(len(raw_examples) * (i1 + i2))
train_ids = index_list[:p1]
dev_ids = index_list[p1:p2]
test_ids = index_list[p2:]
with open(os.path.join(args.save_dir, "sample_index.json"), "w") as fp:
maps = {"train_ids": train_ids, "dev_ids": dev_ids, "test_ids": test_ids}
fp.write(json.dumps(maps))
if args.task_type == "ext":
train_examples = _create_ext_examples(
raw_examples[:p1],
args.negative_ratio,
args.prompt_prefix,
args.options,
args.separator,
args.is_shuffle,
schema_lang=args.schema_lang,
)
dev_examples = _create_ext_examples(
raw_examples[p1:p2],
-1,
args.prompt_prefix,
args.options,
args.separator,
is_train=False,
schema_lang=args.schema_lang,
)
test_examples = _create_ext_examples(
raw_examples[p2:],
-1,
args.prompt_prefix,
args.options,
args.separator,
is_train=False,
schema_lang=args.schema_lang,
)
else:
train_examples = _create_cls_examples(raw_examples[:p1], args.prompt_prefix, args.options)
dev_examples = _create_cls_examples(raw_examples[p1:p2], args.prompt_prefix, args.options)
test_examples = _create_cls_examples(raw_examples[p2:], args.prompt_prefix, args.options)
_save_examples(args.save_dir, "train.txt", train_examples)
_save_examples(args.save_dir, "dev.txt", dev_examples)
_save_examples(args.save_dir, "test.txt", test_examples)
logger.info("Finished! It takes %.2f seconds" % (time.time() - tic_time)) | [
74,
197
] |
def METHOD_NAME(self, matrix, arr, path, mask): | [
1389,
877,
24,
16228
] |
def METHOD_NAME():
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Stride': '2x3',
})
log.debug(conv.printUsage(z))
e= { 'n':64, 'c':256, 'h':20, 'w':14, 'k':1024, 'x':1, 'y':1, 'u':2, 'v':3 }
ec = ConvProblem(e, conv)
assert (ec.sizes == (5, 10, e['k'], e['n'], e['c']))
assert (ec.stridesA == (3, 28, e['h'] * e['w'], 71680)) | [
9,
5445
] |
def METHOD_NAME(
keys: t.Set[str],
session: Session = PROVIDED_SESSION,
) -> None:
(session.query(Variable).filter(Variable.key.in_(keys)).delete(synchronize_session=False)) | [
34,
2045
] |
def METHOD_NAME(self, dataset: Dataset):
"""
Run evaluation and returns metrics and predictions.
Args:
dataset (`datasets.Dataset`):
Dataset to use for the evaluation step.
"""
logger.info("***** Running evaluation *****")
all_preds = None
all_labels = None
onnx_inputs = {}
for step, inputs in enumerate(dataset):
has_labels = all(inputs.get(k) is not None for k in self.label_names)
if has_labels:
labels = tuple(np.array([inputs.get(name)]) for name in self.label_names)
if len(labels) == 1:
labels = labels[0]
else:
labels = None
for key in self.onnx_input_names:
if key in inputs:
onnx_inputs[key] = np.array([inputs[key]])
elif key == 'pixel_values':
onnx_inputs[key] = np.array([inputs['images']], dtype=np.float32)
preds = self.session.run(None, onnx_inputs)
if len(preds) == 1:
preds = preds[0]
all_preds = preds if all_preds is None else nested_concat(all_preds, preds, padding_index=-100)
all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
else:
metrics = {}
return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=len(dataset)) | [
355,
1751
] |
def METHOD_NAME(doc, outputFile):
layoutNsUri = "http://projects.eml.org/bcb/sbml/level2"
layoutNs = LayoutPkgNamespaces(2, 4)
renderNsUri = "http://projects.eml.org/bcb/sbml/render/level2"
renderNs = RenderPkgNamespaces(2, 4)
prop = ConversionProperties(SBMLNamespaces(2,4))
prop.addOption('strict', False)
prop.addOption('setLevelAndVersion', True)
prop.addOption('ignorePackages', True)
doc.convert(prop)
docPlugin = doc.getPlugin("layout")
if docPlugin is not None:
docPlugin.setElementNamespace(layoutNsUri)
doc.getSBMLNamespaces().removePackageNamespace(3, 1, "layout", 1)
doc.getSBMLNamespaces().addPackageNamespace("layout", 1)
rdocPlugin = doc.getPlugin("render")
if rdocPlugin is not None:
rdocPlugin.setElementNamespace(renderNsUri)
doc.getSBMLNamespaces().removePackageNamespace(3, 1, "render", 1)
doc.getSBMLNamespaces().addPackageNamespace("render", 1)
writeSBMLToFile(doc, outputFile) | [
197,
366,
24,
541
] |
def METHOD_NAME(
vec: List[Any], alpha: float = 0.01, trials: int = 3000
) -> List[Tuple[int, float]]:
"""Returns the indices with values that are significant outliers, with their p-values"""
m = len(vec)
if m == 0:
return []
removed = 0
results = []
# pv = multinomial_pvalue(vec, trials)
# Hack: for now, set pv to alpha because computing exact multinomial p-values is too expensive
pv = alpha
# We use the Benjamin-Yekutieli procedure to control false-discovery rate.
# See https://en.wikipedia.org/wiki/False_discovery_rate#Benjamini%E2%80%93Yekutieli_procedure
c_m = harmonic_number(m)
if pv <= alpha:
while removed < m:
# While we remain below the threshold, remove (zero-out by
# setting to NaN) the max and add its index to the list of
# results with its p-value.
max_index = argmax(vec)
# See how unlikely this bin is to have occurred at random,
# assuming a uniform distribution into bins.
this_pvalue = one_sided_binomial_test_ge(
int(np.nansum(vec)), vec[max_index], 1 / (m - removed)
)
# print("max_index = ", max_index, "p-value = ", this_pvalue)
if this_pvalue <= (alpha * (removed + 1) / (m * c_m)):
results.append((max_index, this_pvalue))
vec[max_index] = np.nan
removed += 1
else:
break
return results | [
5720
] |
def METHOD_NAME():
# Simplest way to find out if we are in Jupyter without having to
# check imports
return "jupyter_core" in sys.modules | [
137,
12052
] |
def METHOD_NAME(self):
return self._table.METHOD_NAME | [
1249
] |
def METHOD_NAME(self, *_, **__):
pass | [
77,
69,
1165,
1798
] |
def METHOD_NAME(self):
"""
Check that if the value is an int or float, it is less than vmax and more than vmin, when vmax and vmin are given.
"""
if not isinstance(self.value, (int, float)):
return True
elif self.vmin != None and self.value < self.vmin:
return False
elif self.vmax != None and self.value > self.vmax:
return False
else:
return True | [
187,
661
] |
def METHOD_NAME(self, rtip_id):
pgp_key, tip_export = yield get_tip_export(self.request.tid,
self.session.user_id,
rtip_id,
self.request.language)
filename = "report-" + str(tip_export["tip"]["progressive"]) + ".zip"
files = yield prepare_tip_export(self.session.cc, tip_export)
zipstream = ZipStream(files)
stf = SecureTemporaryFile(self.state.settings.tmp_path)
with stf.open('w') as f:
for x in zipstream:
f.write(x)
f.finalize_write()
with stf.open('r') as f:
yield self.write_file_as_download(filename, f, pgp_key) | [
19
] |
def METHOD_NAME(_: WIDParams):
return True | [
5794,
5795,
-1
] |
def METHOD_NAME(self):
op = core.CreateOperator(
"Concat", self.inputs, [self.output, self.split_info], **self.args
)
return op | [
76
] |
def METHOD_NAME(item, update_dict, soft=False):
"""
Takes a Composite (item) and updates all entries with values from
update_dict. Updates can be soft in which case existing values are not
overwritten.
If item is of type string it is first converted to a Composite
"""
item = Composite(item)
for part in item.get_content():
if soft:
for key, value in update_dict.items():
if key not in part:
part[key] = value
else:
part.update(update_dict)
return item | [
3209,
86
] |
def METHOD_NAME():
"""
test that setenv can be invoked with dict
"""
ret = envstate.setenv("notimportant", {"test": "value"})
assert ret["changes"] == {"test": "value"} | [
9,
8033,
553
] |
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.LocalizableString"]:
"""Get the list of available event categories supported in the Activity Logs
Service.:code:`<br>`The current list includes the following: Administrative, Security,
ServiceHealth, Alert, Recommendation, Policy.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocalizableString or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.monitor.v2015_04_01.models.LocalizableString]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2015-04-01"))
cls: ClsType[_models.EventCategoryCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("EventCategoryCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | [
245
] |
def METHOD_NAME():
return CSRDataset64(
X_csr64.data, X_csr64.indptr, X_csr64.indices, y64, sample_weight64, seed=42
) | [
93,
2087,
126,
1036
] |
def METHOD_NAME(lines: Sequence[str], spaces: int = 1) -> str:
"""
Indent code block.
:param lines:
:type lines: str
:param spaces: indentation level, in multiples of four spaces
:return:
"""
string_io = StringIO(str(lines))
indentation = spaces * 4
prefix = " " * indentation
lines = []
for line in string_io:
if line != "\n":
line = prefix + line
lines.append(line)
return "".join(lines) | [
4
] |
def METHOD_NAME(s: OptionalString) -> OptionalString:
if isinstance(s, str):
return " ".join(s.split())
return s | [
1137,
173
] |
def METHOD_NAME():
# Make a clone of "final" only if necessary.
if hasattr(METHOD_NAME, "used"):
return final.makeClone()
else:
METHOD_NAME.used = True
return final | [
19,
2316
] |
def METHOD_NAME(self):
got_error = False
conn_params = self.conn_dict.copy()
conn_params["allow_autapses"] = True
conn_params["allow_multapses"] = False
conn_params["indegree"] = self.N1 + 1
try:
self.setUpNetwork(conn_params)
except nest.kernel.NESTError:
got_error = True
self.assertTrue(got_error) | [
9,
168,
1107
] |
def METHOD_NAME(filename):
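# Sort the value lists of private-*, seccomp*, caps.* and protocol lines alphabetically, rewrite the file in place if anything changed, and return 101 when fixes were applied (0 otherwise).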
with open(filename, "r+") as profile:
lines = profile.read().split("\n")
was_fixed = False
fixed_profile = []
for lineno, line in enumerate(lines, 1):
if line[:12] in ("private-bin ", "private-etc ", "private-lib "):
fixed_line = f"{line[:12]}{sort_alphabetical(line[12:])}"
elif line[:13] in ("seccomp.drop ", "seccomp.keep "):
fixed_line = f"{line[:13]}{sort_alphabetical(line[13:])}"
elif line[:10] in ("caps.drop ", "caps.keep "):
fixed_line = f"{line[:10]}{sort_alphabetical(line[10:])}"
elif line[:8] == "protocol":
fixed_line = f"protocol {sort_protocol(line[9:])}"
elif line[:8] == "seccomp ":
fixed_line = f"{line[:8]}{sort_alphabetical(line[8:])}"
else:
fixed_line = line
if fixed_line != line:
was_fixed = True
print(
f"{filename}:{lineno}:-{line}\n"
f"{filename}:{lineno}:+{fixed_line}"
)
fixed_profile.append(fixed_line)
if was_fixed:
profile.seek(0)
profile.truncate()
profile.write("\n".join(fixed_profile))
profile.flush()
print(f"[ Fixed ] {filename}")
return 101
return 0 | [
1112,
337
] |
def METHOD_NAME(self):
self.group_watcher.GetAllSinceLastCall() | [
2427,
2478
] |
def METHOD_NAME(height, kernel_size, stride, pad, n_convs):
"""Height of spec after n convolutions with fixed kernel/stride/pad."""
for _ in range(n_convs):
height = (height - kernel_size + 2 * pad) // stride + 1
return height | [
1593,
72,
1306,
1877
] |
def METHOD_NAME(self, instance):
if not self.is_active(instance.data):
return
ext_mapping = (
instance.context.data["project_settings"]["maya"]["ext_mapping"]
)
if ext_mapping:
self.log.debug("Looking in settings for scene type ...")
# use extension mapping for first family found
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.debug(
"Using {} as scene type".format(self.scene_type))
break
except KeyError:
# set scene type to ma
self.scene_type = "ma"
_scene_type = ("mayaAscii"
if self.scene_type == "ma"
else "mayaBinary")
dir_path = self.staging_dir(instance)
# named the file with imported reference
if instance.name == "Main":
return
tmp_name = instance.name + self.tmp_format
current_name = cmds.file(query=True, sceneName=True)
ref_scene_name = "{0}.{1}".format(tmp_name, self.scene_type)
reference_path = os.path.join(dir_path, ref_scene_name)
tmp_path = os.path.dirname(current_name) + "/" + ref_scene_name
self.log.debug("Performing extraction..")
# This generates script for mayapy to take care of reference
# importing outside current session. It is passing current scene
# name and destination scene name.
script = (""" | [
356
] |
def METHOD_NAME():
assert m.custom_function2(3) == 27
assert m.roundtrip(m.custom_function2)(3) == 27 | [
9,
343,
7640
] |
def METHOD_NAME(context, run_id, events, alt_s1=False, alt_s2=False,
cmt_version=None,
posrec_algos=('mlp', 'gcn', 'cnn')):
"""
Returns the corrected position for each position algorithm available,
without the need to reprocess event_basics, as the needed
information is already stored in event_basics.
:param alt_s1: False by default, if True it uses alternative S1 as main one
:param alt_s2: False by default, if True it uses alternative S2 as main one
:param cmt_version: CMT version to use (it can be a list of same
length as posrec_algos, if different versions are required for
different posrec algorithms, default 'local_ONLINE')
:param posrec_algos: list of position reconstruction algorithms to
use (default ['mlp', 'gcn', 'cnn'])
"""
posrec_algos = strax.to_str_tuple(posrec_algos)
if cmt_version is None:
fdc_config = context.get_single_plugin(run_id, 'event_positions').config['fdc_map']
if isinstance(fdc_config, str) and 'cmt://' in fdc_config:
cmt_version = straxen.URLConfig.split_url_kwargs(fdc_config)[1].get('version', 'ONLINE')
elif straxen.is_cmt_option(fdc_config):
cmt_version = fdc_config[1]
else:
raise ValueError('FDC map is not a CMT option, cannot infer cmt version.')
if (
isinstance(cmt_version, (tuple, list))
and len(cmt_version) != len(posrec_algos)
):
raise TypeError(f"cmt_version is a list but does not match the "
f"posrec_algos ({posrec_algos}) length.")
cmt_version = ((cmt_version, ) * len(posrec_algos)
if isinstance(cmt_version, str) else cmt_version)
# Get drift from CMT
ep = context.get_single_plugin(run_id, 'event_positions')
drift_speed = ep.electron_drift_velocity
drift_time_gate = ep.electron_drift_time_gate
dtype = load_dtypes(posrec_algos)
result = np.zeros(len(events), dtype=dtype)
s1_pre = 'alt_' if alt_s1 else ''
s2_pre = 'alt_' if alt_s2 else ''
drift_time = events['drift_time'] if not (alt_s1 or alt_s2) else events[s2_pre+'s2_center_time'] - events[s1_pre+'s1_center_time']
z_obs = - drift_speed * drift_time
for algo, v_cmt in zip(posrec_algos, cmt_version):
fdc_tmp = (f'fdc_map_{algo}', v_cmt, True)
map_tmp = straxen.get_correction_from_cmt(run_id, fdc_tmp)
itp_tmp = straxen.InterpolatingMap(straxen.common.get_resource(map_tmp, fmt='binary'))
itp_tmp.scale_coordinates([1., 1., -drift_speed])
orig_pos = np.vstack([events[f'{s2_pre}s2_x_{algo}'], events[f'{s2_pre}s2_y_{algo}'], z_obs]).T
r_obs = np.linalg.norm(orig_pos[:, :2], axis=1)
delta_r = itp_tmp(orig_pos)
z_obs = z_obs + drift_speed * drift_time_gate
# apply radial correction
with np.errstate(invalid='ignore', divide='ignore'):
r_cor = r_obs + delta_r
scale = r_cor / r_obs
with np.errstate(invalid='ignore'):
z_cor = -(z_obs ** 2 - delta_r ** 2) ** 0.5
invalid = np.abs(z_obs) < np.abs(delta_r)
z_cor[invalid] = z_obs[invalid]
result[f'x_{algo}'] = orig_pos[:, 0] * scale
result[f'y_{algo}'] = orig_pos[:, 1] * scale
result[f'r_{algo}'] = r_cor
result[f'r_naive_{algo}'] = r_obs
result[f'r_field_distortion_correction_{algo}'] = delta_r
result[f'theta_{algo}'] = np.arctan2(orig_pos[:, 1], orig_pos[:, 0])
result[f'z_{algo}'] = z_cor
result['z_naive'] = z_obs
return result | [
557,
8468,
2758
] |
def METHOD_NAME(self, base_dir, files): | [
93,
586,
151
] |
def METHOD_NAME():
if CU_VERSION == "cpu":
os.environ["CONDA_CPUONLY_FEATURE"] = "- cpuonly"
os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = ""
return
os.environ["CONDA_CPUONLY_FEATURE"] = ""
if CU_VERSION in ("cu102", "cu110"):
os.environ["CONDA_CUB_CONSTRAINT"] = "- nvidiacub"
else:
os.environ["CONDA_CUB_CONSTRAINT"] = ""
major, minor = get_cuda_major_minor()
version_clause = version_constraint(f"{major}.{minor}")
if pytorch_major_minor < (1, 13):
toolkit = f"- cudatoolkit {version_clause}"
else:
toolkit = f"- pytorch-cuda {version_clause}"
os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = toolkit | [
102,
4542,
11951,
1126
] |
def METHOD_NAME(query):
"""Returns a list of ReleaseResult or raises MusicBrainzError"""
musicbrainzngs.set_useragent(app.name, const.VERSION)
return [Release(r) for r in
musicbrainzngs.METHOD_NAME(query)["release-list"]] | [
1070,
205
] |
def METHOD_NAME(self, parameter_group, overflow, eigenvalue_enabled, block_eigenvalue={}):
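# After an optimizer step, quantize eligible weight tensors; when per-block eigenvalue information is available, scale the quantization factor accordingly.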
if overflow and not eigenvalue_enabled:
return
self.step()
self.update_fp16_ratio()
for i in range(len(parameter_group)):
for p in parameter_group[i]:
if len(p.size()) > 1 and hasattr(p, "start_bits") and p.start_bits:
param_id = id(p)
if block_eigenvalue is None:
eigenvalue, layer_id = None, 0
else:
eigenvalue, layer_id = block_eigenvalue[param_id] if param_id in block_eigenvalue else (None,
0)
if eigenvalue is not None:
factor = 1 + math.floor(eigenvalue * 4)
p.data = self.compute_quantization(p.data, layer_id, factor)
else:
p.data = self.compute_quantization(p, layer_id) | [
1429
] |
def METHOD_NAME(item):
return OrderedDict([
('CONTENT TYPE', _get_value_by_names(item, ['contentType', 'content_type'])),
('KEY', _get_value(item, 'key')),
('VALUE', _get_value(item, 'value')),
('LAST MODIFIED', _format_datetime(_get_value_by_names(item, ['lastModified', 'last_modified']))),
('TAGS', _get_value(item, 'tags')),
('LABEL', _get_value(item, 'label')),
('LOCKED', _get_value(item, 'locked'))
]) | [
10802,
475,
275,
846
] |
def METHOD_NAME( self, index ): | [
19,
875
] |
def METHOD_NAME(model):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
device = -1
pred = pipeline(
"text-classification",
model=model,
tokenizer=tokenizer,
device=device,
return_all_scores=True
)
return pred | [
129,
1148,
280,
578
] |
def METHOD_NAME():
while True:
ip = ["10", str(randint(150, 171)), "0", str(randint(70, 75))]
ipaddr = ".".join(ip)
process = subprocess.run(f"nc -w3 {ipaddr} 9090", shell=True, capture_output=True)
result = process.returncode
if result != 0:
print(f"{ipaddr}:9090 is not alive", flush = True)
continue
else:
print(f"***{ipaddr}:9090 is alive", flush=True)
if isAttacked(ipaddr):
print(f"***{ipaddr}:9090 is attacked already", flush=True)
continue
else:
print(f"***{ipaddr}:9090 is not attacked yet, launch the attack", flush=True)
return ipaddr | [
19,
243,
1030
] |