text | ids
---|---|
def METHOD_NAME(self, key, batch_size, max_historical_metrics=None):
"""
Generator yielding dictionaries of at most `batch_size` Mor
objects until the whole content of the cache has been covered. The
result has to be iterated with a nested loop, like:
for batch in cache.mors_batch('key', 100):
for name, mor in iteritems(batch):
# use the Mor object here
If max_historical_metrics is specified, the function also limits
the size of each batch so that the integration never makes an API call
with more than this number of historical metrics.
"""
if max_historical_metrics is None:
max_historical_metrics = float('inf')
with self._mor_lock:
mors_dict = self._mor.get(key) or {}
batch = {}
nb_hist_metrics = 0
for mor_name, mor in iteritems(mors_dict):
if mor['mor_type'] not in REALTIME_RESOURCES and mor.get('metrics'):
# Those metrics are historical, let's make sure we don't have too
# many of them in the same batch.
if len(mor['metrics']) >= max_historical_metrics:
# Too many metrics to query for a single mor, ignore it
self.log.warning(
"Metrics for '%s' are ignored because there are more (%d) than what you allowed (%d) on vCenter Server", # noqa: E501
mor_name,
len(mor['metrics']),
max_historical_metrics,
)
continue
nb_hist_metrics += len(mor['metrics'])
if nb_hist_metrics >= max_historical_metrics:
# Adding those metrics to the batch would make it too big, yield it now
self.log.info("Will request %d hist metrics", nb_hist_metrics - len(mor['metrics']))
yield batch
batch = {}
nb_hist_metrics = len(mor['metrics'])
batch[mor_name] = mor
if len(batch) == batch_size:
self.log.info("Will request %d hist metrics", nb_hist_metrics)
yield batch
batch = {}
nb_hist_metrics = 0
if batch:
self.log.info("Will request %d hist metrics", nb_hist_metrics)
yield batch | [
13054,
2277
] |
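A minimal consumption sketch for the batching generator above; the `cache` object, the `'vm'` cache key, the cap value, and the `query_metrics` callable are hypothetical stand-ins for illustration, not part of the integration's actual API.

```python
# Hypothetical consumer: each yielded batch holds at most batch_size Mor objects
# and stays under the historical-metrics cap, so one API call per batch is safe.
MAX_HISTORICAL_METRICS = 256  # assumed server-side limit

def collect_metrics(cache, query_metrics):
    for batch in cache.mors_batch('vm', batch_size=100,
                                  max_historical_metrics=MAX_HISTORICAL_METRICS):
        specs = [(name, mor.get('metrics', [])) for name, mor in batch.items()]
        query_metrics(specs)  # hypothetical wrapper issuing one API call per batch
```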
def METHOD_NAME(browse_pgconf, browser, psql):
param = "check_function_bodies"
browser.select("#inputSearchSettings").send_keys(param)
browser.select("#buttonSearchSettings").click()
input_ = browser.select(f"input[name={param}]")
assert 'on' == input_.get_attribute('value')
out = psql("-Abt", c=f"SHOW {param};")
assert 'on' == out.strip()
browser.absent(f"#buttonResetDefault_{param}")
browser.select(".toggle-on").click()
sleep(.1)
input_ = browser.select(f"input[name={param}]")
assert 'off' == input_.get_attribute('value')
# Ensure On is not available.
with pytest.raises(ElementNotInteractableException):
browser.select(".toggle-on").click()
browser.select(".main form[role=form] button[type=submit]").click()
# Ensure Reset button appears
browser.select(f"#buttonResetDefault_{param}")
out = psql("-Abt", c=f"SHOW {param};")
assert 'off' == out.strip() | [
9,
201
] |
def METHOD_NAME(self):
class x(object):
def __init__(self, hash):
self.__hash = hash
def __hash__(self):
return self.__hash
self.assertEqual(hash(x(1)), 1)
if is_cli or is_32:
self.assertEqual(hash(x(1<<32)), 2)
else:
self.assertEqual(hash(x(1<<63)), 4) | [
9,
-1,
1571
] |
def METHOD_NAME(ds_trimmed_cache):
ds_trimmed_cache = ds_trimmed_cache.drop('123456')
return ds_trimmed_cache.graphql.METHOD_NAME() | [
135
] |
def METHOD_NAME(self) -> bool:
''' Whether it is still safe for the Bokeh server to fork new workers.
``False`` if the code has already been executed.
'''
return not self._runner.ran | [
1209,
24,
6939
] |
def METHOD_NAME(config_file, site, key_id, rsa_private_key, rsa_public_key):
config_obj = config.read_json_config_file(config_file, check_commands=False, add_defaults=False)
site = config.get_site_from_config(config_obj, site)
site_config = config_obj["backup_sites"][site]
if key_id in site_config.setdefault("encryption_keys", {}):
raise CommandError("key_id {!r} already defined for site {!r} in {!r}".format(key_id, site, config_file))
site_config["encryption_keys"][key_id] = {
"private": rsa_private_key,
"public": rsa_public_key,
}
site_config["encryption_key_id"] = key_id
write_json_file(config_file, config_obj)
print("Saved new key_id {!r} for site {!r} in {!r}".format(key_id, site, config_file))
print(
"NOTE: The pghoard daemon does not require the 'private' key in its configuration file, "
"it can be stored elsewhere to improve security"
) | [
73,
219
] |
def METHOD_NAME():
m = pyo.ConcreteModel()
m.fs = idaes.core.FlowsheetBlock(dynamic=False)
m.fs.properties = iapws95.Iapws95ParameterBlock()
m.fs.unit = HelmMixer(
momentum_mixing_type=MomentumMixingType.equality,
property_package=m.fs.properties,
inlet_list=["i1", "i2", "i3"],
)
Fin1 = 1.1e4 # mol/s
hin1 = 4000 # J/mol
Pin1 = 1.2e5 # Pa
Fin2 = 1e4 # mol/s
hin2 = 5000 # J/mol
Pin2 = 2e5 # Pa
Fin3 = 1.3e4 # mol/s
hin3 = 6000 # J/mol
Pin3 = 3e5 # Pa
Pout = 1.5e5 # Pa
m.fs.unit.i1.flow_mol[0].fix(Fin1)
m.fs.unit.i1.enth_mol[0].fix(hin1)
m.fs.unit.i1.pressure[0] = Pin1
m.fs.unit.i2.flow_mol[0].fix(Fin2)
m.fs.unit.i2.enth_mol[0].fix(hin2)
m.fs.unit.i2.pressure[0] = Pin2
m.fs.unit.i3.flow_mol[0].fix(Fin3)
m.fs.unit.i3.enth_mol[0].fix(hin3)
m.fs.unit.i3.pressure[0] = Pin3
m.fs.unit.outlet.pressure[0].fix(Pout)
m.fs.unit.initialize()
Fout = Fin1 + Fin2 + Fin3
hout = (hin1 * Fin1 + hin2 * Fin2 + hin3 * Fin3) / Fout
assert pyo.value(m.fs.unit.outlet.flow_mol[0]) == pytest.approx(Fout, rel=1e-7)
assert pyo.value(m.fs.unit.outlet.enth_mol[0]) == pytest.approx(hout, rel=1e-7)
assert pyo.value(m.fs.unit.outlet.pressure[0]) == pytest.approx(Pout, rel=1e-7)
assert pyo.value(m.fs.unit.i1.pressure[0]) == pytest.approx(Pout, rel=1e-7)
assert pyo.value(m.fs.unit.i2.pressure[0]) == pytest.approx(Pout, rel=1e-7)
assert pyo.value(m.fs.unit.i3.pressure[0]) == pytest.approx(Pout, rel=1e-7) | [
9,
-1
] |
def METHOD_NAME(self):
if request.json is None:
raise BadRequest('Expected JSON payload')
invalid_fields = request.json.keys() - {'stop_on_match'}
if invalid_fields:
raise BadRequest('Invalid fields: {}'.format(', '.join(invalid_fields)))
if 'stop_on_match' in request.json:
self.email_tpl.stop_on_match = request.json['stop_on_match']
return jsonify_data(flash=False) | [
356,
1575
] |
def METHOD_NAME(self, host_pattern):
'''
Each host key can be a pattern; try to process it and add variables as needed
'''
(hostnames, port) = self._expand_hostpattern(host_pattern)
return hostnames, port | [
214,
1806
] |
def METHOD_NAME(self, *items):
"""
Add items to be sorted.
@param items: One or more items to be added.
@type items: I{item}
@return: self
@rtype: L{DepList}
"""
for item in items:
self.unsorted.append(item)
key = item[0]
self.index[key] = item
return self | [
238
] |
def METHOD_NAME(type): return \ | [
6874,
791,
475
] |
def METHOD_NAME():
v4.removeItem(r4) | [
188
] |
def METHOD_NAME():
# language=rst
"""
.. _dispatching:
Namespace dispatching
---------------------
I've already hinted at this above in the example where we do
``columns__foo__include=False``. This is an example of the powerful
namespace dispatch mechanism from iommi.declarative. It's inspired by the
query syntax of Django where you use ``__`` to jump namespace. (If
you're not familiar with Django, here's the gist of it: you can do
``Table.objects.filter(foreign_key__column='foo')``
to filter.) We really like this style and have expanded on it. It
enables functions to expose the *full* API of functions it calls while
still keeping the code simple. Here's a contrived example:
"""
from iommi.declarative.dispatch import dispatch
from iommi.declarative.namespace import EMPTY
@dispatch(
b__x=1, # these are default values. "b" here is implicitly
# defining a namespace with a member "x" set to 1
c__y=2,
)
def a(foo, b, c):
print('foo:', foo)
some_function(**b)
another_function(**c)
@dispatch(
d=EMPTY, # explicit namespace
)
def some_function(x, d):
print('x:', x)
another_function(**d)
def another_function(y=None, z=None):
if y:
print('y:', y)
if z:
print('z:', z)
# now to call a()!
a('q')
# output:
# foo: q
# x: 1
# y: 2
a('q', b__x=5)
# foo: q
# x: 5
# y: 2
a('q', b__d__z=5)
# foo: q
# x: 1
# z: 5
# y: 2
# language=rst
"""
This is really useful for the `Table` class as it means we can expose the full
feature set of the underlying `Query` and `Form` classes by just
dispatching keyword arguments downstream. It also enables us to bundle
commonly used features in what we call "shortcuts", which are pre-packaged sets of defaults.
""" | [
9,
1194,
17753
] |
def METHOD_NAME(z: complex) -> complex:
"""Convert parallel impedance to serial impedance equivalent"""
z_sq_sum = z.real**2 + z.imag**2 or 10.0e-30
return complex(
z.real * z.imag**2 / z_sq_sum, z.real**2 * z.imag / z_sq_sum
) | [
1498,
24,
4364
] |
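For reference, a sketch of the conversion implemented above, assuming the real and imaginary parts of the argument encode the parallel resistance $R_p$ and reactance $X_p$ (the `10.0e-30` fallback only guards against division by zero when both parts are zero):

```latex
Z_s \;=\; \frac{R_p X_p^2}{R_p^2 + X_p^2} \;+\; j\,\frac{R_p^2 X_p}{R_p^2 + X_p^2}
```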
def METHOD_NAME(self):
"""
Test write().
"""
config = OCIO.Config.CreateRaw()
grp = OCIO.GroupTransform()
mat = OCIO.MatrixTransform()
mat.setOffset([0.1, 0.2, 0.3, 0])
fmd = mat.getFormatMetadata()
fmd.setID('matID')
fmd.setName('matName')
fmd.addChildElement('Description', 'Sample matrix.')
grp.appendTransform(mat)
range = OCIO.RangeTransform()
range.setMinInValue(0.1)
range.setMinOutValue(0.2)
range.setMaxInValue(1.1)
range.setMaxOutValue(1.4)
fmd = range.getFormatMetadata()
fmd.setID('rangeID')
fmd.setName('rangeName')
fmd.addChildElement('Description', 'Sample range.')
grp.appendTransform(range)
fmd = grp.getFormatMetadata()
fmd.setID('clfID')
fmd.addChildElement('Description', 'Sample clf file.')
self.assertEqual(grp.write(formatName='Academy/ASC Common LUT Format', config=config),
"""<?xml version="1.0" encoding="UTF-8"?> | [
9,
77,
7310
] |
def METHOD_NAME(self):
"""Test writing a worksheet with a blank cell with A1 notation."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({"xf_index": 1})
# No format. Should be ignored.
worksheet.write_blank("A1", None)
worksheet.write_blank("C2", None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp) | [
9,
1893,
399,
171,
13079
] |
def METHOD_NAME(config_dict: dict) -> List[str]:
urls: List[str] = []
ml_config = config_dict.get("scikit_learn")
if ml_config is not None:
urls += ml_config["model"]
prephysics_config = config_dict.get("prephysics")
if prephysics_config is not None:
for entry in prephysics_config:
urls += entry.get("model", [])
return urls | [
19,
578,
2248
] |
def METHOD_NAME(self):
class MyPattern(Template):
pattern = r"""
(?P<escaped>@{2}) |
@(?P<named>[_a-z][._a-z0-9]*) |
@{(?P<braced>[_a-z][._a-z0-9]*)} |
(?P<invalid>@)
"""
m = Mapping()
m.bag = Bag()
m.bag.foo = Bag()
m.bag.foo.who = 'tim'
m.bag.what = 'ham'
s = MyPattern('@bag.foo.who likes to eat a bag of @bag.what')
self.assertEqual(s.substitute(m), 'tim likes to eat a bag of ham')
class BadPattern(Template):
pattern = r"""
(?P<badname>.*) |
(?P<escaped>@{2}) |
@(?P<named>[_a-z][._a-z0-9]*) |
@{(?P<braced>[_a-z][._a-z0-9]*)} |
(?P<invalid>@) |
"""
s = BadPattern('@bag.foo.who likes to eat a bag of @bag.what')
self.assertRaises(ValueError, s.substitute, {})
self.assertRaises(ValueError, s.safe_substitute, {}) | [
9,
652,
345
] |
def METHOD_NAME(output):
"""Removes memory addresses from the test output."""
return re.sub(r'@\w+', '@0x#', output) | [
188,
1645,
1065
] |
def METHOD_NAME(self): | [
9,
1205,
462
] |
def METHOD_NAME(fn):
return unittest.skipUnless(cc_X_or_above(5, 3), "requires cc >= 5.3")(fn) | [
2423,
13562,
1298,
15041
] |
def METHOD_NAME(self):
adapters = getAdapters((self.context, ), ISetupDataSetList)
productnames = [name for name, adapter in adapters]
if len(productnames) == 1:
productnames[0] = 'bika.lims'
return productnames[len(productnames) - 1] | [
19,
155,
156
] |
def METHOD_NAME(self, request, queryset):
"""
Generate invoice report when data (queryset) is valid.
"""
logger.info('Generating invoice report for model {}'.format(
queryset.model
))
data = self._get_report_data(request, queryset)
content = self._get_pdf_content(data)
file_name = '{}-{}.pdf'.format(
self._invoice_report_name, data['id'],
)
return generate_pdf_response(content, file_name) | [
567,
2486,
339
] |
def METHOD_NAME(*args, **kwargs):
prompt_toolkit.print_formatted_text(*args, **kwargs) | [
38,
3171
] |
def METHOD_NAME(class_names):
"""Constructs dictionary from argument to class names.
# Arguments
class_names: List of strings containing the class names.
# Returns
Dictionary mapping integer to class name.
"""
return dict(zip(list(range(len(class_names))), class_names)) | [
19,
718,
24,
2
] |
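A small usage illustration for the helper above; the class list is made up for the example:

```python
class_names = ['background', 'person', 'car']
arg_to_class = dict(zip(list(range(len(class_names))), class_names))
assert arg_to_class == {0: 'background', 1: 'person', 2: 'car'}
```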
def METHOD_NAME(path):
print('Starting Azure metadata file update')
config = {}
os.system(AZU_AUTH_CMD)
with open(path) as config_file:
config = yaml.load(config_file, yaml.SafeLoader)
try:
x86_image = get_azu_image_for_cmd(AZU_X86_CMD)
if x86_image is None:
print('Failed to fetch latest x86 image')
arm_image = get_azu_image_for_cmd(AZU_ARM_CMD)
if arm_image is None:
print('Failed to get latest arm image')
for region in config['regions']:
if arm_image is not None:
config['regions'][region]['arm_image'] = arm_image
if x86_image is not None:
config['regions'][region]['image'] = x86_image
print('Azure Metadata file updated successfully')
except Exception as e:
print('Failed to update azure metadata file')
print(e)
finally:
with open(path, "w") as file:
file.write(YB_LISCENCE)
file.write(yaml.dump(config, Dumper=yaml.SafeDumper)) | [
86,
1507,
773,
171
] |
def METHOD_NAME(mock_tools, tmp_path):
"""If the download archive is corrupted, the validator fails."""
# Mock the environment as if there is no WiX variable
mock_tools.os.environ.get.return_value = None
# Mock the download
wix_path = tmp_path / "tools" / "wix"
wix_zip_path = os.fsdecode(tmp_path / "tools" / "wix.zip")
wix_zip = MagicMock()
wix_zip.__fspath__.return_value = wix_zip_path
mock_tools.download.file.return_value = wix_zip
# Mock an unpack failure
mock_tools.shutil.unpack_archive.side_effect = EOFError
# Verify the install. This will trigger a download,
# but the unpack will fail
with pytest.raises(BriefcaseCommandError, match="interrupted or corrupted"):
WiX.verify(mock_tools)
# The environment was queried.
mock_tools.os.environ.get.assert_called_with("WIX")
# A download was initiated
mock_tools.download.file.assert_called_with(
url=WIX_DOWNLOAD_URL,
download_path=tmp_path / "tools",
role="WiX",
)
# The download was unpacked.
mock_tools.shutil.unpack_archive.assert_called_with(
os.fsdecode(wix_zip_path), extract_dir=os.fsdecode(wix_path)
)
# The zip file was not removed
assert wix_zip.unlink.call_count == 0 | [
9,
789,
180
] |
def METHOD_NAME(source, dest):
"""Copy a file's modification times."""
st = os.stat(source)
if hasattr(os, 'utime'):
os.utime(dest, (st.st_atime, st.st_mtime)) | [
-1
] |
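As a side note, the standard library's `shutil.copystat` covers the same timestamps and additionally copies permission bits and flags where the platform supports them; a minimal sketch with placeholder paths:

```python
import shutil

# copies permission bits, last access time, last modification time, and flags
shutil.copystat("source.txt", "dest.txt")
```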
def METHOD_NAME(token, curr_dir: str, branching_factor: int, depth: int):
if depth > 2:
return
curr_dir = _normalize_remote_path(curr_dir)
for _ in range(branching_factor):
name = _random_name(10)
if not curr_dir:
remote_path = name
else:
remote_path = f"{curr_dir}/{name}"
operation = secrets.choice(["mkdir", "touch"])
_cmd = ["latch", operation, remote_path]
_run_and_verify(_cmd, "Success")
assert _file_exists(token, curr_dir, name)
if operation == "mkdir":
METHOD_NAME(token, remote_path, branching_factor, depth + 1)
_cmd = ["latch", "rm", remote_path]
_run_and_verify(_cmd, "Success") | [
22,
3456,
3236,
2203
] |
def METHOD_NAME(env):
result = []
p = PlatformFactory.from_env(env)
for pkg in p.get_installed_packages():
result.append(
dict(
env=env,
pm=p.pm,
pkg=pkg,
spec=p.get_package_spec(pkg.metadata.name),
)
)
return sorted(result, key=lambda item: item["pkg"].metadata.name) | [
416,
2773,
2913,
2217
] |
def METHOD_NAME(self, node: SidebarItem) -> None:
"Cache index of children in parent."
for row, item in enumerate(node.children):
item._row_in_parent = row
self.METHOD_NAME(item) | [
596,
1346
] |
def METHOD_NAME(self):
self.init_db()
f = dbm.open(_fname, 'c')
self._dict['g'] = f[b'g'] = b"indented"
self.read_helper(f)
# setdefault() works as in the dict interface
self.assertEqual(f.setdefault(b'xxx', b'foo'), b'foo')
self.assertEqual(f[b'xxx'], b'foo')
f.close() | [
9,
2638,
1628
] |
def METHOD_NAME(self):
preds = deepcopy(self.preds)
target = deepcopy(self.target)
target_nan = deepcopy(self.target_nan)
# Regular loss
loss_true = MSELoss()(preds, target)
loss_ipu = MSELossIPU()(preds, target)
self.assertFalse(loss_true.isnan(), "Regular MSELoss is NaN")
self.assertAlmostEqual(
loss_true.item(), loss_ipu.item(), places=6, msg="Regular MSELoss is different"
)
# Regular loss with NaNs in target
not_nan = ~target_nan.isnan()
loss_true = MSELoss()(preds[not_nan], target[not_nan])
loss_ipu = MSELossIPU()(preds, target_nan)
self.assertFalse(loss_true.isnan(), "Regular MSELoss with target_nan is NaN")
self.assertFalse(loss_ipu.isnan(), "Regular MSELossIPU with target_nan is NaN")
self.assertAlmostEqual(
loss_true.item(), loss_ipu.item(), places=6, msg="Regular MSELoss with NaN is different"
) | [
9,
7517
] |
def METHOD_NAME(node):
return node[2] | [
2189
] |
METHOD_NAME(self, parent): | [
105,
29
] |
def METHOD_NAME(session: Session, switch: Switch, processor: User) -> None:
message = deferred_gettext("Deleted switch {}.").format(switch.host.name).to_json()
log_room_event(message, processor, switch.host.room)
session.delete(switch)
session.delete(switch.host) | [
34,
705
] |
def METHOD_NAME(self, value):
"""
Seek to a position in the currently playing stream
:param value: the position in seconds
:type value: float
"""
raise NotImplementedError | [
336
] |
def METHOD_NAME(self, arg):
self.pimpl.METHOD_NAME(arg)
return self | [
1459
] |
async def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("OperationList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem) | [
297,
365
] |
def METHOD_NAME():
parser = argparse.ArgumentParser( description='Creates a single GFF from the output of a few different model prediction tools (coding and non-coding)')
## output file to be written
parser.add_argument('-m', '--model_gff', type=str, required=True, help='Input (pass-through) GFF file' )
parser.add_argument('-o', '--output_gff', type=str, required=False, help='Output file to be written. Default=STDOUT' )
parser.add_argument('-b', '--barrnap_gff', type=str, required=False, help='GFF file from Barrnap prediction' )
parser.add_argument('-g', '--genomic_fasta', type=str, required=True, help='Source genomic FASTA file' )
parser.add_argument('-a', '--aragorn_out', type=str, required=False, help='Raw output file (with -w) from ARAGORN prediction' )
args = parser.parse_args()
(assemblies, features) = gff.get_gff3_features(args.model_gff)
utils.add_assembly_fasta(assemblies, args.genomic_fasta)
if args.barrnap_gff:
add_barrnap_features(assemblies, features, args.barrnap_gff)
if args.aragorn_out:
add_aragorn_features(assemblies, features, args.aragorn_out)
with open(args.output_gff, 'wt') as f:
gff.print_gff3_from_assemblies(ofh=f, assemblies=assemblies) | [
57
] |
def METHOD_NAME(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False | [
288,
6867
] |
def METHOD_NAME(n: int, tf: ivw.data.TransferFunction):
"""
create an n-gon using inviwopy.data.BasicMesh which comes with predefined buffers.
BasicMesh holds the following buffers:
- buffertraits::PositionsBuffer (ivw.glm.vec3)
- buffertraits::NormalBuffer (ivw.glm.vec3)
- buffertraits::TexCoordBuffer<3> (ivw.glm.vec3)
- buffertraits::ColorsBuffer (ivw.glm.vec4)
"""
mesh = ivw.data.BasicMesh(dt=ivw.data.DrawType.Triangles, ct=ivw.data.ConnectivityType.Fan)
normal = ivw.glm.vec3(0, 0, 1)
# add mid point
mesh.addVertex(ivw.glm.vec3(0, 0, 0), normal, ivw.glm.vec3(0, 0, 0), ivw.glm.vec4(1, 1, 1, 1))
angles = np.arange(n, dtype=np.float32) * np.pi * 2.0 / n
angles = np.append(angles, [0])
# create a list of individual vertices where each vertex is a tuple
# of position, normal, texcoord, and color
vertexdata = [(
ivw.glm.vec3(np.cos(angle), np.sin(angle), 0), # position
normal, # normal
ivw.glm.vec3(1.0, angle / (2.0 * np.pi), 0), # texture coordinate
tf.sample(angle / (2.0 * np.pi)) # color
) for angle in angles]
mesh.addVertices(vertexdata)
return mesh | [
-1,
2823,
1949
] |
def METHOD_NAME():
"""Test adding a large MetPy logo to a figure."""
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, size='large')
return fig | [
9,
238,
-1,
994,
1953
] |
def METHOD_NAME(self, rate=0.25, count=5, n=500): | [
9,
3544
] |
def METHOD_NAME(self):
diffs = {
SimpleDiff(type_, type_, None, None)
for type_ in DiffTypes.ALL
}
tables = get_tables_to_rebuild(diffs)
self.assertEqual(
tables,
set(DiffTypes.TYPES_FOR_REBUILD)
) | [
9,
527,
604,
44
] |
def METHOD_NAME(request_mock, user):
text = "clean_links step cleans other.com/some/path/"
result = parse(text, request_mock, user)
assert result["outgoing_links"] == ["other.com/some/path/"] | [
9,
8971,
548,
41,
157,
137,
4398
] |
def METHOD_NAME(self):
"""
Ensure supported args listed in issue_report_params are filled with
the correct values.
"""
class CoolService(IssueTrackerService):
def get_stock_issue_report_args(self, case_run):
return {
"case_summary": "test case 1",
"verbose": True,
}
fake_tracker = f.IssueTrackerFactory(
service_url="http://localhost/",
tracker_product=self.tracker_product,
issue_report_endpoint="/new_issue",
# case_summary should be in the final URL with supported value.
issue_report_params="subject: hello\ncase_summary:",
)
service = CoolService(fake_tracker)
url = service.make_issue_report_url(self.case_run_1)
expected_url = "http://localhost/new_issue?subject=hello&case_summary=test%20case%201"
self.assert_url(expected_url, url) | [
9,
1080,
549,
616,
335
] |
def METHOD_NAME(predt: np.ndarray,
dtrain: xgb.DMatrix) -> Tuple[np.ndarray, np.ndarray]:
'''Squared Log Error objective. A simplified version for RMSLE used as
objective function.
:math:`\frac{1}{2}[log(pred + 1) - log(label + 1)]^2`
'''
predt[predt < -1] = -1 + 1e-6
grad = gradient(predt, dtrain)
hess = hessian(predt, dtrain)
return grad, hess | [
9702,
390
] |
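The `gradient` and `hessian` helpers referenced above are not shown; under the stated objective they would typically evaluate the element-wise first and second derivatives with respect to the (clipped) prediction, a sketch of which is:

```latex
L(\hat{y}, y) = \tfrac{1}{2}\bigl[\log(\hat{y}+1) - \log(y+1)\bigr]^2,\quad
\frac{\partial L}{\partial \hat{y}} = \frac{\log(\hat{y}+1) - \log(y+1)}{\hat{y}+1},\quad
\frac{\partial^2 L}{\partial \hat{y}^2} = \frac{1 - \log(\hat{y}+1) + \log(y+1)}{(\hat{y}+1)^2}
```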
def METHOD_NAME(x):
return isinstance(x, str) | [
137,
144
] |
def METHOD_NAME(output):
"""Parse bto.xml attachment output."""
stream = io.StringIO(output)
parser = BtoParser(stream)
result = BtoInfoResult()
parser.run(result)
return result.bto_info | [
214,
15537,
70,
146
] |
def METHOD_NAME(self, *args, **kwargs):
"""
Prepare for using OpenMPI library in toolchain environment
"""
super(OpenMPI, self).METHOD_NAME(*args, **kwargs)
# OpenMPI 2.x trips if path specified in $TMPDIR is too long
# see https://www.open-mpi.org/faq/?category=osx#startup-errors-with-open-mpi-2.0.x
self.orig_tmpdir = os.environ.get('TMPDIR')
ompi_ver = self.get_software_version(self.MPI_MODULE_NAME)[0]
if LooseVersion(ompi_ver) >= LooseVersion('2.0') and LooseVersion(ompi_ver) < LooseVersion('3.0'):
if len(self.orig_tmpdir) > 40:
tmpdir = tempfile.mkdtemp(prefix='/tmp/')
env.setvar('TMPDIR', tmpdir)
warn_msg = "Long $TMPDIR path may cause problems with OpenMPI 2.x, using shorter path: %s" % tmpdir
self.log.warning(warn_msg)
print_warning(warn_msg, silent=build_option('silent')) | [
123
] |
def METHOD_NAME(provider: OauthProvider) -> None:
"""
Refresh the tokens for a given provider.
This will use the stored refresh token to attempt a fetch of a new access/refresh
token pair. The new tokens will be updated in the Airflow Variable store. Raises
an AirflowSkipException if no tokens are defined for the provider.
"""
current_tokens = _var_get(OAUTH2_TOKEN_KEY)
if provider.name not in current_tokens:
raise AirflowSkipException(
f"Provider {provider.name} had no stored tokens, it may need to be "
f"authorized first."
)
refresh_token = current_tokens[provider.name]["refresh_token"]
secrets = _get_provider_secrets(provider.name)
client = OAuth2Session(secrets["client_id"])
log.info(f"Attempting token refresh for provider: {provider.name}")
# NOTE: Same as above, **secrets may be too much info for some requests.
new_tokens = client.refresh_token(
provider.refresh_url, refresh_token=refresh_token, **secrets
)
_update_tokens(provider.name, new_tokens) | [
1920
] |
def METHOD_NAME(checkpoint_path, layer=None):
state_dict = torch.load(checkpoint_path)
weights = state_dict["weights"]
config = state_dict["config"]
if layer is not None:
config["nLevelsGRU"] = layer
encoder = CPCEncoder(config["hiddenEncoder"])
ar_net = CPCAR(
config["hiddenEncoder"], config["hiddenGar"], False, config["nLevelsGRU"]
)
model = CPCModel(encoder, ar_net)
model.load_state_dict(weights, strict=False)
model.config = config
return model | [
557,
10220,
578
] |
def METHOD_NAME(self):
pass | [
72,
710
] |
def METHOD_NAME():
if _imp[0] is None:
is_pylibmc, memcache_key_t = False, bytes_to_str
try:
import pylibmc as memcache
is_pylibmc = True
except ImportError:
try:
import memcache
except ImportError:
raise ImproperlyConfigured(REQUIRES_BACKEND)
_imp[0] = (is_pylibmc, memcache, memcache_key_t)
return _imp[0] | [
512,
2192,
7410
] |
def METHOD_NAME(load_test_data_transactional):
"""
Ensure that dry run works as expected
:param load_test_data_transactional: The fixture providing the test data (see :meth:`~tests.conftest.load_test_data_transactional`)
:type load_test_data_transactional: tuple
"""
old_url = "https://integreat.app/augsburg/de/deutsche-sprache/sprachlernangebote/"
new_url = "https://integreat.app/augsburg/de/deutsche-sprache/sonstige-sprachlernangebote/"
assert Url.objects.filter(url=old_url).exists()
assert Link.objects.filter(url__url=old_url).count() == 1
assert not Url.objects.filter(url=new_url).exists()
assert not Link.objects.filter(url__url=new_url).exists()
with enable_listeners():
out, err = get_command_output("fix_internal_links")
assert "✔ Finished dry-run of fixing broken internal links." in out
assert not err
assert Url.objects.filter(
url=old_url
).exists(), "Old URL should not be removed during dry run"
assert (
Link.objects.filter(url__url=old_url).count() == 1
), "Old link should not be removed during dry run"
assert not Url.objects.filter(
url=new_url
).exists(), "New URL should not be created during dry run"
assert not Link.objects.filter(
url__url=new_url
).exists(), "New link should not be created during dry run" | [
9,
1112,
2026,
1127,
6829,
22
] |
def METHOD_NAME(line, path):
m = re.search(r"SPDX-License-Identifier: ([^\n]*)", line)
if m and m.group(1) != apache2_mit_spdx:
print(
"{0}: SPDX license identifier mismatch"
"(expecting {1}, found {2})".format(path, apache2_mit_spdx, m.group(1))
)
return SPDX_MISMATCH | [
909,
5498,
769
] |
METHOD_NAME( self ) : | [
9
] |
def METHOD_NAME(line, s=doc):
s[0] = '%s\n%s' % (s[0], line) | [
14844
] |
def METHOD_NAME(self):
self.thread_running.clear()
self.thread.join() | [
631
] |
def METHOD_NAME():
t = Thread(target=test_async_callbacks)
t.start()
t.join() | [
9,
958,
958,
2425
] |
def METHOD_NAME(self, name: str) -> Column:
... | [
105
] |
def METHOD_NAME(self):
v1 = OptVariable("a", 2, 0, 3, description="test")
assert v1.name == "a"
assert v1.value == 2
assert v1.lower_bound == 0
assert v1.upper_bound == 3
assert v1.description == "test"
v2 = OptVariable("b", 0, -1, 1)
assert v2.name == "b"
assert v2.value == 0
assert v2.lower_bound == -1
assert v2.upper_bound == 1
assert v2.description is None
with pytest.raises(OptVariablesError):
v3 = OptVariable("a", 2, 2.5, 3) | [
9,
12940
] |
def METHOD_NAME(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0 | [
118,
4226
] |
def METHOD_NAME(self):
METHOD_NAME = self.__class__.resolve_config(self)
return METHOD_NAME | [
200
] |
def METHOD_NAME(api):
"""Return true if the api is enabled by the current backend.
Args:
api (string): The api name.
Returns:
bool: ``True`` if the API is enabled by the current backend.
"""
return api in _enabled_apis | [
137,
1111
] |
def METHOD_NAME(self) -> str:
"""
The provisioning state of the pipeline at the time the operation was called.
"""
return pulumi.get(self, "provisioning_state") | [
1994,
551
] |
def METHOD_NAME():
"""Allows testing of only this module with the command::
python setup.py test -m <modulename>
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__) | [
9,
482
] |
def METHOD_NAME(self):
return FileStorageFactory(resource_group=RESOURCE_GROUP.PREDICTOR, sync=True) | [
19,
948,
1155
] |
def METHOD_NAME(self):
seq1 = Sequence('CGTTATGTCTGTGAT')
seq2 = Sequence('CTGAATCGGTAGTGT')
obs = kmer_distance(seq1, seq2, 3, overlap=False)
exp = 0.8888888888888888
self.assertAlmostEqual(obs, exp) | [
9,
2820,
1168
] |
def METHOD_NAME():
"""Get the result for the testcase list page."""
params = dict(request.iterparams())
page = helpers.cast(request.get('page') or 1, int, "'page' is not an int.")
query = datastore_query.Query(data_types.Testcase)
crash_access.add_scope(query, params, 'security_flag', 'job_type',
'fuzzer_name_indices')
add_filters(query, params)
testcases, total_pages, total_items, has_more = query.fetch_page(
page=page, page_size=PAGE_SIZE, projection=FIELDS, more_limit=MORE_LIMIT)
items = []
for testcase in testcases:
regression_range = ''
fixed_range = ''
if testcase.regression and testcase.regression != 'NA':
regression_range = testcase.regression
if testcase.fixed and testcase.fixed != 'NA':
fixed_range = testcase.fixed
item = {
'id': testcase.key.id(),
'crashType': ' '.join(testcase.crash_type.splitlines()),
'crashStateLines': testcase.crash_state.strip().splitlines(),
'jobType': testcase.job_type,
'isClosed': not testcase.open,
'isFixed': testcase.fixed and testcase.fixed != 'NA',
'isReproducible': not testcase.one_time_crasher_flag,
'isSecurity': testcase.security_flag,
'isImpactSet': testcase.is_impact_set_flag,
'impacts': {
'extendedStable': testcase.impact_extended_stable_version,
'stable': testcase.impact_stable_version,
'beta': testcase.impact_beta_version,
'head': testcase.impact_head_version,
},
'regressionRange': regression_range,
'fixedRange': fixed_range,
'groupId': testcase.group_id,
'projectName': testcase.project_name,
'platform': testcase.platform,
'issueId': testcase.bug_information or testcase.group_bug_information,
'showImpacts': testcase.has_impacts(),
'impactsProduction': testcase.impacts_production()
}
if testcase.timestamp:
item['timestamp'] = utils.utc_datetime_to_timestamp(testcase.timestamp)
items.append(item)
helpers.log('Testcases', helpers.VIEW_OPERATION)
result = {
'hasMore': has_more,
'items': items,
'page': page,
'pageSize': PAGE_SIZE,
'totalItems': total_items,
'totalPages': total_pages,
}
return result, params | [
19,
1571
] |
def METHOD_NAME(test_id, *params, key_name_to_key_code=None):
yield ('id', test_id)
if key_name_to_key_code is not None:
yield ('key_name_to_key_code', key_name_to_key_code)
for iterables in params:
iterables = [
i if isinstance(i, (tuple, list)) else (i,)
for i in iterables
]
if len(iterables) < 2:
iterables.append(('',))
for combo_string, parse_result, in itertools.product(*iterables):
yield (
('parse', combo_string, parse_result),
) | [
567,
3206,
450
] |
def METHOD_NAME(self):
if not all(s.is_linked for s in self.inputs):
return
versR = Vector_generate(self.inputs['VersR'].sv_get())
versD = Vector_generate(self.inputs['VersD'].sv_get())
edgeR = self.inputs['EdgeR'].sv_get()
edgeD = self.inputs['EdgeD'].sv_get()
verts_out = []
edges_out = []
mesh_join = self.mesh_join
versD, remove, edgeD = match_long_repeat([versD, edgeR[0], edgeD])
versD = [[v - versD[0][0] for v in vD] for vD in versD]
for vc, edg in zip(versR, edgeR):
if mesh_join:
v_out = []
v_out_app = v_out.append
e_out = []
e_out_app = e_out.append
for e, verD, edgD in zip(edg, versD, edgeD):
# for every edge or for objectR???
d_vector = verD[-1].copy()
d_scale = d_vector.length
d_vector.normalize()
# leave for now
if not mesh_join:
v_out = []
v_out_app = v_out.append
e_vector = vc[e[1]] - vc[e[0]]
e_scale = e_vector.length
e_vector.normalize()
q1 = d_vector.rotation_difference(e_vector)
mat_s = Matrix.Scale(e_scale / d_scale, 4)
mat_r = Matrix.Rotation(q1.angle, 4, q1.axis)
mat_l = Matrix.Translation(vc[e[0]])
mat = mat_l @ mat_r @ mat_s
offset = len(v_out)
for v in verD:
v_out_app((mat @ v)[:])
if mesh_join:
for edge in edgD:
e_out_app([i + offset for i in edge])
else:
verts_out.append(v_out)
edges_out.append(edgD)
if mesh_join:
verts_out.append(v_out)
edges_out.append(e_out)
if self.outputs['Vertices'].is_linked:
self.outputs['Vertices'].sv_set(verts_out)
if self.outputs['Edges'].is_linked:
self.outputs['Edges'].sv_set(edges_out) | [
356
] |
def METHOD_NAME(self, executor_rank: int = 0, process_group: ProcessGroup = None):
"""
This context manager is used to allow one process to execute while blocking all
other processes in the same process group. This is often useful when downloading is required
as we only want to download in one process to prevent file corruption.
Example:
>>> from colossalai.cluster import DistCoordinator
>>> dist_coordinator = DistCoordinator()
>>> with dist_coordinator.priority_execution():
>>> dataset = CIFAR10(root='./data', download=True)
Args:
executor_rank (int): the process rank to execute without blocking, all other processes will be blocked
process_group (ProcessGroup, optional): process group to use for the executor rank check. Defaults to None, which refers to the default process group.
"""
rank = dist.get_rank(group=process_group)
should_block = rank != executor_rank
if should_block:
self.block_all(process_group)
yield
if not should_block:
self.block_all(process_group) | [
2654,
2046
] |
def METHOD_NAME(address1: str,
address2: str, read_ext_input: int,
pv_module: str,
num: int) -> None:
# for openWB version 1.9, the previous parametrization allows reading two IP addresses
# likewise, the battery query reads both battery and PV values
# in openWB v2.0 only one IP address is supported and the PV has to be read separately
addresses = [address for address in [address1, address2] if address != "none"]
read_ext = (read_ext_input == 1)
log.debug('e3dc IP-Adresse1: %s', address1)
log.debug('e3dc IP-Adresse2: %s', address2)
log.debug('e3dc read_ext: %s', read_ext)
log.debug('e3dc pv_module: %s', pv_module)
log.debug('e3dc id: %d', num)
soc = 0 # type: Union[int, float]
power = 0
pv_external = 0
pv = 0
pv_other = pv_module != "none"
for address in addresses:
log.debug("Ip: %s, read_external %s pv_other %s", address, read_ext, pv_other)
with modbus.ModbusTcpClient_(address, port=502) as client:
soc_tmp, power_tmp = read_bat(client)
soc += soc_tmp
power += power_tmp
pv_tmp = read_inverter(client)
if read_ext:
pv_external_tmp = read_external_inverter(client)
else:
pv_external_tmp = 0
pv += pv_tmp
pv_external += pv_external_tmp
soc /= len(addresses)
log.debug("Soc %d power %d pv %d pv_external %d",
soc, power, pv, pv_external)
counter_import, counter_export = sim_count(power, prefix="speicher")
get_bat_value_store(1).set(BatState(power=power, soc=soc, imported=counter_import, exported=counter_export))
# pv_other indicates whether an inverter (WR) is configured whose PV power also counts
# if 0, only the PV and pv_external values from the e3dc count
pv_total = pv + pv_external
# If wr1 is not configured, only the PV power determined here in this module
# counts as the total PV power for wr1
# If wr1 is configured, the existing PV power from wr1 plus what was determined here in this module
# counts as the total PV power for wr1
if pv_other:
pv_total = pv_total + files.pv[0].power.read()
log.debug("wr update pv_other %s pv_total %d", pv_other, pv_total)
_, exported_pv = sim_count(pv_total, prefix="pv")
get_inverter_value_store(num).set(InverterState(exported=exported_pv, power=pv_total)) | [
203,
3116,
4951
] |
def METHOD_NAME(self, project):
config_builds_not_enabled = f"""
projects_and_groups:
{project.path_with_namespace}:
project_settings:
builds_access_level: disabled
variables:
foo:
key: FOO
value: 123
"""
run_gitlabform(config_builds_not_enabled, project)
with pytest.raises(GitlabListError):
# variables will NOT be available without builds_access_level in ['private', 'enabled']
project.variables.list() | [
9,
5545,
1295
] |
def METHOD_NAME():
description = """Given a directory path, and a list of module names,
copies each module into the dir, if the cam being built has the symbols needed
for the module to run. That is: put the right modules into the cam build.
Used during the build to copy the correct modules into a zip.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument("dest_dir",
help="path to copy modules into, e.g. "
"platform/200D.101/zip/ML/modules")
parser.add_argument("module_names",
nargs="*",
help="e.g. adv_int edmac")
args = parser.METHOD_NAME()
if len(args.module_names) == 0:
print("No module names given")
sys.exit(1)
dest_dir = args.dest_dir
if not os.path.isdir(dest_dir):
print("dest_dir didn't exist or wasn't a dir: '%s'"
% dest_dir)
sys.exit(2)
# We assume this is only called from platform dirs (is this true?)
# with INSTALL_MODULES_DIR as target and that has this form:
# <full path>/platform/200D.101/zip/ML/modules
path_components = dest_dir.split(os.path.sep)
if "platform" not in path_components:
print("dest_dir didn't contain 'platform' dir: '%s'" % dest_dir)
sys.exit(3)
platform_index = path_components.index("platform")
cam_dir = path_components[platform_index + 1]
if len(cam_dir.split(".")) != 2:
# We expect cam dirs to have a dot separating cam from FW version.
# Not a great test but does work at present.
print("cam dir looked weird: '%s'" % cam_dir)
sys.exit(4)
cam_dir = os.path.join(dest_dir, "..", "..", "..")
if "magiclantern.sym" not in os.listdir(cam_dir):
# This happens for ML_SRC_PROFILE = minimal builds.
# SJE TODO: make minimal builds not try to copy modules?
# For now, we simply don't try. This might cause problems
# if a cam dir should produce the sym file but failed to do so?
print("No magiclantern.sym in cam dir, can't include modules: '%s'"
% dest_dir)
sys.exit(0)
args.cam_dir = os.path.relpath(cam_dir)
return args | [
214,
335
] |
def METHOD_NAME(self, args):
self._model_name = args["model_name"]
self.inflight_thread_count = 0
self.inflight_thread_count_lck = threading.Lock() | [
15
] |
def METHOD_NAME(logo_files, output_path):
resized_images = []
for logo_file in logo_files:
name, ext = os.path.splitext(os.path.basename(logo_file))
new_name = "{}{}".format(name, ext)
out_name = pjoin(output_path, "resized/", new_name)
print("Resizing image: {name}".format(name=logo_file))
values = {"name": logo_file, "out_name": out_name, "dimensions": DIMENSIONS}
cmd = "convert %(name)s -resize %(dimensions)s %(out_name)s"
cmd = cmd % values
subprocess.call(cmd, shell=True)
resized_images.append(out_name)
return resized_images | [
1128,
3669
] |
def METHOD_NAME(func_self, *args, **wkwargs):
cassette = args[0]
args = args[1:]
if inject_cassette:
func(func_self, cassette, *args, **wkwargs)
else:
func(func_self, *args, **wkwargs)
self.hide_sensitive_data(cassette) | [
2354
] |
def METHOD_NAME(self, conf: ConfigTree) -> None:
self._conf = conf
self.dashboard_group_ids_to_skip = self._conf.get_list(DASHBOARD_GROUP_IDS_TO_SKIP, [])
restapi_query = self._build_restapi_query()
self._extractor = ModeDashboardUtils.create_mode_rest_api_extractor(restapi_query=restapi_query,
conf=self._conf)
# Payload from RestApiQuery has timestamp which is ISO8601. Here we are using TimestampStringToEpoch to
# transform into epoch and then using DictToModel to convert Dictionary to Model
transformers: List[Transformer] = []
timestamp_str_to_epoch_transformer = TimestampStringToEpoch()
timestamp_str_to_epoch_transformer.METHOD_NAME(
conf=Scoped.get_scoped_conf(self._conf, timestamp_str_to_epoch_transformer.get_scope()).with_fallback(
ConfigFactory.from_dict({FIELD_NAME: 'created_timestamp', })))
transformers.append(timestamp_str_to_epoch_transformer)
dashboard_group_url_transformer = TemplateVariableSubstitutionTransformer()
dashboard_group_url_transformer.METHOD_NAME(
conf=Scoped.get_scoped_conf(self._conf, dashboard_group_url_transformer.get_scope()).with_fallback(
ConfigFactory.from_dict({VAR_FIELD_NAME: 'dashboard_group_url',
TEMPLATE: 'https://app.mode.com/{organization}/spaces/{dashboard_group_id}'})))
transformers.append(dashboard_group_url_transformer)
dashboard_url_transformer = TemplateVariableSubstitutionTransformer()
dashboard_url_transformer.METHOD_NAME(
conf=Scoped.get_scoped_conf(self._conf, dashboard_url_transformer.get_scope()).with_fallback(
ConfigFactory.from_dict({VAR_FIELD_NAME: 'dashboard_url',
TEMPLATE: 'https://app.mode.com/{organization}/reports/{dashboard_id}'})))
transformers.append(dashboard_url_transformer)
dict_to_model_transformer = DictToModel()
dict_to_model_transformer.METHOD_NAME(
conf=Scoped.get_scoped_conf(self._conf, dict_to_model_transformer.get_scope()).with_fallback(
ConfigFactory.from_dict(
{MODEL_CLASS: 'databuilder.models.dashboard.dashboard_metadata.DashboardMetadata'})))
transformers.append(dict_to_model_transformer)
self._transformer = ChainedTransformer(transformers=transformers) | [
176
] |
def METHOD_NAME(self, tooltip):
return tooltip.tipwindow and tooltip.tipwindow.winfo_viewable() | [
137,
-1,
8528
] |
def METHOD_NAME(url, apikey_sonarr, sonarr_series_id=None):
url_sonarr_api_series = url + "/api/{0}series/{1}?apikey={2}".format(
'' if get_sonarr_info.is_legacy() else 'v3/', sonarr_series_id if sonarr_series_id else "", apikey_sonarr)
try:
r = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers)
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code:
raise requests.exceptions.HTTPError
logging.exception("BAZARR Error trying to get series from Sonarr. Http error.")
return
except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get series from Sonarr. Connection Error.")
return
except requests.exceptions.Timeout:
logging.exception("BAZARR Error trying to get series from Sonarr. Timeout Error.")
return
except requests.exceptions.RequestException:
logging.exception("BAZARR Error trying to get series from Sonarr.")
return
else:
result = r.json()
if isinstance(result, dict):
return [result]
else:
return r.json() | [
19,
4045,
280,
6646,
58
] |
def METHOD_NAME(ckt_data, primitive_library):
at = Annotate(ckt_data, primitive_library)
at.annotate()
for ckt in ckt_data:
if isinstance(ckt, SubCircuit):
assert ckt.pins, f"floating module found {ckt.name}"
assert len(ckt.pins) == len(
set(ckt.pins)
), f"duplicate pins found in module {ckt.name}, {ckt.pins}"
for ele in ckt.elements:
if isinstance(ckt_data.find(ele.model), SubCircuit):
assert len(ele.pins) == len(ckt_data.find(ele.model).pins), "incorrect subckt instantiation" | [
1380,
3106
] |
def METHOD_NAME(json):
if ":" in json["name"]:
return False
if json["section"] == "user":
return json["name"] not in SKIP_OPTIONS
else:
return json["name"] in BUILTIN_OPTIONS | [
527,
1881
] |
def METHOD_NAME(self): | [
9,
837
] |
def METHOD_NAME(msocket, group, intf):
intf_addr = get_local_ip(intf)
if (intf_addr == None): return
i = socket.inet_aton(intf_addr)
g = socket.inet_aton(group)
msocket.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, g + i) | [
3457,
1083
] |
def METHOD_NAME(self) -> str:
"""
The kind of the data connector
Expected value is 'APIPolling'.
"""
return pulumi.get(self, "kind") | [
1253
] |
def METHOD_NAME(self, serialised: str) -> int:
parser = configparser.ConfigParser(interpolation = None)
parser.read_string(serialised)
format_version = int(parser.get("general", "version")) #Explicitly give an exception when this fails. That means that the file format is not recognised.
setting_version = int(parser.get("metadata", "setting_version", fallback = "0"))
return format_version * 1000000 + setting_version | [
19,
2610,
281
] |
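A small worked example of the combined version number returned above; the values are made up:

```python
format_version = 4    # from [general] version
setting_version = 17  # from [metadata] setting_version, 0 if absent
combined = format_version * 1000000 + setting_version
assert combined == 4000017  # format version in the upper digits, setting version in the lower six
```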
def METHOD_NAME(self, token: contextvars.Token) -> None:
"""
Reset the underlying storage to the previous context state.
Resets the storage state to the `context` associated with the provided token. After
resetting storage state, any additional `connections` created in the `old context` are
copied into the `current context`.
:param token:
The token corresponding to the `context` to which the storage state has to
be reset. Typically, this token is obtained by calling the
:meth:`set<tortoise.connection.ConnectionHandler.set>` method of this class.
"""
current_storage = self._get_storage()
self._conn_storage.METHOD_NAME(token)
prev_storage = self._get_storage()
for alias, conn in current_storage.items():
if alias not in prev_storage:
prev_storage[alias] = conn | [
656
] |
def METHOD_NAME(
ss,
levels: tuple[int] | list[int],
valid_ilocs: npt.NDArray[np.intp],
sort_labels: bool = False,
) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]:
"""
For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`,
where `ax_coords` are the coordinates along one of the two axes of the
destination sparse matrix, and `ax_labels` are the labels from `ss`' Index
which correspond to these coordinates.
Parameters
----------
ss : Series
levels : tuple/list
valid_ilocs : numpy.ndarray
Array of integer positions of valid values for the sparse matrix in ss.
sort_labels : bool, default False
Sort the axis labels before forming the sparse matrix. When `levels`
refers to a single level, set to True for a faster execution.
Returns
-------
ax_coords : numpy.ndarray (axis coordinates)
ax_labels : list (axis labels)
"""
# Since the labels are sorted in `Index.levels`, when we wish to sort and
# there is only one level of the MultiIndex for this axis, the desired
# output can be obtained in the following simpler, more efficient way.
if sort_labels and len(levels) == 1:
ax_coords = ss.index.codes[levels[0]][valid_ilocs]
ax_labels = ss.index.levels[levels[0]]
else:
levels_values = lib.fast_zip(
[ss.index.get_level_values(lvl).values for lvl in levels]
)
codes, ax_labels = factorize(levels_values, sort=sort_labels)
ax_coords = codes[valid_ilocs]
ax_labels = ax_labels.tolist()
return ax_coords, ax_labels | [
1043,
24,
2227
] |
def METHOD_NAME(self):
return len(self.object) > 0 | [
137,
15063
] |
def METHOD_NAME(cls):
cls.tempdir = TemporaryDirectory()
cls.graph_repr = 'succinct'
cls.anno_repr = 'column'
cls._build_graph(TEST_DATA_DIR + '/transcripts_100.fa',
cls.tempdir.name + '/graph',
20, cls.graph_repr, 'basic', '--mask-dummy')
res = cls._get_stats(f'{cls.tempdir.name}/graph{graph_file_extension[cls.graph_repr]}')
assert(res.returncode == 0)
out = res.stdout.decode().split('\n')[2:]
assert('k: 20' == out[0])
assert('nodes (k): 46960' == out[1])
assert('mode: basic' == out[2])
cls._annotate_graph(
TEST_DATA_DIR + '/transcripts_100.fa',
cls.tempdir.name + '/graph' + graph_file_extension[cls.graph_repr],
cls.tempdir.name + '/annotation',
cls.anno_repr,
extra_params='--count-kmers'
) | [
0,
1,
2
] |
def METHOD_NAME():
# -- Configure the argument parser
parser = argparse.ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description='description:\n Unit test wrapper for the adding AIE Partitions')
parser.add_argument('--resource-dir', nargs='?', default=".", help='directory containing data to be used by this unit test')
args = parser.parse_args()
# Validate that the resource directory is valid
if not os.path.exists(args.resource_dir):
raise Exception("Error: The resource-dir '" + args.resource_dir +"' does not exist")
if not os.path.isdir(args.resource_dir):
raise Exception("Error: The resource-dir '" + args.resource_dir +"' is not a directory")
# Prepare for testing
xclbinutil = "xclbinutil"
# Start the tests
print ("Starting test")
# ---------------------------------------------------------------------------
step = "1) Add the AIE parition to the xclbin image"
sectionname = "Flavor"
workingXclbin = "aiePartition.xclbin"
aiePartitionOutput1 = "aie_partition_output1.json"
aiePartition = os.path.join(args.resource_dir, "aie_partition.json")
aiePartitionOutputExpected = os.path.join(args.resource_dir, "aie_partition_expected.json")
cmd = [xclbinutil, "--add-section", "AIE_PARTITION["+sectionname+"]:JSON:" + aiePartition,
"--dump-section", "AIE_PARTITION["+sectionname+"]:JSON:" + aiePartitionOutput1,
"--output", workingXclbin,
"--force",
"--trace"
]
execCmd(step, cmd)
jsonFileCompare(aiePartitionOutputExpected, aiePartitionOutput1)
# ---------------------------------------------------------------------------
step = "2) Read and dump the AIE parition"
aiePartitionOutput2 = "aie_partition_output2.json"
cmd = [xclbinutil, "--input", workingXclbin,
"--dump-section", "AIE_PARTITION["+sectionname+"]:JSON:" + aiePartitionOutput2,
"--force",
"--trace"
]
execCmd(step, cmd)
jsonFileCompare(aiePartitionOutputExpected, aiePartitionOutput2)
# 1a) Check for the existence of the dummy PDI images
aiePartition1111PDIExpected = os.path.join(args.resource_dir, "1111.txt")
aiePartition1111PDIOutput = "00000000-0000-0000-0000-000000001111.pdi"
textFileCompare(aiePartition1111PDIExpected, aiePartition1111PDIOutput)
# 1b) Check for the existence of the dummy PDI images
aiePartition2222PDIExpected = os.path.join(args.resource_dir, "2222.txt")
aiePartition2222PDIOutput = "00000000-0000-0000-0000-000000002222.pdi"
textFileCompare(aiePartition2222PDIExpected, aiePartition2222PDIOutput)
# ---------------------------------------------------------------------------
# If the code gets this far, all is good.
return False | [
57
] |
async def METHOD_NAME(_=None):
# There's only one page of results (the `get_settings` result), so we return the awaited result directly
return await result | [
19,
243
] |
def METHOD_NAME(
client_request,
platform_admin_user,
service_one,
mock_get_inbound_number_for_service,
mock_get_all_letter_branding,
mock_get_service_organisation,
mock_get_free_sms_fragment_limit,
no_reply_to_email_addresses,
no_letter_contact_blocks,
single_sms_sender,
mock_get_service_data_retention,
):
client_request.login(platform_admin_user)
return functools.partial(client_request.get, "main.service_settings", service_id=service_one["id"]) | [
19,
549,
817,
1174
] |
def METHOD_NAME(firewall_rule_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIPv6FirewallRuleResult]:
"""
Gets an IPv6 firewall rule.
Azure REST API version: 2021-11-01.
:param str firewall_rule_name: The name of the firewall rule.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
... | [
19,
497,
7118,
650,
446,
146
] |
def METHOD_NAME(ds):
ds._settings["base_url"] = "/prefix/"
path = ds.urls.path("/")
assert path == "/prefix/"
path = ds.urls.path(path)
assert path == "/prefix/" | [
9,
157,
2350,
2430,
870,
130,
2152
] |
def METHOD_NAME(self, mode=None):
qio = QemuIoInteractive('-r', '-f', 'raw', nbd_uri)
self.assertReadOk(qio.cmd('read 0 512'))
result = self.remove_export('exp', mode)
self.assertExistingClients(result)
self.assertReadOk(qio.cmd('read 0 512'))
qio.close()
result = self.remove_export('exp', mode)
self.assert_qmp(result, 'return', {})
self.assertExportNotFound('exp') | [
74,
9,
188,
4217,
707,
1209
] |
def METHOD_NAME():
with pytest.raises(RasterioIOError):
rasterio.shutil.copyfiles('trash', 'whatever') | [
9,
9414,
180
] |