| text (string, 15-7.82k chars) | ids (sequence, 1-7 items) |
|---|---|
def METHOD_NAME(group_id):
return {gh.hash for gh in GroupHash.objects.filter(group_id=group_id)} | [
846,
2012
] |
def METHOD_NAME(self) -> Optional[bool]:
"""
Indicates whether this activity log alert is enabled. If an activity log alert is not enabled, then none of its actions will be activated.
"""
return pulumi.get(self, "enabled") | [
1111
] |
def METHOD_NAME():
# TODO : Test discovery based on file system paths
return [
("Core language features", [
("test_suite.py", "basic test suite"),
("test_rmethods.py", "reflected methods"),
("test_bytes.py", "bytes"),
("test_classes.py", "classes"),
("test_decorators.py", "decorators"),
("test_descriptors.py", "descriptors"),
("test_dict.py", "dicts"),
("test_exceptions.py", "exceptions"),
("test_exec.py", "exec / eval"),
("test_file.py", "file open / read"),
("test_generators.py", "generators"),
("test_import.py", "imports"),
("test_iterators.py", "iterators"),
("test_jsobjects.py", "Javascript objects"),
("test_list.py", "lists"),
("test_memoryview.py", "memoryview"),
("test_numbers.py", "numbers"),
("test_pattern_matching.py", "pattern matching"),
("test_print.py", "print"),
("test_set.py", "sets"),
("test_special_methods.py", "special methods"),
("test_strings.py", "strings"),
("test_fstrings.py", "f-strings"),
("test_string_format.py", "string format"),
("test_string_methods.py", "string methods")
]),
("DOM interface", [
("dom.py", "DOM"),
("test_webcomponent.py", "Web component")
]),
("Issues", [
("issues_gc.py", "issues (GC)"),
("issues_bb.py", "issues (BB)"),
("issues.py", "issues")
]),
("Modules", [
("test_ast.py", "ast"),
("test_aio.py", "browser.aio"),
("test_ajax.py", "browser.ajax"),
("test_highlight.py", "browser.highlight"),
("test_browser_html.py", "browser.html"),
("test_binascii.py", "binascii"),
("test_bisect.py", "bisect"),
("test_builtins.py", "builtins"),
("test_code.py", "code"),
("test_collections.py", "collections"),
("test_copy.py", "copy"),
("test_dataclasses.py", "dataclasses"),
("test_datetime.py", "datetime"),
("test_decimals.py", "decimals"),
("test_functools.py", "functools"),
("test_compression.py", "gzip / zlib"),
("test_hashlib.py", "hashlib"),
("test_io.py", "io"),
("test_itertools.py", "itertools"),
("test_json.py", "json"),
("test_math.py", "math"),
("test_pickle.py", "pickle"),
("test_random.py", "random"),
("test_re.py", "re"),
("test_secrets.py", "secrets"),
("test_storage.py", "storage"),
("test_struct.py", "struct"),
("test_sys.py", "sys"),
("test_types.py", "types"),
("test_unicodedata.py", "unicodedata"),
("test_unittest.py", "unittest"),
("test_urllib.py", "urllib"),
#("test_indexedDB.py", "indexedDB"),
#("test_time.py", "time"),
])
] | [
1765,
9278,
9,
468
] |
def METHOD_NAME():
return | [
57
] |
def METHOD_NAME(
df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame],
column: str = "",
) -> Union[bool, pd.Series, pd.DataFrame]:
"""
Validate if a data cell is MVA in a DataFrame column. For each cell, return True or False.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be validated.
column
The name of the column to be validated.
"""
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(mva.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if column != "":
return df[column].apply(mva.is_valid)
else:
return df.applymap(mva.is_valid)
return mva.is_valid(df) | [
187,
654,
17810
] |
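The dispatch above (`Series.apply` for a single column, elementwise `applymap` for a whole frame) is a common validator pattern. A minimal self-contained sketch, with a stand-in `is_valid` in place of the `mva` checker (an assumption here):

```python
import pandas as pd

def is_valid(cell) -> bool:
    # stand-in for mva.is_valid: treat non-empty strings as valid
    return isinstance(cell, str) and bool(cell.strip())

s = pd.Series(["a", "", "b"])
print(s.apply(is_valid).tolist())    # [True, False, True]

df = pd.DataFrame({"x": ["a", ""], "y": ["", "c"]})
print(df.applymap(is_valid))         # elementwise mask (DataFrame.map in pandas >= 2.1)
```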
def METHOD_NAME(self):
"""
Updates the size variables of all unknowns.
"""
for u in self.unknowns:
if u.name == 'f_hot':
u.setSizes(nr=self.nr, nxi=self.hot_nxi, np=self.hot_np)
elif u.name == 'f_re':
u.setSizes(nr=self.nr, nxi=self.re_nxi, np=self.re_np)
elif u.name == 'n_i':
u.setSizes(nZ0=self.nZ0, nr=self.nr)
elif u.size == self.nr: # Fluid quantity
u.setSizes(nr=self.nr)
elif u.size == self.nions*self.nr: # N_i or W_i
u.setSizes(nions=self.nions, nr=self.nr) | [
86,
46,
3120
] |
def METHOD_NAME(self):
with decimal.localcontext() as ctx:
ctx.prec = 20
# Note: All the instances are actually floats, so larger prec.
numbers = list(map(Decimal, [
0.8462362724718449, 0.7053497034927213, 0.18865925698056718,
0.4231422803809822, 0.1454769973533604, 0.11586558849935513,
0.04047872493132432, 0.09511048123106225, 0.4932086961083296,
0.12377443905156471]))
exp = number.infer_quantum_from_list(numbers)
self.assertEqual(-21, exp) | [
9,
1852,
9293,
98
] |
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags") | [
114
] |
def METHOD_NAME():
bpy.utils.unregister_class(MaskListNode) | [
2468
] |
def METHOD_NAME(self):
return self.parent_standard_permit_condition_id | [
935,
6310,
405,
147
] |
def METHOD_NAME(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip() | [
756,
1356
] |
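The double `html.unescape` in the cleaner above handles double-encoded entities; a quick stdlib-only demonstration (ftfy is only needed for mojibake repair):

```python
import html

s = "&amp;amp; caf&eacute;"
print(html.unescape(s))                  # '&amp; café' - one layer decoded
print(html.unescape(html.unescape(s)))   # '& café'    - double encoding resolved
```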
async def METHOD_NAME(howdy_context: Path):
# This test is contrived to pretend we're in a Docker container right now, giving
# a known test file path as the image_flow_location
packager = DockerPackager(
dockerfile=howdy_context / "Dockerfile",
image_flow_location=str(howdy_context / "howdy.py"),
)
manifest = await packager.package(howdy)
unpackaged_howdy = await manifest.unpackage()
assert unpackaged_howdy("dude") == "howdy, dude!" | [
9,
-1,
261,
224
] |
def METHOD_NAME(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer | [
111,
6915
] |
def METHOD_NAME(self) -> Mapping[str, str]:
return pulumi.get(self, "tags") | [
114
] |
def METHOD_NAME(
Out,
A,
Residual,
ln_input,
Weight,
Bias,
stride,
N,
eps,
BLOCK_SIZE: tl.constexpr,
):
# position of elements processed by this program
row = tl.program_id(0)
Out += row * stride
A += row * stride
Residual += row * stride
ln_input += row * stride
# compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
res = tl.load(Residual + cols, mask=cols < N, other=0.0).to(tl.float32)
a = a + res
tl.store(ln_input + cols, a, mask=cols < N)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(ln_input + cols, mask=cols < N, other=0.0).to(tl.float32)
a = tl.where(cols < N, a - mean, 0.0)
_var += a * a
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# multiply by weight and add bias
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
weight = tl.load(Weight + cols, mask=mask)
bias = tl.load(Bias + cols, mask=mask)
a = tl.load(ln_input + cols, mask=mask, other=0.0).to(tl.float32)
a_hat = (a - mean) * rstd
out = a_hat * weight + bias
# write-back
tl.store(Out + cols, out, mask=mask) | [
94,
387,
1770,
1885
] |
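A hedged host-side launch sketch for the fused add-plus-layernorm kernel above, assuming it is decorated with `@triton.jit` and bound to the name `fused_add_layernorm` (both assumptions, since the dataset masks the name), and that a CUDA device is available:

```python
import torch
import triton

M, N = 8, 1024
x = torch.randn(M, N, device="cuda", dtype=torch.float32)
residual = torch.randn_like(x)
weight = torch.ones(N, device="cuda")
bias = torch.zeros(N, device="cuda")
out = torch.empty_like(x)
ln_input = torch.empty_like(x)  # scratch buffer holding x + residual

BLOCK_SIZE = min(triton.next_power_of_2(N), 1024)
fused_add_layernorm[(M,)](      # one program instance per row
    out, x, residual, ln_input, weight, bias,
    x.stride(0), N, 1e-5, BLOCK_SIZE=BLOCK_SIZE,
)
torch.testing.assert_close(
    out,
    torch.nn.functional.layer_norm(x + residual, (N,), weight, bias, 1e-5),
    rtol=1e-4, atol=1e-4,
)
```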
def METHOD_NAME(m):
return getattr, (m.__objclass__, m.__name__) | [
332,
103,
2701
] |
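The one-liner above is a reducer of the shape `copyreg` expects: rebuild a method descriptor as `getattr(cls, name)`. A sketch of how it might be registered, with `reduce_descriptor` as an assumed name:

```python
import copy
import copyreg

def reduce_descriptor(m):
    # same reducer as above: rebuild the descriptor as getattr(cls, name)
    return getattr, (m.__objclass__, m.__name__)

copyreg.pickle(type(str.upper), reduce_descriptor)
assert copy.copy(str.upper) is str.upper  # round-trips through the reducer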
def METHOD_NAME(lmax: int) -> o3.Irreps:
return o3.Irreps([(2 * l + 1, (l, 1)) for l in range(lmax + 1)]) | [
14007,
3069
] |
def METHOD_NAME():
# Arrange
with open("/code/tests/test_data/V3_2_0/settings.yaml") as old_settings:
old_settings_dict = yaml.safe_load(old_settings.read())
# Act
new_settings = V3_2_1.migrate(old_settings_dict)
# Assert
assert V3_2_1.validate(new_settings)
# manage_tftp removed
assert "manage_tftp" not in new_settings | [
9,
2744,
7278,
988,
1170
] |
def METHOD_NAME(
input_type: Union[str, dict], skip_convert_error=False, as_str=False
):
"""Converting the TerminusDB datatypes into Python types, it will not detect self define types (i.e. object properties) so if converting object properties, skip_convert_error need to be True.
Parameters
----------
input_type : str or dict
TerminusDB datatypes to be converted.
skip_convert_error : bool
Whether to skip the error raised when the given datatype cannot be converted to a Python type. If set to True (and as_str set to False) and the type cannot be converted, the type will be returned unchanged.
as_str : bool
Convert the type and present it as a string (e.g. used in constructing scripts). Convert errors are always skipped if set to True.
"""
if as_str:
skip_convert_error = True
invert_type = {v: k for k, v in CONVERT_TYPE.items()}
if isinstance(input_type, dict):
if input_type["@type"] == "List":
if as_str:
return f'List[{METHOD_NAME(input_type["@class"], as_str=True)}]'
else:
return List[METHOD_NAME(input_type["@class"])]
elif input_type["@type"] == "Set":
if as_str:
return f'Set[{METHOD_NAME(input_type["@class"], as_str=True)}]'
else:
return Set[METHOD_NAME(input_type["@class"])]
elif input_type["@type"] == "Optional":
if as_str:
return f'Optional[{METHOD_NAME(input_type["@class"], as_str=True)}]'
else:
return Optional[METHOD_NAME(input_type["@class"])]
else:
raise TypeError(
f"Input type {input_type} cannot be converted to Python type"
)
elif input_type in invert_type:
if as_str:
return invert_type[input_type].__name__
return invert_type[input_type]
elif skip_convert_error:
if as_str:
return f"'{input_type}'"
return input_type
else:
raise TypeError(f"Input type {input_type} cannot be converted to Python type") | [
280,
-1,
44
] |
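The converter above relies on inverting a `CONVERT_TYPE` table that maps Python types to TerminusDB type names. A minimal sketch of that inversion; the entries below are representative assumptions, not the client's actual table:

```python
CONVERT_TYPE = {
    str: "xsd:string",
    int: "xsd:integer",
    float: "xsd:decimal",
    bool: "xsd:boolean",
}
invert_type = {v: k for k, v in CONVERT_TYPE.items()}
assert invert_type["xsd:integer"] is int
assert invert_type["xsd:string"].__name__ == "str"  # what the as_str=True path returns
```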
def METHOD_NAME(self):
root = self.prog["drgn_test_radix_tree_one"].address_of_()
self.assertIdentical(
list(radix_tree_for_each(root)),
[(666, Object(self.prog, "void *", 0xDEADB00))],
) | [
9,
2896,
151,
43,
1353,
206
] |
def METHOD_NAME():
@tvm.script.ir_module
class Before:
@R.function
def main(x: R.Tensor):
R.func_attr({"global_symbol": "main"})
y = R.const([1, 2])
z = R.call_packed("test.vm.add", x, y, sinfo_args=(R.Tensor))
return z
@tvm.script.ir_module
class Expected:
@T.prim_func
def __vmtir__main(ctx_ptr: T.handle, r: T.handle, c: T.handle, f: T.handle):
# function attr dict
T.func_attr({"global_symbol": "__vmtir__main"})
# body
T.anylist_setitem_call_packed(
r,
2,
"test.vm.add",
T.anylist_getitem(r, 0),
T.anylist_getitem(c, 0),
)
T.anylist_setitem_call_packed(r, 1, "vm.builtin.copy", T.anylist_getitem(r, 2))
before = Before
expected = Expected
after = get_tir_mod(before)
assert_structural_equal(expected, after) | [
9,
3402,
128
] |
def METHOD_NAME():
env_file = Path(ClientSettings.Config.env_file)
log.info("Creating %s", f"{env_file}")
kwargs = {}
kwargs["OSPARC_API_URL"] = input("OSPARC_API_URL: ").strip() or None
kwargs["OSPARC_USER_EMAIL"] = (
input("OSPARC_USER_EMAIL: ") or getpass.getuser() + "@itis.swiss"
)
kwargs["OSPARC_USER_PASSWORD"] = getpass.getpass()
with open(env_file, "wt") as fh:
for key, value in kwargs.items():
print(key, value)
if value is not None:
fh.write(f"{key}={value}\n")
log.info("%s: %s", f"{env_file=}", f"{env_file.exists()=}") | [
176
] |
def METHOD_NAME(event):
if event.inaxes == ax:
found = False
for line in lines:
cont, ind = line.contains(event)
if cont:
update_annot(line, ind)
annot.set_visible(True)
fig.canvas.draw_idle()
found = True
break
if not found and annot.get_visible():
annot.set_visible(False)
fig.canvas.draw_idle() | [
1935
] |
async def METHOD_NAME(self): | [
958,
103
] |
def METHOD_NAME(self):
return self._docker_env.METHOD_NAME | [
82
] |
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(extensions):
files = []
if ARGS.filenames:
files = ARGS.filenames
else:
for root, dirs, walkfiles in os.walk(ARGS.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for dpath in ARGS.skip:
if dpath in dirs:
dirs.remove(dpath)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
if not has_ignored_header(pathname):
outfiles.append(pathname)
return outfiles | [
19,
1537
] |
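The `dirs.remove(dpath)` calls above use the documented `os.walk` contract: mutating the `dirs` list in place prevents descent into pruned directories. A self-contained sketch of the same pruning:

```python
import os

SKIP = {".git", "node_modules"}  # assumed skip list
for root, dirs, files in os.walk("."):
    # prune in place so os.walk never descends into skipped directories
    dirs[:] = [d for d in dirs if d not in SKIP]
    for name in files:
        print(os.path.join(root, name))
```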
def METHOD_NAME():
"""test that image names dont change when using a custom repo because that
breaks pull-through caching proxies in use by various customers."""
extra_globals = {
"blackboxExporterEnabled": True,
"postgresqlEnabled": True,
"prometheusPostgresExporterEnabled": True,
"pspEnabled": True,
"veleroEnabled": True,
}
repository = "quay.io/astronomer"
public_repo_docs = render_chart(values={"global": extra_globals})
private_repo_docs = render_chart(
values={
"global": {
"privateRegistry": {"enabled": True, "repository": repository},
**extra_globals,
}
},
)
# should be same number of images regardless of where they come from
assert len(public_repo_docs) == len(private_repo_docs)
search_string = "spec.template.spec.containers[*].image"
differently_named_images = []
for public_repo_doc, private_repo_doc in zip(public_repo_docs, private_repo_docs):
public_repo_images = jmespath.search(search_string, public_repo_doc)
private_repo_images = jmespath.search(search_string, private_repo_doc)
if public_repo_images is not None or private_repo_images is not None:
assert len(public_repo_images) == len(private_repo_images)
for public_repo_image, private_repo_image in zip(
public_repo_images, private_repo_images
):
if public_repo_image != private_repo_image:
print(
f"image name differs when using a private repo named same as public - {public_repo_image} vs {private_repo_image}"
)
differently_named_images.append(
(public_repo_image, private_repo_image)
)
assert not differently_named_images, differently_named_images | [
9,
547,
510,
1230,
660,
83,
983
] |
def METHOD_NAME(self):
return '' | [
19,
1008,
459
] |
def METHOD_NAME(self): | [
9,
1047,
75,
303,
4045,
623,
688
] |
def METHOD_NAME(self, WMOs, embed):
if has_ipython:
import IPython
dsh = open_sat_altim_report(WMO=WMOs, embed=embed, api_server=mocked_server_address)
if has_ipython and embed is not None:
if has_ipywidgets:
if embed == "dropdown":
assert isinstance(dsh, Callable)
assert isinstance(dsh(2901623), IPython.display.Image)
if embed == "slide":
assert isinstance(dsh, Callable)
else:
assert dsh is None
else:
assert isinstance(dsh, dict) | [
9,
1452,
10270,
-1,
339
] |
def METHOD_NAME(self, doc=None): | [
9,
2369
] |
def METHOD_NAME(self):
self.oks_sigmas = [0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089]
self.flip_indexes = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] | [
0,
1
] |
def METHOD_NAME(times):
"""Format `times` into a string representing approximate milliseconds.
`times` is a collection of durations in seconds.
"""
ordered = sorted(times)
n = len(ordered) - 1
assert n >= 0
lower = int(ordered[int(math.floor(n * 0.05))] * 1000)
upper = int(ordered[int(math.ceil(n * 0.95))] * 1000)
if upper == 0:
return "< 1ms"
elif lower == upper:
return f"~ {lower}ms"
else:
return f"~ {lower}-{upper} ms" | [
275,
3665
] |
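A worked example of the percentile arithmetic above, tracing the index math for a four-sample input:

```python
import math

times = sorted([0.0009, 0.0012, 0.0018, 0.0031])       # seconds
n = len(times) - 1                                      # 3
lower = int(times[int(math.floor(n * 0.05))] * 1000)   # floor(0.15) -> index 0 -> 0 ms
upper = int(times[int(math.ceil(n * 0.95))] * 1000)    # ceil(2.85)  -> index 3 -> 3 ms
print(f"~ {lower}-{upper} ms")                          # "~ 0-3 ms"
```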
def METHOD_NAME():
"""deapext.algorithms: Testing eaAlphaMuPlusLambdaCheckpoint"""
deap.creator.create('fit', deap.base.Fitness, weights=(-1.0,))
deap.creator.create(
'ind',
numpy.ndarray,
fitness=deap.creator.__dict__['fit'])
population = [deap.creator.__dict__['ind'](x)
for x in numpy.random.uniform(0, 1,
(10, 2))]
toolbox = deap.base.Toolbox()
toolbox.register("evaluate", deap.benchmarks.sphere)
toolbox.register("mate", lambda x, y: (x, y))
toolbox.register("mutate", lambda x: (x,))
toolbox.register("select", lambda pop, mu: pop)
with mock.patch('pickle.dump'):
with mock.patch('bluepyopt.deapext.algorithms.open',
mock.mock_open()):
population, hof, logbook, history = \
bluepyopt.deapext.algorithms.eaAlphaMuPlusLambdaCheckpoint(
population=population,
toolbox=toolbox,
mu=1.0,
cxpb=1.0,
mutpb=1.0,
ngen=2,
stats=None,
halloffame=None,
cp_frequency=1,
cp_filename='cp_test',
continue_cp=False)
import random
with mock.patch('pickle.load', return_value={'population': population,
'logbook': logbook,
'history': history,
'parents': None,
'halloffame': None,
'rndstate': random.getstate(),
'generation': 1}):
with mock.patch('bluepyopt.deapext.algorithms.open',
mock.mock_open()):
new_population, hof, logbook, history = \
bluepyopt.deapext.algorithms.eaAlphaMuPlusLambdaCheckpoint(
population=population,
toolbox=toolbox,
mu=1.0,
cxpb=1.0,
mutpb=1.0,
ngen=0,
stats=None,
halloffame=None,
cp_frequency=1,
cp_filename='cp_test',
continue_cp=True)
for ind1, ind2 in zip(new_population, population):
assert list(ind1) == list(ind2) | [
9,
13414,
1139,
2283,
222,
1778,
1830
] |
def METHOD_NAME(
self,
mock_find_executable,
mock_makedirs,
mock_chdir):
mock_find_executable.return_value = self.expected_compiler
# specific `build_arch` mocks
with mock.patch(
"builtins.open",
mock.mock_open(read_data="#define ZLIB_VERSION 1.1\nfoo")
) as mock_open_zlib, mock.patch(
"pythonforandroid.recipes.python3.sh.Command"
) as mock_sh_command, mock.patch(
"pythonforandroid.recipes.python3.sh.make"
) as mock_make, mock.patch(
"pythonforandroid.recipes.python3.sh.cp"
) as mock_cp:
self.recipe.build_arch(self.arch)
# make sure that the mocked methods are actually called
recipe_build_dir = self.recipe.get_build_dir(self.arch.arch)
sh_command_calls = {
f"{recipe_build_dir}/config.guess",
f"{recipe_build_dir}/configure",
}
for command in sh_command_calls:
self.assertIn(
mock.call(command),
mock_sh_command.mock_calls,
)
mock_open_zlib.assert_called()
self.assertEqual(mock_make.call_count, 1)
for make_call, kw in mock_make.call_args_list:
self.assertIn(
f'INSTSONAME={self.recipe._libpython}', make_call
)
mock_cp.assert_called_with(
"pyconfig.h", join(recipe_build_dir, 'Include'),
)
mock_makedirs.assert_called()
mock_chdir.assert_called() | [
9,
56,
2837
] |
def METHOD_NAME(self):
so = copy.deepcopy(self.sting_obj)
assert np.allclose(so.guefus, [4, 5, 2])
so.guefus = np.random.randint(0, 4, 3)
ts = so.to_xarray()
new_so = DummyStingrayObj.from_xarray(ts)
_check_equal(so, new_so) | [
9,
7724,
3544
] |
def METHOD_NAME(self) -> Cost:
""" This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively """
if self.race == Race.Zerg and Attribute.Structure.value in self.attributes:
return Cost(self._proto.mineral_cost - 50, self._proto.vespene_cost, self._proto.build_time)
return self.cost | [
1955,
-1,
8468
] |
def METHOD_NAME(self, task_id, task_update_request, **kwargs): ... | [
1575,
620,
147,
41,
721,
100
] |
def METHOD_NAME(list_a, mask_l, level, flags, idx=0):
mask_out, ind_true, ind_false, result_t, result_f = [], [], [], [], []
if level > 1:
if isinstance(list_a, (list, tuple, np.ndarray)):
result = [mask_out, ind_true, ind_false, result_t, result_f]
for idx, sub_list in enumerate(list_a):
for i, res in enumerate(METHOD_NAME(sub_list, mask_l, level - 1, flags, idx)):
result[i].append(res)
else:
indx = min(len(mask_l)-1, idx)
mask = mask_l[indx]
if type(list_a) == np.ndarray:
return mask_array(list_a, mask, flags)
else:
return mask_list(list_a, mask, flags)
return mask_out, ind_true, ind_false, result_t, result_f | [
361,
365
] |
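The recursion above descends `level - 1` sub-lists before applying the mask at the leaves. A stripped-down sketch of the same idea, with a simple truthy-mask stand-in for `mask_list`/`mask_array`:

```python
def mask_flat(seq, mask):
    # stand-in for mask_list / mask_array: keep items where the mask is truthy
    return [x for x, m in zip(seq, mask) if m]

def mask_nested(data, mask, level):
    if level > 1 and isinstance(data, (list, tuple)):
        return [mask_nested(sub, mask, level - 1) for sub in data]
    return mask_flat(data, mask)

print(mask_nested([[1, 2, 3], [4, 5, 6]], [1, 0, 1], 2))  # [[1, 3], [4, 6]]
```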
def METHOD_NAME(self): | [
9,
297,
9518,
219,
280,
612,
422
] |
def METHOD_NAME(self):
response = self.client.get(reverse("manage-cdn"))
expected_url = "/admin/login/?next=/admin/cdn/"
self.assertRedirects(
response, expected_url, fetch_redirect_response=False
) | [
9,
139,
1970
] |
def METHOD_NAME(self):
"""
Test :func:`colour.models.rgb.transfer_functions.dji_d_log.\ | [
9,
390,
2300,
-1,
390
] |
def METHOD_NAME():
import numpy as np
b = np.sign(np.int32(-32))
return b | [
5790,
1961
] |
def METHOD_NAME(self):
"""
Retrieves the operational status of the device
Returns:
A boolean value, True if device is operating properly, False if not
"""
return True | [
19,
452
] |
def METHOD_NAME(
request_mock, user, read_thread, default_category
):
reply_thread(read_thread, is_unapproved=True)
make_read_aware(request_mock, default_category)
assert default_category.is_read
assert not default_category.is_new | [
9,
253,
41,
5089,
72,
623,
203
] |
def METHOD_NAME(features):
encoding = DirectColorEncoding(feature='custom_colors')
values = encoding(features)
assert_colors_equal(values, list(features['custom_colors'])) | [
9,
4234
] |
def METHOD_NAME(self):
path = os.path.join(FIXTURES_DIR, 'd_t1.yaml')
runner = CliRunner()
diffed = runner.invoke(grep, ['Sammy', path])
assert 0 == diffed.exit_code
assert "{'matched_values': ['root[2][0]']}\n" == diffed.output | [
9,
462,
3433
] |
def METHOD_NAME(self) -> int:
return int(self.obj.service().startHandle()) | [
549,
276
] |
def METHOD_NAME(defn, prefix, location=None):
s = ''
for instance in defn.instances:
drive_undriven_clock_types_in_inst(defn, instance)
if location and instance.loc:
x = location[0] + instance.loc[0]
y = location[1] + instance.loc[1]
z = location[2] + instance.loc[2]
instlocation = (x,y,z)
else:
instlocation = None
s += compileinstance(instance, prefix, instlocation)
s += '# wire instance outputs\n'
return s + compileinputs(defn, prefix) | [
17607
] |
def METHOD_NAME(orbit):
mod = orbit['modulus']
conductor = orbit['conductor']
orbit_index = orbit['orbit_index']
orbit_letter = cremona_letter_code(orbit_index - 1)
orbit_label = "{}.{}".format(mod, orbit_letter)
order = orbit['order']
is_odd = parity_string(orbit['parity'])
is_prim = _is_primitive(orbit['is_primitive'])
results = []
for num in orbit['galois_orbit']:
results.append((
mod,
num,
conductor,
orbit_label,
order,
is_odd,
is_prim,
WebDirichlet.char2tex(mod, num)
))
return results | [
100,
280,
1267,
7617
] |
def METHOD_NAME(self, constructor: PuzzleInfo, inner_puzzle: Program) -> Program:
also = constructor.also()
if also is not None:
inner_puzzle = self._construct(also, inner_puzzle)
launcher_hash = constructor["launcher_ph"] if "launcher_ph" in constructor else SINGLETON_LAUNCHER_HASH
return puzzle_for_singleton(constructor["launcher_id"], inner_puzzle, launcher_hash) | [
363
] |
def METHOD_NAME(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_snippet_details()
return 1
add_separator = False
snippets = self.client.kickstart.snippet.listCustom(self.session)
snippet = None
for name in args:
for s in snippets:
if s.get('name') == name:
snippet = s
break
if not snippet:
logging.warning(_N('%s is not a valid snippet') % name)
continue
if add_separator:
print(self.SEPARATOR)
add_separator = True
print(_('Name: %s') % snippet.get('name'))
print(_('Macro: %s') % snippet.get('fragment'))
print(_('File: %s') % snippet.get('file'))
print('')
print(snippet.get('contents'))
return 0 | [
74,
6176,
2051
] |
def METHOD_NAME(self) -> List[str]:
"""return list of all existing folder names."""
assert not self._idling
return [folder.name for folder in self.conn.folder.list()] | [
245,
2547
] |
def METHOD_NAME(self):
for att in self:
if att.in_invoice_ids and len(att.in_invoice_ids) == att.invoices_number:
att.registered = True
else:
att.registered = False | [
226,
3024
] |
def METHOD_NAME():
user = 'username'
host = 'localhost'
port = '3306'
db = 'mysql'
return "mysql://%s@%s:%s/%s" % (user, host, port, db) | [
550,
144
] |
def METHOD_NAME(skale_bp, db, cert_key_pair_host, cert_key_pair):
cert_path, key_path = cert_key_pair_host
with mock.patch('web.routes.ssl.set_schains_need_reload'), \
mock.patch('web.routes.ssl.reload_nginx'):
with files_data(cert_path, key_path, force=False) as data:
response = post_bp_files_data(
skale_bp,
get_api_url(BLUEPRINT_NAME, 'upload'),
file_data=data
)
assert response == {
'status': 'error',
'payload': 'SSL Certificates are already uploaded'
}
with files_data(cert_path, key_path, force=True) as data:
response = post_bp_files_data(
skale_bp,
get_api_url(BLUEPRINT_NAME, 'upload'),
file_data=data
)
assert response == {
'status': 'ok',
'payload': {}
} | [
9,
172,
1941,
1985
] |
async def METHOD_NAME(client, token, date, expected_count, first_id, start=True):
query = quote(f"start_date:{date}") if start else quote(f"end_date:{date}")
ind = 0 if start else -1
resp = await client.get(f"/invoices?query={query}&sort=created&desc=false", headers={"Authorization": f"Bearer {token}"})
assert resp.status_code == 200
assert resp.json()["count"] == expected_count
assert resp.json()["result"][ind]["id"] == first_id | [
250,
447,
153,
539
] |
def METHOD_NAME(mock_alias_mult):
"""
Tests return of multiple targets to one alias in opposite order
"""
with patch(
"salt.modules.aliases.__parse_aliases",
MagicMock(return_value=mock_alias_mult),
):
ret = aliases.has_target("hello", "[email protected], [email protected]")
assert not ret | [
9,
220,
1030,
1562,
2716
] |
def METHOD_NAME(self):
"""
Retrieves the part number of the component
Returns:
string: Part number of component
"""
return 'NA' | [
19,
578
] |
def METHOD_NAME(value, inputUnit, outputUnit):
if inputUnit is outputUnit:
return value
if value is None:
return None
if _converters is None:
_initConverters()
converter = _converters.get((inputUnit, outputUnit), None)
if converter is None:
raise TypeError("Impossible to convert from %s to %s" % (inputUnit.name, outputUnit.name))
return converter(value) | [
197
] |
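The converter above dispatches through a lazily built `(inputUnit, outputUnit) -> callable` table. A minimal sketch of that design, with assumed `Unit` members and conversion factors:

```python
from enum import Enum

class Unit(Enum):
    METERS = "m"
    FEET = "ft"

_converters = {
    (Unit.METERS, Unit.FEET): lambda v: v * 3.28084,
    (Unit.FEET, Unit.METERS): lambda v: v / 3.28084,
}
print(_converters[(Unit.METERS, Unit.FEET)](2.0))  # 6.56168
```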
def METHOD_NAME(excitation, c=Constant(1.), record=False, annotate=False, objective=None):
""" The forward problem """
# Define function space
U = FunctionSpace(mesh, "Lagrange", 1)
# Set up initial values
u0 = Function(U, name = "u0", annotate = annotate)
u1 = Function(U, name = "u1", annotate = annotate)
# Define test and trial functions
v = TestFunction(U)
u = TrialFunction(U)
# Define variational formulation
udot = (u - 2.*u1 + u0)
uold = (0.25*u + 0.5*u1 +0.25*u0)
F = (udot*v+k*k*c*c*uold.dx(0)*v.dx(0))*dx - u*v*ds(0) + excitation*v*ds(0)
a = lhs(F)
L = rhs(F)
# Prepare solution
u = Function(U, name = "u", annotate = annotate)
# The actual timestepping
if record: rec = [u1(1.),]
i = 1
t = 0.0 # Initial time
T = 3.e-1 # Final time
times = [t,]
if objective is not None:
objective(u1, times[-1])
while t < T - .5*float(k):
excitation.t = t + float(k)
solve(a == L, u, annotate = annotate)
u0.assign(u1, annotate = annotate)
u1.assign(u, annotate = annotate)
t = i*float(k)
times.append(t)
if record:
rec.append(u1(1.0))
i += 1
if objective is not None:
objective(u1, times[-1])
if record:
np.savetxt("recorded.txt", rec)
return u1, times | [
76
] |
def METHOD_NAME(
self, resource_group_name: str, account_name: str, **kwargs: Any
) -> _models.PrivateLinkResourceListResult:
"""Gets the private link resources that need to be created for a storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourceListResult or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2022_05_01.models.PrivateLinkResourceListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2022-05-01"))
cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None)
request = build_list_by_storage_account_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | [
245,
604,
948,
598
] |
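Callers normally reach the operation above through the generated management client rather than invoking it directly. A hedged usage sketch, assuming azure-identity and azure-mgmt-storage are installed and the placeholders are filled in:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.storage import StorageManagementClient

client = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")
result = client.private_link_resources.list_by_storage_account(
    resource_group_name="<resource-group>", account_name="<storage-account>"
)
for resource in result.value:
    print(resource.name, resource.group_id)
```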
async def METHOD_NAME():
def stream_response():
yield b"streaming"
return StreamingResponse(stream_response()) | [
919
] |
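The endpoint above returns a `StreamingResponse` wrapping a sync generator. A minimal runnable FastAPI app using the same pattern (FastAPI re-exports Starlette's response class):

```python
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

@app.get("/stream")
async def stream():
    def gen():
        yield b"streaming"
    return StreamingResponse(gen())
```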
def METHOD_NAME(self, model_type):
model = get_model(model_type)
layer_db = LayerDatabase(model)
layer = layer_db.find_layer_by_name(_get_layers(model, model_type)[2].name)
org_count = len(list(layer_db._compressible_layers.values()))
splitter = SpatialSvdPruner()
splitter._perform_svd_and_split_layer(layer, 1024, layer_db)
assert layer not in list(layer_db._compressible_layers.values())
after_split_count = len(list(layer_db._compressible_layers.values()))
assert (org_count + 1) == after_split_count | [
9,
407,
7505,
61,
265,
94
] |
def METHOD_NAME(self, message):
return METHOD_NAME(message) | [
6316
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(src_ds, dst_ds):
""" Copy the nodata value from on dataset to another """
assert src_ds.RasterCount == dst_ds.RasterCount
for i in range(src_ds.RasterCount):
src_band = src_ds.GetRasterBand(i+1)
if src_band.GetNoDataValue() is not None:
dst_band = dst_ds.GetRasterBand(i+1)
dst_band.SetNoDataValue(src_band.GetNoDataValue()) | [
215,
-1
] |
def METHOD_NAME(text, io=None, flist=None):
"""Open the Find in Files dialog.
Module-level function to access the singleton GrepDialog
instance and open the dialog. If text is selected, it is
used as the search phrase; otherwise, the previous entry
is used.
Args:
text: Text widget that contains the selected text for
default search phrase.
io: iomenu.IOBinding instance with default path to search.
flist: filelist.FileList instance for OutputWindow parent.
"""
root = text._root()
engine = searchengine.get(root)
if not hasattr(engine, "_grepdialog"):
engine._grepdialog = GrepDialog(root, engine, flist)
dialog = engine._grepdialog
searchphrase = text.get("sel.first", "sel.last")
dialog.open(text, searchphrase, io) | [
3433
] |
def METHOD_NAME():
schema = os.path.join(localdir, 'gluu_schema.json')
f = open(schema)
json_string = f.read()
f.close()
data = json.loads(json_string)
objClasses = data['objectClasses']
attTypes = data['attributeTypes']
docs = ''
for obj_class in objClasses:
docs += "\n\n## {}".format(" (or) ".join(obj_class['names']))
if 'desc' in obj_class:
docs += "\n_{}_".format(obj_class['desc'].encode('utf-8'))
for obj_attr in obj_class['may']:
attr_docs_added = False
for attr_type in attTypes:
if obj_attr in attr_type['names']:
docs += "\n* __{}__".format(" (or) ".join(attr_type['names']))
if 'desc' in attr_type:
docs += ": {}".format(attr_type['desc'].encode('utf-8'))
attr_docs_added = True
break
if not attr_docs_added:
docs += "\n* __{}__".format(obj_attr)
print(docs) | [
93,
135,
672
] |
def METHOD_NAME(decoy: Decoy) -> MagDeckHardware:
"""Get a mock synchronous module hardware."""
return decoy.mock(name="MagDeckHardware") # type: ignore[no-any-return] | [
248,
164,
298,
1242
] |
def METHOD_NAME(required_params: Sequence[ModelField], received_params: Union[Mapping[str, Any], QueryParams, Headers]) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
... | [
377,
434,
24,
335
] |
def METHOD_NAME(self):
if self._config is None:
self._config, _ = get_datasource_config(self._config_id, self.domain, self.data_source_type)
return self._config | [
200
] |
def METHOD_NAME(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[0][-1]
self.kernel = self.add_weight(
shape=(self.K, input_dim, self.channels),
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.channels,),
initializer=self.bias_initializer,
name="bias",
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
self.built = True | [
56
] |
async def METHOD_NAME(self, scope: str, **kwargs: Any) -> _models.SystemAssignedIdentity:
"""Gets the systemAssignedIdentity available under the specified RP scope.
:param scope: The resource provider scope of the resource. Parent resource being extended by
Managed Identities. Required.
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SystemAssignedIdentity or the result of cls(response)
:rtype: ~azure.mgmt.msi.v2018_11_30.models.SystemAssignedIdentity
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2018-11-30"] = kwargs.pop("api_version", _params.pop("api-version", "2018-11-30"))
cls: ClsType[_models.SystemAssignedIdentity] = kwargs.pop("cls", None)
request = build_get_by_scope_request(
scope=scope,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SystemAssignedIdentity", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | [
19,
604,
913
] |
def METHOD_NAME(project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTiersResult:
"""
Get all available machine types (tiers) for a project, for example, db-custom-1-3840. For more information see the
[official documentation](https://cloud.google.com/sql/)
and
[API](https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/tiers/list).
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
tiers = gcp.sql.get_tiers(project="sample-project")
all_available_tiers = [v.tier for v in tiers.tiers]
pulumi.export("avaialbleTiers", all_available_tiers)
```
:param str project: The Project ID for which to list tiers. If `project` is not provided, the project defined within the default provider configuration is used.
"""
__args__ = dict()
__args__['project'] = project
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('gcp:sql/getTiers:getTiers', __args__, opts=opts, typ=GetTiersResult).value
return AwaitableGetTiersResult(
id=pulumi.get(__ret__, 'id'),
project=pulumi.get(__ret__, 'project'),
tiers=pulumi.get(__ret__, 'tiers')) | [
19,
8307
] |
def METHOD_NAME(self):
return len(self.dict2set(self.mdict)), len(self.mdict) | [
19,
3168,
29
] |
def METHOD_NAME(self) -> bool:
return self._proba | [
2550
] |
def METHOD_NAME(self, data):
params = self.params.copy()
lookup = {
"biweight": "biw",
"cosine": "cos",
"cosine2": "cos2",
"epanechnikov": "epa",
"gaussian": "gau",
"triangular": "tri",
"triweight": "triw",
"uniform": "uni",
}
with suppress(KeyError):
params["kernel"] = lookup[params["kernel"].lower()]
if params["kernel"] not in lookup.values():
msg = (
"kernel should be one of {}. "
"You may use the abbreviations {}"
)
raise PlotnineError(msg.format(lookup.keys(), lookup.values()))
return params | [
102,
434
] |
def METHOD_NAME(precalculated_rf_classifier_explainer):
fig = precalculated_rf_classifier_explainer.plot_classification()
assert isinstance(fig, go.Figure)
fig = precalculated_rf_classifier_explainer.plot_classification(percentage=True)
assert isinstance(fig, go.Figure)
fig = precalculated_rf_classifier_explainer.plot_classification(cutoff=0)
assert isinstance(fig, go.Figure)
fig = precalculated_rf_classifier_explainer.plot_classification(cutoff=1)
assert isinstance(fig, go.Figure) | [
9,
1288,
493
] |
def METHOD_NAME(self):
# type: () -> None
api.package_app(
project_dir=self.source_dir,
output_dir=self._sam_package_dir,
stage=self.stage_name,
chalice_config=self.stage_config,
) | [
360,
991
] |
def METHOD_NAME(self):
return self.get_predictor_configs(), ["gaussian_random"], (1e-5, 1e-5) | [
734,
4476,
736
] |
def METHOD_NAME():
"""
Get the current alias table.
"""
try:
alias_table = get_config_parser()
alias_table.read(azext_alias.alias.GLOBAL_ALIAS_PATH)
return alias_table
except Exception: # pylint: disable=broad-except
return get_config_parser() | [
19,
533,
410
] |
def METHOD_NAME(self):
a = self.test_class()
a.value = 2
self.assertIs(type(a.value), float)
self.assertEqual(a.value, 2.0)
a.value_or_none = 2
self.assertIs(type(a.value_or_none), float)
self.assertEqual(a.value_or_none, 2.0) | [
9,
1186,
565,
4143
] |
def METHOD_NAME(self, name: str) -> Integration:
if name in self.__cache:
return self.__cache[name]
path = self.repo.path / name
if not path.is_dir():
raise OSError(f'Integration does not exist: {Path(self.repo.path.name, name)}')
integration = Integration(path, self.repo.path, self.repo.config)
if not integration.is_valid:
raise OSError(f'Path is not an integration nor a Python package: {Path(self.repo.path.name, name)}')
self.__cache[name] = integration
return integration | [
19
] |
def METHOD_NAME(obj, resolver, **kw):
meta = obj.meta
num = int(meta['partitions_-size'])
data = []
for i in range(num):
if meta[f'partitions_-{i}'].islocal:
data.append(resolver(obj.member(f'partitions_-{i}'), **kw))
tf_data = data[0]
for i in range(1, len(data)):
tf_data = tf_data.concatenate(data[i])
return tf_data | [
554,
285,
1616,
1836
] |
def METHOD_NAME(self):
from cms.wizards.wizard_pool import wizard_pool
delete = [
"djangocms_blog",
"djangocms_blog.cms_wizards",
]
for module in delete:
if module in sys.modules:
del sys.modules[module]
wizard_pool._reset()
super().METHOD_NAME() | [
0,
1
] |
def METHOD_NAME(self) -> str | None:
... | [
2312,
1067
] |
def METHOD_NAME(res: TimingResultType, header: Tuple[str, ...] = None) -> str:
if header is None:
header = ("model", "task", "mean", "var")
out = ""
def write_line(*args):
nonlocal out
out += f"| {' | '.join(str(a) for a in args)} |\n"
# Make it a markdown table
write_line(*header)
write_line(*["--"] * len(header))
for model, tasks in res.items():
for task, line in tasks.items():
write_line(*(model, task) + line)
return out | [
24,
108,
410
] |
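A usage sketch for the table writer above; `format_timings` is an assumed name for the masked function, and `TimingResultType` is assumed to be `Dict[str, Dict[str, Tuple[float, float]]]`:

```python
res = {"resnet50": {"train": (1.23, 0.02), "eval": (0.45, 0.01)}}
print(format_timings(res))
# | model | task | mean | var |
# | -- | -- | -- | -- |
# | resnet50 | train | 1.23 | 0.02 |
# | resnet50 | eval | 0.45 | 0.01 |
```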
def METHOD_NAME(self):
collision = Collision()
collision.set_raw_pose(Pose3d(-10, -20, -30, math.pi, math.pi, math.pi))
collision2 = copy.deepcopy(collision)
self.assertEqual(Pose3d(-10, -20, -30, math.pi, math.pi, math.pi),
collision2.raw_pose()) | [
9,
6713
] |
def METHOD_NAME(vars_local):
"""Tests that files are written correctly."""
url, url_seg = vars_local
files = sorted(glob.glob("*.feather"))
for f in sorted(files):
os.remove(f)
nbr = nbrhood.NeighborhoodFeatures(
url=url, radius=1, offset=OFF, segment_url=url_seg
)
nbr.fit([2], 5, file_path="test", batch_size=10)
files = sorted(glob.glob("*.feather"))
for f in sorted(files):
print(f)
os.remove(f)
assert files == ["test0_10_2_4.feather"]
df_nbr = nbr.fit([2], 5, file_path="test", batch_size=10, start_seg=2, start_vert=0)
files = sorted(glob.glob("*.feather"))
for f in sorted(files):
os.remove(f)
assert files == ["test0_10_2_4.feather"] | [
9,
171,
77
] |
def METHOD_NAME():
style = Style(bold=True, color="red", bgcolor="blue")
colorless_style = style.without_color
assert colorless_style.color == None
assert colorless_style.bgcolor == None
assert colorless_style.bold == True
null_style = Style.null()
assert null_style.without_color == null_style | [
9,
529,
36
] |
def METHOD_NAME(self, data, user):
if not self.filtered_users or user in self.filtered_users:
return func(self, data, user) | [
527
] |
def METHOD_NAME(out_dir, mode, tsv_file, parameters):
from clinicadl.prepare_data.prepare_data import DeepLearningPrepareData
DeepLearningPrepareData(
caps_directory=out_dir / f"caps_{mode}",
tsv_file=tsv_file,
n_proc=1,
parameters=parameters,
) | [
297,
1680
] |
def METHOD_NAME(self):
return 0 | [
19,
525,
8291,
281
] |
def METHOD_NAME(account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccountResult]:
"""
Get a Maps Account.
Azure REST API version: 2021-02-01.
:param str account_name: The name of the Maps Account.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
... | [
19,
598,
146
] |
def METHOD_NAME(cluster_name: Optional[str] = None,
private_cloud_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
"""
A cluster resource
:param str cluster_name: Name of the cluster in the private cloud
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['privateCloudName'] = private_cloud_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:avs/v20200320:getCluster', __args__, opts=opts, typ=GetClusterResult).value
return AwaitableGetClusterResult(
cluster_id=pulumi.get(__ret__, 'cluster_id'),
cluster_size=pulumi.get(__ret__, 'cluster_size'),
hosts=pulumi.get(__ret__, 'hosts'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
sku=pulumi.get(__ret__, 'sku'),
type=pulumi.get(__ret__, 'type')) | [
19,
2059
] |
def METHOD_NAME(self): | [
19,
355
] |
def METHOD_NAME(self): | [
656,
679,
551
] |
def METHOD_NAME(self):
exc = TaurusConfigError('Metric is required in Local monitoring client')
metric_names = self.config.get('metrics', exc)
bad_list = set(metric_names) - set(self.AVAILABLE_METRICS)
if bad_list:
self.log.warning('Wrong metrics found: %s', bad_list)
good_list = set(metric_names) & set(self.AVAILABLE_METRICS)
if not good_list:
raise exc
self.metrics = list(set(good_list))
self.monitor = ServerLocalMonitor(self.log, self.metrics, self.engine)
self.interval = dehumanize_time(self.config.get("interval", self.engine.check_interval))
if self.config.get("logging", False):
if not PY3:
self.log.warning("Logging option doesn't work on python2.")
else:
self.logs_file = self.engine.create_artifact("local_monitoring_logs", ".csv")
with open(self.logs_file, "a", newline='') as mon_logs:
logs_writer = csv.writer(mon_logs, delimiter=',')
metrics = ['ts'] + sorted([metric for metric in good_list])
logs_writer.writerow(metrics) | [
707
] |