text (string, lengths 15–7.82k) | ids (sequence, lengths 1–7)
---|---|
def METHOD_NAME(self):
"""Get boolean indicating whether edge is directed.
Returns:
bool True if edge is directed, if nothing set None
"""
return self._directed | [
137,
10068
] |
def METHOD_NAME(self, alarm: Alarm):
"""
Runs when an alarm is raised. Sets the class' killed state to ``True``.
"""
self._killed = True | [
3966
] |
def METHOD_NAME(self, y):
if self.pad and y.shape[-1] < 14000:
y = F.pad(y, (0, 14000 - y.shape[-1]))
window = torch.hann_window(self.win_length).to(y)
y = torch.stft(y, n_fft=self.embedder_params["n_fft"],
hop_length=self.hop_length,
win_length=self.win_length,
window=window)
magnitudes = torch.norm(y, dim=-1, p=2) ** 2
mel = torch.log10(self.mel_basis @ magnitudes + 1e-6)
return mel | [
19,
3779
] |
def METHOD_NAME(infile: Path, outfile: Path, edge_types: List[str]) -> None:
with text_out(outfile) as fh:
exporter = NXGraphExporter(fh, edge_types=edge_types)
export_stream(exporter, infile) | [
294,
-1
] |
def METHOD_NAME(self, message):
index = self.find("id", str(id(message)))
if index != -1:
self.setProperty(index, "title", message.getTitle()) | [
69,
277,
2893,
1180
] |
def METHOD_NAME(self, user: User) -> PosixPath:
if user.is_anonymous:
user_id = "anonymous"
else:
user_id = str(user.id)
return self.UPLOAD_DIR / user_id | [
21,
1190
] |
def METHOD_NAME(self):
return RET_OK | [
7267
] |
def METHOD_NAME(self, filename):
self.filename = filename or ''
if filename and os.path.exists(filename):
with open(filename, 'r') as handle:
content = handle.read()
else:
content = ''
self._set(content) | [
1452
] |
def METHOD_NAME(self):
return bool(self.__flags & DEF_ANNOT) | [
137,
9109
] |
def METHOD_NAME(self, organisation, div, subtype=None):
field_id = "__".join(
[str(x) for x in [organisation.pk, div.pk, subtype] if x]
)
field = forms.ChoiceField(
choices=self.choices,
widget=dc_forms.RadioSelectCluster,
label=div.name,
initial="no_seats",
required=False,
)
self.fields[field_id] = field | [
238,
97,
101
] |
def METHOD_NAME():
return (
# The order of these source is significant as they will short-circuit
HTTPSource,
FilePathSource,
BytesIOSource,
BufferedIOSource,
TextIOSource,
WrappedTemporaryFileSource,
IterableSource
) | [
19,
249,
505
] |
def METHOD_NAME(func):
get_queue_or_skip()
x = dpt.ones((), dtype="i4")
assert_equal(dpt.asnumpy(func(x)), True)
x = dpt.zeros((), dtype="i4")
assert_equal(dpt.asnumpy(func(x)), False) | [
9,
201,
6603,
3575
] |
def METHOD_NAME(calculation_layer, property_type):
"""Tests serialisation and deserialization of a calculation schema."""
schema = registered_calculation_schemas[calculation_layer][property_type]
if callable(schema):
schema = schema()
json_schema = schema.json()
schema_from_json = WorkflowSchema.parse_json(json_schema)
property_recreated_json = schema_from_json.json()
assert json_schema == property_recreated_json | [
9,
135,
2109
] |
def METHOD_NAME(self):
# size()
self.assertEqual(self.ca.size(),3) | [
9,
1318
] |
def METHOD_NAME(self, params):
# Renaming for convenience
zeta = params['zeta']
zeta_star = params['zeta_star']
gamma = params['gamma']
gamma_star = params['gamma_star']
dist_to_orig = params['dist_to_orig']
nsurf = len(zeta)
for isurf in range(nsurf):
r_surf, ndx1_surf, dx1_surf, dxmax_surf = self.get_all_surface_parameters(isurf)
M, N = zeta_star[isurf][0, :, :].shape
for j in range(N):
zeta_star[isurf][:, 0, j] = zeta[isurf][:, -1, j]
for i in range(1, M):
deltax = self.get_deltax(i, dx1_surf, ndx1_surf, r_surf, dxmax_surf)
zeta_star[isurf][:, i, j] = zeta_star[isurf][:, i - 1, j] + deltax*self.u_inf_direction
gamma[isurf] *= 0.
gamma_star[isurf] *= 0.
for isurf in range(nsurf):
M, N = zeta_star[isurf][0, :, :].shape
dist_to_orig[isurf][0] = 0.
for j in range(0, N):
for i in range(1, M):
dist_to_orig[isurf][i, j] = (dist_to_orig[isurf][i - 1, j] +
np.linalg.norm(zeta_star[isurf][:, i, j] -
zeta_star[isurf][:, i - 1, j]))
dist_to_orig[isurf][:, j] /= dist_to_orig[isurf][-1, j] | [
567
] |
def METHOD_NAME(a: dict, b: dict, message_start: str) -> None:
"""Assert that Directories ``a`` and ``b`` are the same.
``b`` is treated as the expected values that ``a`` shall abide by.
Print helpful error with custom message start.
"""
mismatch: list[str] = []
for b_key, b_value in b.items():
if b_key not in a:
mismatch.append(f"Missing item {b_key}: {b_value}")
elif b_value != a[b_key]:
mismatch.append(f"For {b_key} got {a[b_key]}, expected {b_value}")
for a_key, a_value in a.items():
if a_key not in b:
mismatch.append(f"Extraneous item {a_key}: {a_value}")
mismatch_str = "\n".join(mismatch)
assert len(mismatch) == 0, f"{message_start}\n{mismatch_str}" | [
638,
5194,
553,
979
] |
def METHOD_NAME(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0 | [
553,
799
] |
def METHOD_NAME(self):
# This is a regression test that checks that actuators delays are
# disabled when the trim takes place (GitHub issue #293).
script_path = self.sandbox.path_to_jsbsim_file('scripts',
'c1722.xml')
aircraft_tree, aircraft_name, _ = CopyAircraftDef(script_path,
self.sandbox)
root = aircraft_tree.getroot()
elevator_actuator = root.find("flight_control/channel/actuator[@name='fcs/elevator-actuator']")
delay = et.SubElement(elevator_actuator, 'delay')
delay.text = '0.1'
aircraft_tree.write(self.sandbox('aircraft', aircraft_name,
aircraft_name+'.xml'))
fdm = self.create_fdm()
fdm.set_aircraft_path(self.sandbox('aircraft'))
fdm.load_script(script_path)
fdm.run_ic()
while fdm.run():
if fdm['simulation/trim-completed'] == 1:
break | [
9,
1896,
41,
780,
1344
] |
def METHOD_NAME():
"""
Return the application log directory.
"""
warnings.warn(
f"'{__name__}.log_dir' is deprecated.",
DeprecationWarning, stacklevel=2
)
if sys.platform == "darwin":
name = QCoreApplication.applicationName() or "Orange"
logdir = os.path.join(os.path.expanduser("~/Library/Logs"), name)
else:
logdir = data_dir()
try:
os.makedirs(logdir, exist_ok=True)
except OSError:
pass
return logdir | [
390,
1190
] |
async def METHOD_NAME(self):
await super().METHOD_NAME() | [
9,
19,
15806
] |
def METHOD_NAME():
f = BytesIO(sample_celestrak_text)
s = list(parse_tle(f))
print(s)
assert len(s) == 2
assert s[0][0] == ['ISS (ZARYA)', 'ISS', 'ZARYA']
assert s[0][1].name == 'ISS (ZARYA)'
assert s[1][0] == ['FLOCK 2E-1']
assert s[1][1].name == 'FLOCK 2E-1' | [
9,
-1
] |
def METHOD_NAME(self):
"""break-guest-write behavior:
Guest write fails, but snapshot-access continues working and further
snapshot-read succeeds.
"""
log = self.do_cbw_error('break-guest-write')
self.assertEqual(log, """\ | [
9,
699,
6483,
77,
69,
12479,
168
] |
def METHOD_NAME(self) :
idx = 0
while idx < len(self.lightList) :
if (self.lastChanged[idx] > 1) :
self.lastChanged[idx] = self.lastChanged[idx] - 1
elif (self.lastChanged[idx] == 1) :
self.bit3List[idx].setCommandedState(CLOSED)
self.bit2List[idx].setCommandedState(CLOSED)
self.bit1List[idx].setCommandedState(CLOSED)
self.bit0List[idx].setCommandedState(CLOSED)
self.lastChanged[idx] = 0
if (self.showMsg) :
print "turning off " + self.lightNameList[idx]
idx = idx + 1
return | [
6553,
3988,
141
] |
def METHOD_NAME(self):
"""
Check if correct value is returned for filling in case of different
column type and fill method
"""
d = {"col1": [1, 2, 3, np.nan, np.nan], "col2": ["a", "a", np.nan, "b", "c"]}
df = pd.DataFrame(data=d)
# fill with median
preprocess_missing = PreprocessingMissingValues(
df.columns, PreprocessingMissingValues.FILL_NA_MEDIAN
)
self.assertEqual(preprocess_missing._get_fill_value(df["col1"]), 2)
self.assertEqual(preprocess_missing._get_fill_value(df["col2"]), "a")
# fill with mean
preprocess_missing = PreprocessingMissingValues(
df.columns, PreprocessingMissingValues.FILL_NA_MEDIAN
)
self.assertEqual(preprocess_missing._get_fill_value(df["col1"]), 2)
self.assertEqual(preprocess_missing._get_fill_value(df["col2"]), "a")
# fill with min
preprocess_missing = PreprocessingMissingValues(
df.columns, PreprocessingMissingValues.FILL_NA_MIN
)
self.assertEqual(preprocess_missing._get_fill_value(df["col1"]), 0)
self.assertEqual(
preprocess_missing._get_fill_value(df["col2"]), "_missing_value_"
) # added new value | [
9,
19,
1917,
99
] |
METHOD_NAME(paths): | [
9,
3336
] |
METHOD_NAME(self, tweet_id): | [
34,
13901
] |
def METHOD_NAME(cls) -> ArgumentParser:
from hathor.conf import HathorSettings
settings = HathorSettings()
def max_height(arg: str) -> Optional[int]:
if arg.lower() == 'checkpoint':
if not settings.CHECKPOINTS:
raise ValueError('There are no checkpoints to use')
return settings.CHECKPOINTS[-1].height
elif arg:
return int(arg)
else:
return None
parser = super().METHOD_NAME()
parser.add_argument('--export-file', type=FileType('wb', 0), required=True,
help='Save the export to this file')
parser.add_argument('--export-iterator', choices=['metadata', 'timestamp_index', 'dfs'], default='metadata',
help='Which method of iterating to use, don\'t change unless you know what it does')
parser.add_argument('--export-max-height', type=max_height,
help='Make no assumption about the mempool when using this option. It may be partially'
'exported or not, depending on the timestamps and the traversal algorithm.')
parser.add_argument('--export-skip-voided', action='store_true', help='Do not export voided txs/blocks')
return parser | [
129,
1319
] |
def METHOD_NAME(self):
root_items = []
for folder in self.folders:
item = WidgetItemFactory._build_item(None, folder)
root_items.append(item)
WidgetItemFactory.process_folder(item, folder)
return root_items | [
19,
1563,
451,
245
] |
def METHOD_NAME(db_file, db_file_name, report_folder):
db = open_sqlite_db_readonly(db_file)
cursor = db.cursor()
messages_count, rows = _perform_query(cursor, CHAT_MESSAGES_QUERY)
if messages_count > 0 and rows:
_parse_chat_messages(messages_count, rows, report_folder, db_file_name)
else:
logfunc(f'No {APP_NAME} chat data found')
cursor.close()
db.close() | [
214,
991,
463
] |
def METHOD_NAME(self):
mpich_module = module("show", self.modname).splitlines()
for line in mpich_module:
if "CRAY_MPICH_DIR" in line:
return get_path_args_from_module_line(line)[0]
# Fixes an issue on Archer2 cray-mpich/8.0.16 where there is
# no CRAY_MPICH_DIR variable in the module file.
for line in mpich_module:
if "CRAY_LD_LIBRARY_PATH" in line:
libdir = get_path_args_from_module_line(line)[0]
return os.path.dirname(os.path.normpath(libdir)) | [
751,
426
] |
def METHOD_NAME(self, x, y):
"""Get pixel at the speficied position."""
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
i = 3 * (y * self.width + x)
return (
self.data[i],
self.data[i + 1],
self.data[i + 2]
) | [
19,
976
] |
def METHOD_NAME(fn, *args):
import multiprocessing as mp
mp.set_start_method("spawn", force=True)
q = mp.Queue()
p = mp.Process(target=target, args=(q, fn, *args))
p.start()
retval = q.get()
p.join()
return retval | [
22,
623,
356
] |
def METHOD_NAME(self, sample_rate):
self.sample_rate = sample_rate
self.osmosdr_source_0.METHOD_NAME(self.sample_rate) | [
0,
734,
1585
] |
def METHOD_NAME(self, action_filename):
"""Parses the file and populates the data."""
self.actions = list()
self.hiid_to_action_index = dict()
f = codecs.open(action_filename, 'r', encoding='latin-1')
first_line = True
for line in f:
line = line.rstrip()
if first_line:
# Ignore the first line
first_line = False
else:
self.actions.append(GenewaysAction(line))
latestInd = len(self.actions)-1
hiid = self.actions[latestInd].hiid
if hiid in self.hiid_to_action_index:
raise Exception('action hiid not unique: %d' % hiid)
self.hiid_to_action_index[hiid] = latestInd | [
176,
1006,
245
] |
def METHOD_NAME(c2_netdef, tf_dir):
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def parse_net_def(path):
import google.protobuf.text_format # type: ignore[import]
net_def = caffe2_pb2.NetDef()
with open(path) as f:
google.protobuf.text_format.Merge(f.read(), net_def)
return core.Net(net_def)
graph_defs = [tb_exporter.nets_to_graph_def([parse_net_def(path)])
for path in c2_netdef]
events = [graph_def_to_event(i, graph_def)
for (i, graph_def) in enumerate(graph_defs, start=1)]
write_events(tf_dir, events)
log.info("Wrote %s graphs to logdir %s", len(events), tf_dir) | [
6991,
2922
] |
def METHOD_NAME(remote_ip, timeout=300):
"""
Test for how long a target host went down.
:param remote_ip: IP address or hostname of target host.
:param timeout: For how long will return a timeout exception
if host is not recovered.
:return: Time elapsed before target host is pingable.
:raise TimeoutExpection: :
"""
start_time = time.time()
end_time = time.time() + timeout
ping_cmd = 'ping -c 1 -W 1 ' + remote_ip
logging.debug('Wait for host shutting down.')
while True:
if time.time() > end_time:
raise TimeoutError(
'Downtime %s exceeds maximum allowed %s' %
(time.time() - start_time, timeout))
res = process.run(ping_cmd, ignore_status=True, verbose=False, shell=True)
if res.exit_status:
logging.debug('Host %s is down.', remote_ip)
break
else:
logging.debug('Host %s is up.', remote_ip)
time.sleep(1)
logging.debug('Time elapsed before host down: %.2fs',
(time.time() - start_time))
logging.debug('Wait for host recover from sleep.')
while True:
if time.time() > end_time:
raise TimeoutError(
'Downtime %s exceeds maximum allowed %s' %
(time.time() - start_time, timeout))
res = process.run(ping_cmd, ignore_status=True, verbose=False, shell=True)
if res.exit_status:
logging.debug('Host %s is down.', remote_ip)
else:
logging.debug('Host %s is up.', remote_ip)
break
down_time = time.time() - start_time
logging.debug('Time elapsed before host up: %.2fs', down_time)
return down_time | [
250,
1806,
481,
104
] |
def METHOD_NAME():
ocm_info = {
"recommendedVersions": [
{"recommendedVersion": "1.0.0", "workload": "foo"},
],
"upgradePolicyAllowedWorkloads": ["foo", "bar"],
"upgradePolicyClusters": [
{"name": "a", "upgradePolicy": {"workloads": ["foo"]}},
{"name": "c", "upgradePolicy": {"workloads": ["foo", "bar"]}},
{"name": "b", "upgradePolicy": {"workloads": ["bar"]}},
{"name": "b2", "upgradePolicy": {"workloads": ["bar"]}},
{"name": "b3", "upgradePolicy": {"workloads": ["bar"]}},
],
}
clusters: dict[str, OCMSpec] = {}
add_cluster(clusters, "a", "2.1.0", "stable")
add_cluster(clusters, "c", "2.0.0", "stable")
add_cluster(clusters, "b", "3.0.0", "fast")
add_cluster(clusters, "b2", "3.0.0", "fast")
add_cluster(clusters, "b3", "2.0.0", "fast")
assert get_updated_recommended_versions(ocm_info, clusters) == [
{
"channel": "stable",
"recommendedVersion": "2.1.0",
"workload": "foo",
"initialVersion": "openshift-v2.1.0",
},
{
"channel": "stable",
"recommendedVersion": "2.0.0",
"workload": "bar",
"initialVersion": "openshift-v2.0.0",
},
{
"channel": "fast",
"recommendedVersion": "3.0.0",
"workload": "bar",
"initialVersion": "openshift-v3.0.0-fast",
},
] | [
9,
19,
3758,
4324,
295,
107,
307
] |
def METHOD_NAME(self):
# Use one query to find all the properties fo all species under the type species .
short_species_list = []
triples = []
tmp = []
value_dictionary = {}
rst = query_blazegraph(query=ONTOKIN_ALL_PROPERTIES_ALL_SPECIES, namespace="ontokin")
non_attributes = ['species', 'label', 'transport']
heads = [h for h in rst['head']['vars'] if h not in non_attributes and '_unit' not in h]
unique_labels = []
for r in rst['results']['bindings']:
row = []
species = r['species']['value'] # .split('/')[-1]
transport = r['transport']['value']
label = r['label']['value']
if "#" in species:
short_species = species.split('#')[-1]
else:
short_species = species.split('/')[-1]
# filter, only put findable species ...
counter = 0
if label not in unique_labels:
short_species_list.append(short_species)
counter += 1
print(f"number of selected iris", counter)
row.append(species)
row.append(label)
for head in heads:
if head in r:
data = r[head]['value']
else:
data = "EMPTY"
new_node = head + '_' + short_species
row.append(new_node)
if head + '_unit' in r:
data_unit = r[head + '_unit']['value']
value_dictionary[new_node] = data + ' ' + data_unit
else:
# insert a new node, with part of the species and the relations
value_dictionary[new_node] = data
triples.append((short_species, head + '_latent', new_node))
tmp.append(row)
unique_labels.append(label)
print('number of unique labels', len(unique_labels))
df_all_species = pd.DataFrame(tmp)
df_all_species.columns = ['species', 'label'] + heads
df_all_species.to_csv(os.path.join(self.dataset_path, 'all_species.tsv'), sep='\t')
with open(os.path.join(self.dataset_path, 'value_dict.json'), 'w') as f:
f.write(json.dumps(value_dictionary))
f.close()
df_triples = pd.DataFrame(triples)
df_triples.to_csv(os.path.join(self.dataset_path, 'ontokin-train.txt'), sep='\t', index=False, header=False)
df_test = df_triples.sample(frac=0.2)
df_test.to_csv(os.path.join(self.dataset_path, 'ontokin-test.txt'), sep='\t', index=False, header=False) | [
539,
75,
8669
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Peer IP.
"""
return pulumi.get(self, "peer_ip") | [
502,
1213
] |
def METHOD_NAME(self, other: 'ComputationalBasisProductState') -> 'ComputationalBasisProductState':
return ComputationalBasisProductState(self.basis + other.basis) | [
8372
] |
def METHOD_NAME(
x, y, xlabel, ylabel, cols=1, rows=1, same_x=False, ylabel_title=True, to_file=None
):
"""Make a grid of plots.
Args:
x: values for x-axis. If all plots have the same x-axis the same_x arg
should be True, and only one x-value list is need, otherwise x should
be a list of x value lists.
y: list of lists of y values for plots
xlabel: if same_x=True, a x-axis label, if same_x=False, list of xaxis
labels
ylable: list of y-axis labels
cols: number of columns in plot grid
rows: number of rows in plot grid
same_x: If True use the same x-axis for all plots, otherwise expect a
list of x-axis data for the x-axis of each plot
ylabel_title: If True, use the ylabel for the plot title, otherwise label
the y-axis
Returns:
matplotlib.pyplot
"""
assert cols * rows >= len(y)
for i, yi in enumerate(y):
yilabel = ylabel[i]
if same_x:
xi = x
xilabel = xlabel
else:
xi = x[i]
xilabel = xlabel[i]
plt.subplot(rows, cols, i + 1)
plt.xlabel(xilabel)
plt.plot(xi, yi)
if ylabel_title:
plt.title(yilabel)
else:
plt.ylabel(yilabel)
plt.tight_layout()
if to_file:
plt.savefig(to_file)
else:
plt.show()
return plt | [
1288,
753
] |
def METHOD_NAME(self):
return self.__trans.METHOD_NAME() | [
1452
] |
def METHOD_NAME(rotation_matrix_Y_HALF_PI):
quaternion = np.array([0, 0.7071068, 0.0, 0.7071068])
matrix = quaternion_to_rotation_matrix(quaternion)
assert np.allclose(rotation_matrix_Y_HALF_PI, matrix) | [
9,
2270,
24,
2271,
430,
320
] |
def METHOD_NAME():
np.random.seed(1)
reference = pd.Series(np.random.normal(0, 1, 10_000), name='A')
analysis = reference
wass_dist = WassersteinDistance(chunker=chunker, threshold=threshold)
wass_dist = wass_dist.fit(reference).calculate(analysis)
wass_dist = np.round(wass_dist, 2)
assert wass_dist == 0 | [
9,
-1,
2171,
2684,
1506,
1886
] |
def METHOD_NAME():
y_true = np.array([])
y_score = np.array([])
with pytest.raises(ValueError):
sklearn.metrics.roc_auc_score(y_true, y_score)
with pytest.raises(ValueError):
roc_auc(y_true, y_score) | [
9,
2066,
2067,
747,
41,
435,
313
] |
def METHOD_NAME(self, sourcename, status):
return self._base_client.update_source_status(
sourcename=sourcename,
status=status,
) | [
959,
1458,
452
] |
def METHOD_NAME(self):
clade_counts = {frozenset(['A', 'B']): 6,
frozenset(['A']): 7,
frozenset(['B']): 8}
edge_lengths = {frozenset(['A', 'B']): 1,
frozenset(['A']): 2,
frozenset(['B']): 3}
tree = _build_trees(clade_counts, edge_lengths, 'foo', TreeNode)[0]
self.assertEqual(tree.foo, 6)
tree_foos = set([c.foo for c in tree.children])
tree_lens = set([c.length for c in tree.children])
self.assertEqual(tree_foos, set([7, 8]))
self.assertEqual(tree_lens, set([2, 3])) | [
9,
56,
6587
] |
def METHOD_NAME(candidates: [dict], mask=None):
contexts = []
for candidate in candidates:
context = candidate["contexts"][0]
if mask:
context = context.replace(candidate["words_in_contexts"][0], mask)
contexts.append(context)
return contexts | [
19,
865,
6295
] |
def METHOD_NAME(category, event):
tpoint = "%s:%s" % (category, event)
if not args.filter or fnmatch.fnmatch(tpoint, args.filter):
print(tpoint)
if args.verbosity > 0:
print_tpoint_format(category, event) | [
38,
-1
] |
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request | [
123,
377
] |
def METHOD_NAME():
path = os.path.join(os.getcwd(), "instances.yml")
with open(path, "rb") as fd:
content = fd.read().decode("utf-8")
return yaml.safe_load(content) | [
203,
200,
171
] |
METHOD_NAME(self,cpustate,proc,task_addr): | [
1917,
-1
] |
async def METHOD_NAME(self, interaction: discord.Interaction) -> bool:
if interaction.user.id != self._author_id:
await interaction.response.send_message("You can't do that.", ephemeral=True)
return False
return True | [
1309,
250
] |
def METHOD_NAME(self, packages_metadata: List[Dict[str, str]], ignore_warning_callback: Callable[[], None]) -> None:
"""
Show a dialog that prompts the user to install certain packages.
The dialog is worded for packages that are missing and required for a certain operation.
:param packages_metadata: The metadata of the packages that are missing.
:param ignore_warning_callback: A callback that gets executed when the user ignores the pop-up, to show them a
warning.
"""
self.missingPackageDialog = InstallMissingPackageDialog(packages_metadata, ignore_warning_callback)
self.missingPackageDialog.show() | [
697,
428,
1038,
360,
1301
] |
def METHOD_NAME(self, force=False):
"""
Clear the interface
Args:
force: Remove the object and create a new one (brute force)
Returns:
"""
if force:
self.canvas.fig.METHOD_NAME()
self.canvas.ax = self.canvas.fig.add_subplot(111)
# self.canvas.ax.clear()
# self.canvas = MplCanvas()
else:
self.canvas.ax.METHOD_NAME()
self.redraw() | [
537
] |
def METHOD_NAME(self):
alice_xmss = get_alice_xmss()
bob_xmss = get_bob_xmss()
random_xmss = get_random_xmss()
signatories = [alice_xmss.address, bob_xmss.address]
weights = [20, 20]
threshold = 21
multi_sig_tx = MultiSigCreate.create(signatories,
weights,
threshold,
0,
random_xmss.pk)
multi_sig_tx.sign(random_xmss)
multi_sig_address_state = MultiSigAddressState.get_default(multi_sig_tx.txhash,
signatories,
weights,
threshold)
multi_sig_addresses_state = {multi_sig_address_state.address: multi_sig_address_state}
AddressState.put_addresses_state(self.state, multi_sig_addresses_state)
multi_sig_address_state2 = MultiSigAddressState.get_multi_sig_address_state_by_address(
self.state._db,
MultiSigAddressState.generate_multi_sig_address(multi_sig_tx.txhash))
self.assertEqual(multi_sig_address_state.pbdata, multi_sig_address_state2.pbdata) | [
9,
1276,
457,
5136,
1065,
551
] |
def METHOD_NAME(self):
self.login_as(user=self.user, superuser=True)
response = self.client.get(self.url, {"query": "nonsense"})
assert response.status_code == 400
assert "nonsense" in response.data | [
9,
1068,
539
] |
def METHOD_NAME(label):
# When computing outs derived from srcs in a different package (i.e., when
# srcs labels have a colon), we only want their package-relative stem (the
# dirname and basename after the colon).
if ":" in label:
label = label.split(":")[-1]
return dirname(label), basename(label) | [
1821,
2838,
5926
] |
def METHOD_NAME(document_classifier):
docs = [
Document(content="""That's good. I like it.""", meta={"name": "0"}, id="1"),
Document(content="""That's bad. I don't like it.""", meta={"name": "1"}, id="2"),
]
results = document_classifier.predict_batch(documents=[docs, docs])
assert len(results) == 2 # 2 Document lists
expected_labels = ["joy", "sadness"]
for i, doc in enumerate(results[0]):
assert doc.to_dict()["meta"]["classification"]["label"] == expected_labels[i] | [
9,
352,
810,
2277,
107,
366,
50
] |
def METHOD_NAME( folder1, folder2 ):
ts1 = OpenPMDTimeSeries( folder1 )
ts2 = OpenPMDTimeSeries( folder2 )
# Check the vector fields
for field, coord in [("J", "z"), ("E","r"), ("E","z"), ("B","t")]:
print("Checking %s%s" %(field, coord))
field1, info = ts1.get_field(field, coord, iteration=0)
field2, info = ts2.get_field(field, coord, iteration=0)
# For 0 fields, do not use allclose
if abs(field1).max() == 0:
assert abs(field2).max() == 0
else:
assert np.allclose(
field1/abs(field1).max(), field2/abs(field2).max() )
# Check the rho field
print("Checking rho")
field1, info = ts1.get_field("rho", iteration=0)
field2, info = ts2.get_field("rho", iteration=0)
assert np.allclose( field1/abs(field1).max(), field2/abs(field2).max() ) | [
250,
1741,
342
] |
def METHOD_NAME(self, base: ExerciseBase, delete: bool):
if not base.pk:
return
exercise_languages = base.exercises.values_list('language', flat=True)
duplicates = [
Language.objects.get(pk=item)
for item, count in collections.Counter(exercise_languages).items() if count > 1
]
if not duplicates:
return
warning = f'Exercise {base.uuid} has duplicate translations!'
self.stdout.write(self.style.WARNING(warning))
# Output the duplicates
for language in duplicates:
translations = base.exercises.filter(language=language)
self.stdout.write(f'language {language.short_name}:')
for translation in translations:
self.stdout.write(f' * {translation.name} {translation.uuid}')
self.stdout.write('')
# And delete them
if delete:
self.stdout.write(f' Deleting all but first {language.short_name} translation')
for translation in translations[1:]:
translation.delete() | [
276,
1119,
5285
] |
def METHOD_NAME(
script_runner, mock_filtering, in_fodf, out_fodf, sym_fodf):
os.chdir(os.path.expanduser(tmp_dir.name))
ret = script_runner.run('scil_execute_angle_aware_bilateral_filtering.py',
in_fodf,
'out_fodf2.nii.gz',
'--out_sym', 'out_sym.nii.gz',
'--sphere', 'repulsion100',
'--sigma_angular', '1.0',
'--sigma_spatial', '1.0',
'--sigma_range', '1.0',
'--sh_basis', 'descoteaux07',
'--processes', '1', '-f',
print_result=True, shell=True)
assert ret.success
mock_filtering.assert_called_once()
ret_sym_fodf = nib.load("out_sym.nii.gz")
test_sym_fodf = nib.load(sym_fodf)
assert np.allclose(ret_sym_fodf.get_fdata(), test_sym_fodf.get_fdata()) | [
9,
6794,
1189,
146
] |
def METHOD_NAME(self, value):
"""Insert an occurrence of `value` into the btree."""
i = 0
n = len(self._tree)
while i < n:
cur = self._tree[i]
self._counts[i] += 1
if value < cur:
i = 2 * i + 1
elif value > cur:
i = 2 * i + 2
else:
return
raise ValueError("Value %s not contained in tree." "Also, the counts are now messed up." % value) | [
408
] |
def METHOD_NAME(self, widget=None):
"""
Aborts the addition of a new backend. Shows the configuration panel
previously loaded.
@param widget: just to make this function usable as a signal callback.
Not used.
"""
self.dialog.show_config_for_backend(None) | [
69,
608
] |
def METHOD_NAME(self) -> str:
return self._node_id.value | [
1716,
147
] |
def METHOD_NAME(pt, rkb, rk):
pt = hex2bin(pt)
# Initial Permutation
pt = permute(pt, initial_perm, 64)
print("After initial permutation", bin2hex(pt))
# Splitting
left = pt[0:32]
right = pt[32:64]
for i in range(0, 16):
# Expansion D-box: Expanding the 32 bits data into 48 bits
right_expanded = permute(right, exp_d, 48)
# XOR RoundKey[i] and right_expanded
xor_x = xor(right_expanded, rkb[i])
# S-boxex: substituting the value from s-box table by calculating row and column
sbox_str = ""
for j in range(0, 8):
row = bin2dec(int(xor_x[j * 6] + xor_x[j * 6 + 5]))
col = bin2dec(
int(xor_x[j * 6 + 1] + xor_x[j * 6 + 2] + xor_x[j * 6 + 3] + xor_x[j * 6 + 4]))
val = sbox[j][row][col]
sbox_str = sbox_str + dec2bin(val)
# Straight D-box: After substituting rearranging the bits
sbox_str = permute(sbox_str, per, 32)
# XOR left and sbox_str
result = xor(left, sbox_str)
left = result
# Swapper
if(i != 15):
left, right = right, left
print("Round ", i + 1, " ", bin2hex(left),
" ", bin2hex(right), " ", rk[i])
# Combination
combine = left + right
# Final permutation: final rearranging of bits to get cipher text
cipher_text = permute(combine, final_perm, 64)
return cipher_text | [
2196
] |
def METHOD_NAME(self, item_list, user, **kwargs):
return {
item: {
"config": [
serialize_field(self.project, item, c)
for c in item.get_config(
project=self.project, user=user, add_additial_fields=True
)
]
}
for item in item_list
} | [
19,
1685
] |
def METHOD_NAME(self):
base_class = self.env["l10n_br_fiscal.document.related"]
tree, visited = self.get_stacked_tree(base_class)
self.assertEqual(tree, NFeRelated.NFREF_TREE)
self.assertEqual(len(visited), 4) | [
9,
-1,
151
] |
def METHOD_NAME(self):
response = self.client.get(f'{self.base_endpoint}{self.questionnaire.uuid}')
self.assertEqual(response.status_code, 200)
response_json = response.json()
self.assertEqual(len(response_json['explanation']['sections']), 2)
self.assertIn('title', response_json['explanation'])
self.assertJsonSchema(self.detail_schema, response_json)
response_json = response.json()
self.assertIn('explanation', response_json)
self.assertEqual(len(response_json['explanation']['sections']), 2) | [
9,
16287,
1801,
604,
4977
] |
def METHOD_NAME(cmd1, cmd2):
try:
rc, result = getstatusoutput_noshell_pipe(cmd1, cmd2)
if rc != [0, 0]:
raise RuntimeError("Failed to execute command {} {}, return code {}, {}".format(cmd1, cmd2, rc, result))
except OSError as e:
raise RuntimeError("Failed to execute command {} {} due to {}".format(cmd1, cmd2, repr(e)))
return result | [
19,
462,
1571,
890
] |
def METHOD_NAME(self):
"""
:type: string
"""
self._completeIfNotSet(self._encoding)
return self._encoding.value | [
2300
] |
def METHOD_NAME():
import sverchok
# This part is called upon scrip.reload (F8), because old nodes will be unregistered again
# There is no way to say which old node classes should be registered without registering them all
if sverchok.reload_event:
register_all() | [
372
] |
def METHOD_NAME(
typeclass=None, key="testobj", location=None, delete_duplicates=True, **kwargs
):
"""
This is a convenience-wrapper for quickly building EvscapeRoom objects. This
is called from the helper-method create_object on states, but is also useful
for the object-create admin command.
Note that for the purpose of the Evscaperoom, we only allow one instance
of each *name*, deleting the old version if it already exists.
Keyword Args:
typeclass (str): This can take just the class-name in the evscaperoom's
objects.py module. Otherwise, a full path is needed.
key (str): Name of object.
location (Object): The location to create new object.
delete_duplicates (bool): Delete old object with same key.
kwargs (any): Will be passed into create_object.
Returns:
new_obj (Object): The newly created object, if any.
"""
if not (
callable(typeclass)
or typeclass.startswith("evennia")
or typeclass.startswith("typeclasses")
or typeclass.startswith("evscaperoom")
):
# unless we specify a full typeclass path or the class itself,
# auto-complete it
typeclass = _BASE_TYPECLASS_PATH + typeclass
if delete_duplicates:
old_objs = [
obj
for obj in search_object(key)
if not inherits_from(obj, "evennia.objects.objects.DefaultCharacter")
]
if location:
# delete only matching objects in the given location
[obj.delete() for obj in old_objs if obj.location == location]
else:
[obj.delete() for obj in old_objs]
new_obj = create_object(typeclass=typeclass, key=key, location=location, **kwargs)
return new_obj | [
129,
-1,
279
] |
def METHOD_NAME(text):
"""
classify tweets for tweets about car accidents and others
:param text: tweet text
:return: boolean, true if tweet is about car accident, false for others
"""
return text.startswith("בשעה") and (
tweet_with_accident_vehicle_and_person(text)
or tweet_with_car_accident(text)
or tweet_with_vehicles(text)
) | [
6144,
11319
] |
def METHOD_NAME():
with patch(
"salt.modules.nexus._get_snapshot_version_metadata",
MagicMock(
return_value={"snapshot_versions": {"zip": "0.0.2-20170920.212353-3"}}
),
):
artifact_url, file_name = nexus._get_snapshot_url(
nexus_url="http://nexus.example.com/repository",
repository="libs-snapshots",
group_id="com.company.sampleapp.web-module",
artifact_id="web",
version="0.2.0-SNAPSHOT",
packaging="zip",
headers={},
)
assert (
artifact_url
== "http://nexus.example.com/repository/libs-snapshots/com/company/sampleapp/web-module/web/0.2.0-SNAPSHOT/web-0.0.2-20170920.212353-3.zip"
)
assert file_name == "web-0.0.2-20170920.212353-3.zip" | [
9,
363,
274,
43,
394,
281
] |
def METHOD_NAME(self, *largs):
if self._dropdown:
self._dropdown.unbind(on_select=self._on_dropdown_select)
self._dropdown.unbind(on_dismiss=self._close_dropdown)
self._dropdown.dismiss()
self._dropdown = None
cls = self.dropdown_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
self._dropdown = cls()
self._dropdown.bind(on_select=self._on_dropdown_select)
self._dropdown.bind(on_dismiss=self._close_dropdown)
self._update_dropdown() | [
56,
5503
] |
def METHOD_NAME(self) -> 'outputs.LabelingJobPropertiesResponse':
"""
Definition of a labeling job.
"""
return pulumi.get(self, "properties") | [
748
] |
async def METHOD_NAME():
raise error | [
241,
168
] |
def METHOD_NAME(self, data):
data = self.preprocess(data)
data = self.inference(data)
return self.postprocess(data) | [
2103
] |
def METHOD_NAME(batch, y, nxt_y, y0, alpha):
pred = graph.predecessor(batch.numpy())
self_label = paddle.to_tensor(y[batch.numpy()])
self_label0 = paddle.to_tensor(y0[batch.numpy()])
pred_id = []
for n, p in enumerate(pred):
if len(p) > 0:
pred_id.append(np.ones(len(p)) * n)
pred_cat = np.concatenate(pred)
pred_id_cat = paddle.to_tensor(np.concatenate(pred_id), dtype="int64")
pred_cat_pd = paddle.to_tensor(pred_cat)
pred_label = paddle.to_tensor(y[pred_cat])
pred_norm = paddle.gather(indegree, pred_cat_pd)
self_norm = paddle.gather(indegree, paddle.to_tensor(batch, dtype="int64"))
others = paddle.zeros_like(self_label)
others = paddle.scatter(others, pred_id_cat, pred_label)
others = (1 - alpha) * (others + self_label
) * self_norm + alpha * self_label0
others = others / paddle.sum(others, -1, keepdim=True)
nxt_y[batch] = others.numpy() | [
13748
] |
def METHOD_NAME(self):
barrier()
data = self.generateData(1)
self.checkResult(data, parallel_sort(data)) | [
9,
266,
-1
] |
async def METHOD_NAME(self, **kwargs):
client = kwargs.pop("client")
with open(self.receipt_jpg, "rb") as fd:
my_file = fd.read()
with pytest.raises(ValueError):
async with client:
poller = await client.begin_recognize_receipts(
my_file,
content_type="application/jpeg"
)
result = await poller.result() | [
9,
7405,
1068,
459,
44,
49,
7680
] |
def METHOD_NAME(self):
"""
Make sure we can list all the tables in a schema.
"""
table_list = (
SchemaManager()
.list_tables(schema_name=Band._meta.schema)
.run_sync()
)
self.assertListEqual(table_list, [Band._meta.tablename]) | [
9,
245,
2253
] |
def METHOD_NAME(self):
'''
Set status to 'on'
Input:
None
Output:
None
'''
self.set_status(True) | [
69
] |
def METHOD_NAME(self):
self.x_np = self.random(
shape=self.case["x_shape"],
dtype=self.case["dtype"],
low=self.case["x_low"],
high=self.case["x_high"])
self.y_np = self.random(
shape=self.case["y_shape"],
dtype=self.case["dtype"],
low=self.case["y_low"],
high=self.case["y_high"])
self.y_np = np.where(self.y_np == 0, 1, self.y_np) | [
123,
1461
] |
def METHOD_NAME(
src: Path,
dest: Path,
copy_excludes: typing.Optional[typing.List[str]] = None,
patch_func: typing.Callable[[], None] = owlbot_patch,
) -> None:
"""Copies files from generated tree."""
entries = os.scandir(src)
if not entries:
logger.info("there is no version subdirectory to copy")
return
for entry in entries:
if entry.is_dir():
version_src = Path(entry.path).resolve()
owlbot_copy_version(version_src, dest, copy_excludes)
with pushd(dest):
patch_func() | [
16012,
57
] |
def METHOD_NAME(self) -> str:
"""
ARN of the KMS key used to encrypt the snapshot at rest.
"""
return pulumi.get(self, "kms_key_arn") | [
1666,
59,
1059
] |
def METHOD_NAME(
app, item_lib_martigny, patron_martigny, item_type_on_site_martigny,
loc_public_martigny, librarian_martigny, lib_martigny, lib_saxon,
loc_public_saxon, patron_type_adults_martigny, circulation_policies):
"""Test extend an on_loan item at an external library."""
patron_martigny['patron']['type']['$ref'] = get_ref_for_pid(
'ptty', patron_type_adults_martigny.pid)
patron_martigny.update(patron_martigny, dbcommit=True, reindex=True)
item_lib_martigny['item_type']['$ref'] = get_ref_for_pid(
'itty', item_type_on_site_martigny.pid)
item_lib_martigny.update(item_lib_martigny, dbcommit=True, reindex=True)
# the library level cipo3 is used here circ_policy_temp_martigny
params = {
'patron_pid': patron_martigny.pid,
'transaction_location_pid': loc_public_martigny.pid,
'transaction_user_pid': librarian_martigny.pid,
'pickup_location_pid': loc_public_martigny.pid
}
item, loan = item_record_to_a_specific_loan_state(
item=item_lib_martigny,
loan_state=LoanState.ITEM_ON_LOAN,
params=params, copy_item=True)
settings = app.config['CIRCULATION_POLICIES']['extension']
app.config['CIRCULATION_POLICIES']['extension']['from_end_date'] = True
loan['end_date'] = loan['start_date']
initial_loan = loan.update(loan, dbcommit=True, reindex=True)
assert get_circ_policy(
loan, checkout_location=True) == get_circ_policy(loan)
# The cipo used for the checkout or renewal is "short" which is configured
# only for lib_martigny. For other libraries it is the default cipo to be
# used.
params = {
'transaction_location_pid': loc_public_saxon.pid,
'transaction_user_pid': librarian_martigny.pid
}
cipo = get_circ_policy(loan)
item, actions = item.extend_loan(**params)
loan = Loan.get_record_by_pid(initial_loan.pid)
# now the extend action does not take into account anymore the transaction
# library so it continues to use the "short" policy for the extend action.
assert get_circ_policy(
loan, checkout_location=True).get('pid') == cipo.get('pid')
assert get_circ_policy(loan).get('pid') != cipo.get('pid') | [
9,
978,
69,
1024,
69,
16026,
41
] |
def METHOD_NAME(self):
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.properties import StringProperty
from kivy.uix.floatlayout import FloatLayout
Builder.load_string(""" | [
9,
706,
1563,
280,
544,
41,
4135
] |
def METHOD_NAME():
"""display all known resources"""
print("\n*** ctapipe resources ***\n")
print("CTAPIPE_SVC_PATH: (directories where resources are searched)")
if os.getenv("CTAPIPE_SVC_PATH") is not None:
for directory in datasets.get_searchpath_dirs():
print(f"\t * {directory}")
else:
print("\t no path is set")
print("")
all_resources = sorted(datasets.find_all_matching_datasets(r"\w.*"))
home = os.path.expanduser("~")
try:
resource_dir = files("ctapipe_resources")
except ImportError:
resource_dir = None
fmt = "{name:<30.30s} : {loc:<30.30s}"
print(fmt.format(name="RESOURCE NAME", loc="LOCATION"))
print("-" * 70)
for resource in all_resources:
if resource.suffix == ".py" or resource.name.startswith("_"):
continue
loc = str(resource)
if resource_dir is not None:
loc = loc.replace(resource_dir, "[ctapipe_resources]")
loc = loc.replace(home, "~")
print(fmt.format(name=resource.name, loc=loc)) | [
100,
1614
] |
def METHOD_NAME(self, routingKey, shouldMatch, filter):
cb = mock.Mock()
yield self.mq.startConsuming(cb, filter)
self.mq.produce(routingKey, 'x')
self.assertEqual(shouldMatch, cb.call_count == 1)
if shouldMatch:
cb.assert_called_once_with(routingKey, 'x') | [
74,
9,
590
] |
def METHOD_NAME(hostString):
a = hostString.split('.')
if len(a) != 4:
return False
for group in a:
if (group.find("/")!=-1):
group = group[:group.find("/")]
if not group.isdigit():
return False
i = int(group)
if i < 0 or i > 255:
return False
return True | [
1205,
4200
] |
def METHOD_NAME(self):
transaction = self.registration.transaction
if not transaction or transaction.provider != 'paypal':
return False
return (transaction.data['payment_status'] == request.form.get('payment_status') and
transaction.data['txn_id'] == request.form.get('txn_id')) | [
137,
1853,
3159
] |
def METHOD_NAME(a): return \ | [
497,
12663,
497,
1305,
12664,
1178,
12665
] |
def METHOD_NAME(self):
self.client.logout()
response = self.client.get(reverse('course-experience-course-deadlines-mobile', args=[self.course.id]))
assert response.status_code == 401 | [
9,
18,
19,
20,
21
] |
def METHOD_NAME(submission_) -> bool:
"""Check if submission has text or title."""
return hasattr(submission_, "selftext") or hasattr(submission_, "title") | [
220,
459
] |
def METHOD_NAME(self):
return self.rank == 0 | [
137,
2614
] |
def METHOD_NAME(self):
for label in [ '1.2.1.2.1a','1.2.3.1.1a', '1.4.1.12.4d', '1.4.3.6.2a', '1.4.6.1.1a', '1.4.10.1.1a' ]:
L = self.tc.get('/SatoTateGroup/'+label, follow_redirects=True)
assert "Moment sequences" in L.get_data(as_text=True)
for label in [ '1.2.B.2.1a','1.2.A.1.1a', '1.4.F.12.4d', '1.4.E.6.2a', '1.4.B.1.1a', '1.4.A.1.1a' ]:
L = self.tc.get('/SatoTateGroup/'+label)
assert "Moment sequences" in L.get_data(as_text=True) | [
9,
-1
] |
def METHOD_NAME(self, file_pattern, index):
"""
Reads the file of the PSU
:param file_pattern: The filename convention
:param index: An integer, 1-based index of the PSU of which to query status
:return: int
"""
return_value = 0
try:
with open(self.psu_path + file_pattern.format(index), 'r') as file_to_read:
return_value = int(file_to_read.read())
except IOError:
log_err("Read file {} failed".format(self.psu_path + file_pattern.format(index)))
return 0
return return_value | [
203,
171
] |
def METHOD_NAME(pid: int) -> None:
try:
os.kill(pid, signal.SIGTERM)
print(f"Process {pid} terminated.")
except Exception as e:
print(f"Error killing process {pid}: {e}") | [
643,
356
] |