text (string, lengths 15-7.82k) | ids (sequence, lengths 1-7) |
---|---|
def METHOD_NAME(tag_name, tags):
"""
Return tags in a format that can be used as --tags argument value.
"""
tags = sorted(list(tags))
tags = ["'{}'".format(tag) if " " in tag else tag for tag in tags]
if tag_name == SIMPLE:
return " ".join(tags)
return "{}={}".format(tag_name, ",".join(tags)) | [
275,
114
] |
def METHOD_NAME(rootdir):
files = get_matching_files(rootdir / "test-root", include_patterns=["autodoc*"])
assert sorted(files) == [
'autodoc.txt', 'autodoc_target.py',
] | [
9,
19,
3626,
1537,
75,
1872,
426
] |
def METHOD_NAME( dict1, dict2 ):
for key in dict2:
dict1[ key ] = dict2[ key ] | [
411,
553
] |
def METHOD_NAME(self):
return list(range(self.QSFP_PORT_START, self.PORTS_IN_BLOCK + 1)) | [
5093,
907
] |
def METHOD_NAME(self, *args):
return uninstall(self.repo, *args) | [
1660,
3212,
2112
] |
def METHOD_NAME(freq_str: str) -> List[TimeFeature]:
"""
Returns a list of time features that will be appropriate for the given
frequency string.
Parameters
----------
freq_str
Frequency string of the form [multiple][granularity] such as "12H",
"5min", "1D" etc.
"""
features_by_offsets: Dict[Any, List[TimeFeature]] = {
offsets.YearBegin: [],
offsets.YearEnd: [],
offsets.QuarterBegin: [month_of_year],
offsets.QuarterEnd: [month_of_year],
offsets.MonthBegin: [month_of_year],
offsets.MonthEnd: [month_of_year],
offsets.Week: [day_of_month, week_of_year],
offsets.Day: [day_of_week, day_of_month, day_of_year],
offsets.BusinessDay: [day_of_week, day_of_month, day_of_year],
offsets.Hour: [hour_of_day, day_of_week, day_of_month, day_of_year],
offsets.Minute: [
minute_of_hour,
hour_of_day,
day_of_week,
day_of_month,
day_of_year,
],
offsets.Second: [
second_of_minute,
minute_of_hour,
hour_of_day,
day_of_week,
day_of_month,
day_of_year,
],
}
offset = to_offset(freq_str)
for offset_type, features in features_by_offsets.items():
if isinstance(offset, offset_type):
return features
supported_freq_msg = f"""
Unsupported frequency {freq_str}
The following frequencies are supported:
Y - yearly
alias: A
Q - quarterly
M - monthly
W - weekly
D - daily
B - business days
H - hourly
T - minutely
alias: min
S - secondly
"""
raise RuntimeError(supported_freq_msg) | [
104,
2247,
280,
3831,
3
] |
def METHOD_NAME(self) -> None:
"""Decrease the loss scale
"""
self._scale = self._scale * self._backoff_factor
if self._min_scale:
self._scale = torch.max(self._scale, self._min_scale) | [
4287,
930
] |
def METHOD_NAME(payload):
logger.info(f"Lithops v{__version__} - Generating metadata")
runtime_meta = get_runtime_metadata()
internal_storage = InternalStorage(payload)
status_key = '/'.join([JOBS_PREFIX, payload['runtime_name']+'.meta'])
logger.info(f"Runtime metadata key {status_key}")
dmpd_response_status = json.dumps(runtime_meta)
internal_storage.put_data(status_key, dmpd_response_status) | [
297,
1888,
773
] |
def METHOD_NAME(project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTransferProjectServiceAccountResult:
"""
Use this data source to retrieve Storage Transfer service account for this project
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
default = gcp.storage.get_transfer_project_service_account()
pulumi.export("defaultAccount", default.email)
```
:param str project: The project ID. If it is not provided, the provider project is used.
"""
__args__ = dict()
__args__['project'] = project
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('gcp:storage/getTransferProjectServiceAccount:getTransferProjectServiceAccount', __args__, opts=opts, typ=GetTransferProjectServiceAccountResult).value
return AwaitableGetTransferProjectServiceAccountResult(
email=pulumi.get(__ret__, 'email'),
id=pulumi.get(__ret__, 'id'),
member=pulumi.get(__ret__, 'member'),
project=pulumi.get(__ret__, 'project'),
subject_id=pulumi.get(__ret__, 'subject_id')) | [
19,
1286,
155,
549,
598
] |
def METHOD_NAME(self):
result = natgateway.is_nat_gateway_profile_provided(None, 4)
self.assertTrue(result) | [
9,
6010,
1150,
659
] |
def METHOD_NAME(debug_log=api.current_logger().debug,
error_log=api.current_logger().error,
is_installed=_check_package,
append_function=_append_string,
check_function=_macro_exists):
"""
Iterate over dictionary and updates each configuration file.
:param func debug_log: function for debug logging
:param func error_log: function for error logging
:param func is_installed: checks if the package is installed
:param func append_function: appends a string into file
:param func check_function: checks if a string exists in file
"""
error_list = []
if not is_installed('sane-backends'):
return
for path, lines in NEW_QUIRKS.items():
debug_log('Updating SANE configuration file {}.'.format(path))
try:
update_config(path, lines, check_function, append_function)
except (OSError, IOError) as error:
error_list.append((path, error))
if error_list:
error_log('The files below have not been modified '
'(error message included):' +
''.join(['\n - {}: {}'.format(err[0], err[1])
for err in error_list]))
return | [
86,
1387
] |
def METHOD_NAME(inp):
return np.sum(inp, 1) | [
2257
] |
def METHOD_NAME(self): ... | [
1297,
865,
573,
771,
1024
] |
def METHOD_NAME() -> Tuple[str, ...]:
return ("core18",) | [
19,
616,
7346
] |
METHOD_NAME(specification, option_name, default_value, description): | [
837,
3255,
238,
962,
200,
1335
] |
def METHOD_NAME(strategy_optimizer) -> list:
return strategy_optimizer.risks | [
19,
968,
75,
-1
] |
def METHOD_NAME():
x, y, z = Ob('x'), Ob('y'), Ob('z')
f, g = Box('f', x, y), Box('g', y, z)
with raises(AxiomError) as err:
Arrow((g, ), x, y)
with raises(AxiomError) as err:
Arrow((f, ), x, z)
with raises(AxiomError) as err:
g >> f | [
9,
-1,
168
] |
def METHOD_NAME(query, parsed_query):
s = WorkSearchScheme()
assert s.process_user_query(query) == parsed_query | [
9,
356,
21,
539
] |
def METHOD_NAME(environment: str, request_id: str) -> str:
return f"sqlmesh_plan_application__{environment}__{request_id}" | [
145,
88,
5791,
147
] |
def METHOD_NAME(self):
if self.zoom_x_limits is not None:
self.ax.set_xlim(self.zoom_x_limits )
self.ax.set_ylim(self.zoom_y_limits) | [
0,
679,
2093
] |
def METHOD_NAME(self):
if self.templateRadioButton.isChecked():
self.historyFileLabel.setEnabled(False)
self.historyFileEdit.setEnabled(False)
self.browseHistoryButton.setEnabled(False)
self.templateFileLabel.setEnabled(True)
self.templateFileEdit.setEnabled(True)
self.browseTemplateButton.setEnabled(True)
button = self.buttonBox.button(QDialogButtonBox.Ok)
button.setEnabled(True) | [
697,
671,
1335
] |
def METHOD_NAME(self):
pass | [
709,
710
] |
def METHOD_NAME(self):
"""Performs C++ code generation for the feature."""
first = True
for struct in self.get_filtered_struct_names():
body = '' if first else '\n'
body += 'void EncodeStruct(ParameterEncoder* encoder, const {}& value)\n'.format(
struct
)
body += '{\n'
body += self.make_struct_body(
struct, self.feature_struct_members[struct], 'value.'
)
body += '}'
write(body, file=self.outFile)
first = False | [
567,
964
] |
def METHOD_NAME(loadout, is_beta) -> Union[str, bool]:
"""Return a URL for the current ship."""
# most compact representation
string = json.dumps(loadout, ensure_ascii=False, sort_keys=True, separators=(',', ':')).encode('utf-8')
if not string:
return False
out = io.BytesIO()
with gzip.GzipFile(fileobj=out, mode='w') as f:
f.write(string)
encoded = base64.urlsafe_b64encode(out.getvalue()).decode().replace('=', '%3D')
return _get_target_url(is_beta) + encoded | [
4086,
274
] |
def METHOD_NAME(goto_binary, fyle, target_folder):
fyle = os.path.normpath(fyle)
with TemporaryDirectory() as tmpdir:
module = get_module_name(fyle)
header_file = "{}_datastructure.h".format(module)
drop_header_cmd = ["goto-instrument",
"--dump-c-type-header",
module,
goto_binary,
header_file]
res = subprocess.run(drop_header_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
cwd=tmpdir)
if res.returncode:
logging.error("Dumping type header for file '%s' failed", fyle)
logging.error("The command `%s` returned %s",
drop_header_cmd,
res.stdout)
logging.error("The return code is %d", int(res.returncode))
sys.exit(1)
header = os.path.normpath(os.path.join(tmpdir, header_file))
collected = collect_defines(fyle)
logging.debug("Dumping the following header file to '%s':\n%s\n"
"// END GENERATED HEADER FILE", header, collected)
with open(header, "a") as out:
print(collected, file=out)
target_file = os.path.normpath(os.path.join(target_folder, header_file))
shutil.move(header, target_file) | [
93,
572,
171
] |
def METHOD_NAME(
self,
ib_contract_pattern: ibContract,
allow_expired: bool = False,
allow_multiple_contracts: bool = False,
) -> Union[ibContractDetails, List[ibContractDetails]]:
contract_details = self._get_contract_details(
ib_contract_pattern, allow_expired=allow_expired
)
if len(contract_details) == 0:
raise missingContract
if allow_multiple_contracts:
return contract_details
elif len(contract_details) > 1:
self.log.critical(
"Multiple contracts and only expected one - returning the first"
)
return contract_details[0] | [
19,
1522,
2051
] |
def METHOD_NAME(self, qubits):
return [] | [
7426
] |
def METHOD_NAME(pickleDir, logsDir, htmlDir):
aoSorting = ""
for i in range(len(ordering)):
aoSorting += "["+ str(i+1) +",'desc'],"
aoSorting += "[0, 'asc']"
style = """
<style type="text/css" title="currentStyle">
@import "/SDT/html/jsExt/dataTables/media/css/demo_table.css";
</style>
<script type="text/javascript" src="/SDT/html/jsExt/dataTables/media/js/jquery.js"></script>
<script type="text/javascript" src="/SDT/html/jsExt/dataTables/media/js/jquery.dataTables.js"></script>
<script type="text/javascript" charset="utf-8">
/* Initialise the table with the required column sorting data types */
$(document).ready(function() {
$('#mainTable').dataTable( {
"oLanguage": {
"sLengthMenu": "Display _MENU_ records per page",
"sInfoEmpty": "Showing 0 to 0 of 0 records"
},
"aaSorting": [%s]
} );
$('#descriptionTable').dataTable( {
"aaSorting": [[0, 'asc']],
"bPaginate": false,
"bLengthChange": false,
"bFilter": false,
"bSort": false,
"bInfo": false,
"bAutoWidth": false
});
$('#descriptionTable thead th').css({'border-bottom': '1px solid black'});
} );
</script>
"""%(aoSorting)
fmtr = SimpleHTMLFormatter(title="CMSSW integration builds", style=style, outFile = open(join(htmlDir,"cmsCRPage.html"), "w"))
bv = BuildViewer(fmtr, pickleDir, logsDir, htmlDir)
bv.showResults() | [
22
] |
def METHOD_NAME(self):
params = {'type': [DataSourceTypeChoices.LOCAL]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) | [
9,
44
] |
def METHOD_NAME(app):
return app['verbose_name'].lower()
# Could instead sort by model class name
#return str(app['model']).lower() | [
266,
604
] |
def METHOD_NAME(cls):
cls.bot_reference = HTMLTableParserBot
cls.default_input_message = EXAMPLE_REPORT | [
0,
1227
] |
async def METHOD_NAME(self, reset):
if self._addr_dut_reset is not None:
await self._device.write_register(self._addr_dut_reset, int(reset)) | [
0,
656
] |
def METHOD_NAME(self):
return modelformset_factory(self.formset_model, **self.get_formset_factory_kwargs()) | [
19,
7593,
2
] |
def METHOD_NAME(
path: Union[pathlib.Path, str], relative_to: Union[pathlib.Path, str] = None
) -> str:
"""
Convert a path to a displayable path. The absolute path or relative path to the
current (or given) directory will be returned, whichever is shorter.
"""
path, relative_to = (
pathlib.Path(path).resolve(),
pathlib.Path(relative_to or ".").resolve(),
)
relative_path = str(path.relative_to(relative_to))
absolute_path = str(path)
return relative_path if len(relative_path) < len(absolute_path) else absolute_path | [
24,
52,
157
] |
def METHOD_NAME(query, on):
"Create update or delete the query compliance check"
created = updated = deleted = False
if on:
if not isinstance(query.version, int):
query.refresh_from_db()
cc_defaults = {
"model": OsqueryCheck.get_model(),
"name": query.name,
"version": query.version,
"description": query.description
}
if not query.compliance_check:
query.compliance_check = ComplianceCheck.objects.create(**cc_defaults)
query.save()
created = True
else:
for key, val in cc_defaults.items():
if getattr(query.compliance_check, key) != val:
setattr(query.compliance_check, key, val)
updated = True
if updated:
query.compliance_check.save()
elif query.compliance_check:
query.compliance_check.delete()
deleted = True
return created, updated, deleted | [
164,
539,
4495,
250
] |
def METHOD_NAME(data: np.ndarray, sigma: float=0.02, clip: float=0.05):
assert clip > 0
jittered_data = np.clip(sigma * np.random.randn(*data.shape), -clip, clip)
data = data + jittered_data
return data | [
11509,
1669,
4054
] |
def METHOD_NAME(self):
cached_object.MOCK_REDIS_CACHE = fake_cache | [
0,
1
] |
def METHOD_NAME(td, r_0):
"""Calculate incident countrate given dead time and detected countrate."""
tau = 1 / r_0
return 1.0 / (tau - td) | [
3264,
623
] |
def METHOD_NAME(arr, tree, node, start, end):
if start == end:
tree[node] = arr[start]
return
mid = (start + end) // 2
METHOD_NAME(arr, tree, 2 * node, start, mid)
METHOD_NAME(arr, tree, 2 * node + 1, mid + 1, end)
tree[node] = tree[2 * node] + tree[2 * node + 1] | [
56,
4373,
151
] |
def METHOD_NAME(model, layer_index, filter_index):
_, conv = list(model.features._modules.items())[layer_index]
next_conv = None
offset = 1
while layer_index + offset < len(list(model.features._modules.items())):
res = list(model.features._modules.items())[layer_index+offset]
if isinstance(res[1], torch.nn.modules.conv.Conv2d):
next_name, next_conv = res
break
offset = offset + 1
new_conv = \
torch.nn.Conv2d(in_channels = conv.in_channels, \
out_channels = conv.out_channels - 1,
kernel_size = conv.kernel_size, \
stride = conv.stride,
padding = conv.padding,
dilation = conv.dilation,
groups = conv.groups)
#bias = conv.bias)
new_conv.bias = torch.nn.Parameter(conv.bias)
old_weights = conv.weight.data.cpu().numpy()
new_weights = new_conv.weight.data.cpu().numpy()
if filter_index > new_weights.shape[0]:
return model
new_weights[: filter_index, :, :, :] = old_weights[: filter_index, :, :, :]
new_weights[filter_index : , :, :, :] = old_weights[filter_index + 1 :, :, :, :]
new_conv.weight.data = torch.from_numpy(new_weights).cuda()
bias_numpy = conv.bias.data.cpu().numpy()
bias = np.zeros(shape = (bias_numpy.shape[0] - 1), dtype = np.float32)
bias[:filter_index] = bias_numpy[:filter_index]
bias[filter_index : ] = bias_numpy[filter_index + 1 :]
new_conv.bias.data = torch.from_numpy(bias).cuda()
if not next_conv is None:
next_new_conv = \
torch.nn.Conv2d(in_channels = next_conv.in_channels - 1,\
out_channels = next_conv.out_channels, \
kernel_size = next_conv.kernel_size, \
stride = next_conv.stride,
padding = next_conv.padding,
dilation = next_conv.dilation,
groups = next_conv.groups,)
#bias = next_conv.bias)
next_new_conv.bias = torch.nn.Parameter(next_conv.bias)
old_weights = next_conv.weight.data.cpu().numpy()
new_weights = next_new_conv.weight.data.cpu().numpy()
new_weights[:, : filter_index, :, :] = old_weights[:, : filter_index, :, :]
new_weights[:, filter_index : , :, :] = old_weights[:, filter_index + 1 :, :, :]
next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
next_new_conv.bias.data = next_conv.bias.data
if not next_conv is None:
features = torch.nn.Sequential(
*(replace_layers(model.features, i, [layer_index, layer_index+offset], \
[new_conv, next_new_conv]) for i, _ in enumerate(model.features)))
del model.features
del conv
model.features = features
else:
#Pruning the last conv layer. This affects the first linear layer of the classifier.
model.features = torch.nn.Sequential(
*(replace_layers(model.features, i, [layer_index], \
[new_conv]) for i, _ in enumerate(model.features)))
layer_index = 0
old_linear_layer = None
for _, module in model.classifier._modules.items():
if isinstance(module, torch.nn.Linear):
old_linear_layer = module
break
layer_index = layer_index + 1
if old_linear_layer is None:
raise BaseException("No linear layer found in classifier")
params_per_input_channel = old_linear_layer.in_features // conv.out_channels
new_linear_layer = \
torch.nn.Linear(old_linear_layer.in_features - params_per_input_channel,
old_linear_layer.out_features)
old_weights = old_linear_layer.weight.data.cpu().numpy()
new_weights = new_linear_layer.weight.data.cpu().numpy()
new_weights[:, : filter_index * params_per_input_channel] = \
old_weights[:, : filter_index * params_per_input_channel]
new_weights[:, filter_index * params_per_input_channel :] = \
old_weights[:, (filter_index + 1) * params_per_input_channel :]
new_linear_layer.bias.data = old_linear_layer.bias.data
new_linear_layer.weight.data = torch.from_numpy(new_weights).cuda()
classifier = torch.nn.Sequential(
*(replace_layers(model.classifier, i, [layer_index], \
[new_linear_layer]) for i, _ in enumerate(model.classifier)))
del model.classifier
del next_conv
del conv
model.classifier = classifier
return model | [
3724,
4401,
1306,
94
] |
def METHOD_NAME(self):
table = Table(data)
port_ids = []
for i in range(10):
port_ids.append(table.make_port())
assert port_ids == list(range(1, 11))
view = table.view()
ports_to_update = [random.randint(0, 10) for i in range(5)]
def callback(port_id):
assert port_id in ports_to_update
view.on_update(callback)
for port in ports_to_update:
table.update(data, port_id=port) | [
9,
907,
427,
246,
959,
217,
5241
] |
def METHOD_NAME(*args, **kwargs):
"""Wraps sqlite3.dbapi.connect(), returning a wrapped connection."""
global connect_counter
conn = old_connect(*args, **kwargs) # pylint: disable=not-callable
return ConnectionWrapper(conn) | [
707
] |
def METHOD_NAME(self, position_index):
"""
Sets new aperture position
Args:
position_index: position index (int)
Returns:
"""
self.chan_position.set_value(self._position_list[position_index]) | [
0,
195,
724
] |
def METHOD_NAME(params): ... | [
129
] |
def METHOD_NAME(tab1, tab2):
"""Helper function to check tab match"""
assert tab1.child is tab2.child
assert tab1.name == tab2.name
assert tab1.title == tab2.title | [
638,
5678,
137,
7305
] |
def METHOD_NAME(self):
aliases = [
"SystemAsterisk",
"SystemExclamation",
"SystemExit",
"SystemHand",
"SystemQuestion",
]
for alias in aliases:
with self.subTest(alias=alias):
safe_PlaySound(alias, winsound.SND_ALIAS) | [
9,
2334
] |
def METHOD_NAME(tmp_path):
with tempconfig(
{
"media_dir": tmp_path,
"save_sections": True,
"log_to_file": True,
"frame_rate": 15,
"pixel_height": 854,
"pixel_width": 480,
"save_sections": True,
"sections_dir": "{media_dir}/test_sections",
"video_dir": "{media_dir}/test_video",
"partial_movie_dir": "{media_dir}/test_partial_movie_dir",
"images_dir": "{media_dir}/test_images",
"text_dir": "{media_dir}/test_text",
"tex_dir": "{media_dir}/test_tex",
"log_dir": "{media_dir}/test_log",
}
):
scene = MyScene()
scene.render()
tmp_path = Path(tmp_path)
assert_dir_filled(tmp_path / "test_sections")
assert_file_exists(tmp_path / "test_sections/MyScene.json")
assert_dir_filled(tmp_path / "test_video")
assert_file_exists(tmp_path / "test_video/MyScene.mp4")
assert_dir_filled(tmp_path / "test_partial_movie_dir")
assert_file_exists(
tmp_path / "test_partial_movie_dir/partial_movie_file_list.txt"
)
# TODO: another example with image output would be nice
assert_dir_exists(tmp_path / "test_images")
assert_dir_filled(tmp_path / "test_text")
assert_dir_filled(tmp_path / "test_tex")
assert_dir_filled(tmp_path / "test_log") | [
9,
343,
2413
] |
def METHOD_NAME(modelpath, filepath, sceneitems={}):
"""Writes loaded model under modelpath to a file in SBML format.
(helper function for writeSBML).
"""
global sbmlImport_, sbmlError_
if not sbmlImport_:
raise ImportError(
"SBML support could not be loaded because of '%s'" % sbmlError_
)
return _writeSBML.METHOD_NAME(modelpath, filepath, sceneitems) | [
13866,
77,
8040
] |
def METHOD_NAME(self):
super(TestEditorJournalReview, self).METHOD_NAME()
self.editor_group_pull = models.EditorGroup.pull_by_key
models.EditorGroup.pull_by_key = editor_group_pull
self.old_lookup_code = lcc.lookup_code
lcc.lookup_code = mock_lookup_code | [
0,
1
] |
def METHOD_NAME(page_name, **kwargs):
"""Returns panda.DataFrames.
This is suitable mock for pd.read_excel method when used together with
FakeExtractor.
"""
fake_data = {
"books-2010": pd.DataFrame.from_dict(
{"book_title": ["Tao Te Ching"], "name": ["Laozi"], "pages": [0]}
),
"books-2011": pd.DataFrame.from_dict(
{
"title_of_book": ["The Tao of Pooh"],
"author": ["Benjamin Hoff"],
"pages": [158],
}
),
"boxes-2010": pd.DataFrame.from_dict(
{"composition": ["cardboard"], "size_inches": [10]}
),
"boxes-2011": pd.DataFrame.from_dict(
{"composition": ["metal"], "size_cm": [99]}
),
}
return fake_data[page_name] | [
1278,
365,
1427
] |
def METHOD_NAME(self, attribute, value):
if value > self.y_max:
raise ValueError("'y_min' must be smaller than 'y_max'") | [
250,
320,
1835
] |
def METHOD_NAME(arch):
archlib_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
arch.name + "_defs.py")
return load_module(archlib_path) | [
557,
2837,
124
] |
def METHOD_NAME(client, config):
# ensure the new index is added to the alias used for incremental loads.
# If the alias is on multiple indexes, the loads will fail!
logger.info(format_log(f"Putting alias '{config['write_alias']}' on {config['index_name']}", action="ES Alias"))
put_alias(client, config["index_name"], config["write_alias"], {}) | [
129,
557,
533
] |
def METHOD_NAME(style: ttk.Style) -> None:
style.theme_create(
THEME_DARK,
"clam",
{
".": {
"configure": {
"background": Colors.frame,
"foreground": Colors.white,
"bordercolor": Colors.darkest,
"darkcolor": Colors.dark,
"lightcolor": Colors.lighter,
"troughcolor": Colors.darker,
"selectbackground": Colors.selectbg,
"selectforeground": Colors.selectfg,
"selectborderwidth": 0,
"font": "TkDefaultFont",
},
"map": {
"background": [
("disabled", Colors.frame),
("active", Colors.lighter),
],
"foreground": [("disabled", Colors.disabledfg)],
"selectbackground": [("!focus", Colors.darkest)],
"selectforeground": [("!focus", Colors.white)],
},
},
"TButton": {
"configure": {
"width": 8,
"padding": (5, 1),
"relief": tk.RAISED,
"anchor": tk.CENTER,
},
"map": {
"relief": [("pressed", tk.SUNKEN)],
"shiftrelief": [("pressed", 1)],
},
},
"TMenubutton": {"configure": {"padding": (5, 1), "relief": tk.RAISED}},
"TCheckbutton": {
"configure": {
"indicatorbackground": Colors.white,
"indicatormargin": (1, 1, 4, 1),
}
},
"TRadiobutton": {
"configure": {
"indicatorbackground": Colors.white,
"indicatormargin": (1, 1, 4, 1),
}
},
"TEntry": {
"configure": {
"fieldbackground": Colors.white,
"foreground": Colors.black,
"padding": (2, 0),
},
"map": {"fieldbackground": [("disabled", Colors.frame)]},
},
"TSpinbox": {
"configure": {
"fieldbackground": Colors.white,
"foreground": Colors.black,
"padding": (2, 0),
},
"map": {"fieldbackground": [("disabled", Colors.frame)]},
},
"TCombobox": {
"configure": {
"fieldbackground": Colors.white,
"foreground": Colors.black,
"padding": (2, 0),
}
},
"TLabelframe": {"configure": {"relief": tk.GROOVE}},
"TNotebook.Tab": {
"configure": {"padding": (6, 2, 6, 2)},
"map": {"background": [("selected", Colors.lighter)]},
},
"Treeview": {
"configure": {
"fieldbackground": Colors.white,
"background": Colors.white,
"foreground": Colors.black,
},
"map": {
"background": [("selected", Colors.selectbg)],
"foreground": [("selected", Colors.selectfg)],
},
},
Styles.tooltip: {
"configure": {"justify": tk.LEFT, "relief": tk.SOLID, "borderwidth": 0}
},
Styles.tooltip_frame: {"configure": {}},
Styles.service_checkbutton: {
"configure": {
"background": Colors.listboxbg,
"foreground": Colors.black,
}
},
},
) | [
557
] |
def METHOD_NAME(self, value):
if value not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[value] = [value, curr, end] | [
238
] |
def METHOD_NAME(fg, alpha, img, bg, trans_info, writer, fg_estimate):
"""
Postprocess for prediction results.
Args:
fg (Tensor): The foreground, value should be in [0, 1].
alpha (Tensor): The alpha, value should be in [0, 1].
img (Tensor): The original image, value should be in [0, 1].
trans_info (list): A list of the shape transformations.
writers (dict): A dict of VideoWriter instance.
fg_estimate (bool): Whether to estimate foreground. It is invalid when fg is not None.
"""
alpha = reverse_transform(alpha, trans_info)
bg = F.interpolate(bg, size=alpha.shape[-2:], mode='bilinear')
if fg is None:
if fg_estimate:
img = img.transpose((0, 2, 3, 1)).squeeze().numpy()
alpha = alpha.squeeze().numpy()
fg = estimate_foreground_ml(img, alpha)
bg = bg.transpose((0, 2, 3, 1)).squeeze().numpy()
else:
fg = img
else:
fg = reverse_transform(fg, trans_info)
if len(alpha.shape) == 2:
alpha = alpha[:, :, None]
new_img = alpha * fg + (1 - alpha) * bg
writer.write(new_img) | [
1710
] |
def METHOD_NAME(self):
"""
This function visualizes the linear unmixing result, represented by the blood oxygen saturation.
The user has to check if the test was successful.
"""
sp.simulate(self.pipeline, self.settings, self.device)
# Run linear unmixing component with above specified settings
sp.LinearUnmixing(self.settings, "linear_unmixing").run()
self.logger.info("Testing linear unmixing...")
# Load blood oxygen saturation
self.lu_results = sp.load_data_field(self.settings[Tags.SIMPA_OUTPUT_PATH], Tags.LINEAR_UNMIXING_RESULT)
self.sO2 = self.lu_results["sO2"]
# Load reference absorption for the first wavelength
self.mua = sp.load_data_field(self.settings[Tags.SIMPA_OUTPUT_PATH], Tags.DATA_FIELD_ABSORPTION_PER_CM,
wavelength=self.VISUAL_WAVELENGTHS[0]) | [
407,
9
] |
def METHOD_NAME(self):
if self.nmu_enable:
return self.nmu_info,[d.output() for d in self.nmu.data]
else:
print("noc_nmu feature disabled") | [
19,
365
] |
def METHOD_NAME() -> tkinter.Tk:
"""Return the tkinter root window that Porcupine is using."""
return _get_state().root | [
19,
57,
1092
] |
def METHOD_NAME(input_string: str, char: str) -> str:
if not input_string.endswith(char):
input_string += char
return input_string | [
602,
1531,
41
] |
def METHOD_NAME(cls, node, runtime, keys, id_generator):
"""
Construct this XBlock from the given XML node.
"""
block = runtime.construct_xblock_from_class(cls, keys)
block.content = str(node.text or "")
if 'type' in node.attrib: # 'type' is optional - default is 'completed'
block.type = node.attrib['type']
for child in node:
block.content += etree.tostring(child, encoding='unicode')
return block | [
214,
399
] |
def METHOD_NAME(client):
"""Tests cancel a task with uid 1."""
task = client.cancel_tasks({"uids": ["1", "2"]})
client.wait_for_task(task.task_uid)
tasks = client.get_tasks({"types": "taskCancelation"})
assert isinstance(task, TaskInfo)
assert task.task_uid is not None
assert task.index_uid is None
assert task.type == "taskCancelation"
assert "uids" in tasks.results[0].details["originalFilter"]
assert "uids=1%2C2" in tasks.results[0].details["originalFilter"] | [
9,
608,
620
] |
def METHOD_NAME(cfg, outs):
"""Schedule for bitserial_dense.
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial dense operator.
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for bitserial_dense.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(cfg, s, data_vec, weight_vec, output):
s[data_vec].parallel(s[data_vec].op.axis[0])
s[weight_vec].parallel(s[weight_vec].op.axis[0])
y, x = s[output].op.axis
wb, db, k = s[output].op.reduce_axis
yo, yi = cfg["tile_y"].apply(s, output, y)
xo, xi = cfg["tile_x"].apply(s, output, x)
ko, ki = cfg["tile_k"].apply(s, output, k)
cfg["reorder_0"].apply(s, output, [yo, xo, ko, yi, wb, db, ki, xi])
cfg["ann_reduce"].apply(
s,
output,
[db, wb],
axis_lens=[get_const_int(db.dom.extent), get_const_int(wb.dom.extent)],
max_unroll=8,
cfg=cfg,
)
cfg["ann_spatial"].apply(
s,
output,
[yi, xi],
axis_lens=[cfg["tile_y"].size[-1], cfg["tile_x"].size[-1]],
max_unroll=8,
cfg=cfg,
)
s[output].vectorize(xi)
s[output].parallel(yo)
return s
def traverse(op):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag) or "elemwise" in op.tag:
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.ComputeOp):
traverse(tensor.op)
elif op.tag == "bitserial_dense" or "bitserial_dense_unipolar":
output = op.output(0)
weight_vec = op.input_tensors[0]
data_vec = op.input_tensors[1]
data = data_vec.op.input_tensors[0]
if "QuantizeInput" in data.op.name:
data = data.op.input_tensors[0]
_schedule(cfg, s, data_vec, weight_vec, output)
else:
raise RuntimeError(f"Unsupported operator: {op.tag}")
traverse(outs[0].op)
return s | [
507,
6464,
3829
] |
def METHOD_NAME(self, args: 'cirq.ApplyUnitaryArgs'):
if isinstance(self.exponent, sympy.Basic):
return NotImplemented
n = int(np.prod([args.target_tensor.shape[k] for k in args.axes], dtype=np.int64))
for i in range(n):
p = 1j ** (4 * i / n * self.exponent)
args.target_tensor[args.subspace_index(big_endian_bits_int=i)] *= p
return args.target_tensor | [
231,
7544
] |
def METHOD_NAME(self) -> Optional[int]:
return pulumi.get(self, "duration_seconds") | [
2205,
633
] |
def METHOD_NAME(self, *args):
"""Same as :meth:`send`, but sends an exception to waiters.
The arguments to send_exception are the same as the arguments
to ``raise``. If a single exception object is passed in, it
will be re-raised when :meth:`wait` is called, generating a
new stacktrace.
>>> from eventlet import event
>>> evt = event.Event()
>>> evt.send_exception(RuntimeError())
>>> evt.wait()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "eventlet/event.py", line 120, in wait
current.throw(*self._exc)
RuntimeError
If it's important to preserve the entire original stack trace,
you must pass in the entire :func:`sys.exc_info` tuple.
>>> import sys
>>> evt = event.Event()
>>> try:
... raise RuntimeError()
... except RuntimeError:
... evt.send_exception(*sys.exc_info())
...
>>> evt.wait()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "eventlet/event.py", line 120, in wait
current.throw(*self._exc)
File "<stdin>", line 2, in <module>
RuntimeError
Note that doing so stores a traceback object directly on the
Event object, which may cause reference cycles. See the
:func:`sys.exc_info` documentation.
"""
# the arguments and the same as for greenlet.throw
return self.send(None, args) | [
353,
442
] |
def METHOD_NAME(s, accept_tags, group_by, max_items=3):
tag = []
text = b""
count = 0
current = {}
while True:
char = s.read(1)
if len(char) == 0:
break
if char == b"<":
next_char = s.read(1)
# Discard stuff like <?xml vers...
if next_char == b"?":
discard_until(s, b">")
continue
# Detect <![CDATA
elif next_char == b"!":
s.read(1) # Discard [
discard_until(s, b"[") # Discard CDATA[
text = read_until(s, b"]")
discard_until(s, b">") # Discard ]>
gc.collect()
elif next_char == b"/":
current_tag = read_until(s, b">")
top_tag = tag[-1]
# Populate our result dict
if top_tag in accept_tags:
current[top_tag.decode("utf-8")] = text.decode("utf-8")
# If we've found a group of items, yield the dict
elif top_tag == group_by:
yield current
current = {}
count += 1
if count == max_items:
return
tag.pop()
text = b""
gc.collect()
continue
else:
current_tag = read_until(s, b">")
tag += [next_char + current_tag.split(b" ")[0]]
text = b""
gc.collect()
else:
text += char | [
214,
399,
919
] |
def METHOD_NAME(self):
self._dll.tf_bf.restype = c_float
self._dll.tf_bf.argtypes = (c_byte, c_float)
self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
self.assertEqual(self.S(), -42) | [
9,
1819,
222
] |
def METHOD_NAME(self, dump=False, func_name=None):
raise NotImplementedError | [
19,
1737,
365
] |
def METHOD_NAME(self, old, new, fields=None):
"""
Compare data to help changes
NB
must be same sweep
( same index must be same frequency )
:param old:
:param new:
"""
fields = fields or [
("freq", str),
]
def no_compare():
return {k: "-" for k, _ in fields}
old_idx = sorted(old.keys())
# 'odict_keys' object is not subscriptable
new_idx = sorted(new.keys())
diff = {}
i_max = min(len(old_idx), len(new_idx))
i_tot = max(len(old_idx), len(new_idx))
if i_max != i_tot:
logger.warning(
"resonances changed from %s to %s", len(old_idx), len(new_idx)
)
split = 0
max_delta_f = 1_000_000
for i, k in enumerate(new_idx):
if len(old_idx) <= i + split:
diff[i] = no_compare()
continue
logger.info("Resonance %s at %s", i, new[k]["freq"])
delta_f = new[k]["freq"] - old[old_idx[i + split]]["freq"]
if abs(delta_f) < max_delta_f:
logger.debug("can compare")
diff[i] = {
desc: fnc(new[k][desc] - old[old_idx[i + split]][desc])
for desc, fnc in fields
}
logger.debug("Deltas %s", diff[i])
continue
logger.debug(
"can't compare, %s is too much ", format_frequency(delta_f)
)
if delta_f > 0:
logger.debug("possible missing band, ")
if len(old_idx) > (i + split + 1):
if (
abs(
new[k]["freq"] - old[old_idx[i + split + 1]]["freq"]
)
< max_delta_f
):
logger.debug("new is missing band, compare next ")
split += 1
# FIXME: manage 2 or more band missing ?!?
continue
logger.debug("new band, non compare ")
diff[i] = no_compare()
continue
logger.debug("new band, non compare ")
diff[i] = no_compare()
split -= 1
for i in range(i_max, i_tot):
# add missing in old ... if any
diff[i] = no_compare()
return diff | [
979
] |
METHOD_NAME(self): | [
697,
-1
] |
def METHOD_NAME(color, width, dashStyle=DashStyleSolid):
"""Creates a GDI+ pen that is automatically destroyed when finished drawing.
@param color: an ARGB color.
@type color: int
@param width: The width of the pen, in pixels.
@type width: int
@param dashStyle: The style of the line(s) to be drawn.
This is one of the C{DashStyle*} constants.
Defaults to C{DashStyleSolid}, which draws solid lines.
@type dashStyle: int
"""
gpPen = c_void_p()
gpStatus = gdiplus.GdipCreatePen1(color, width, UnitPixel, byref(gpPen))
if gpStatus:
raise RuntimeError("GdipCreatePen1 failed with status code %d" % gpStatus)
gpStatus = gdiplus.GdipSetPenDashStyle(gpPen, dashStyle)
if gpStatus:
raise RuntimeError("GdipSetPenDashStyle failed with status code %d" % gpStatus)
try:
yield gpPen
finally:
gdiplus.GdipDeletePen(gpPen) | [
11782,
222,
15303
] |
def METHOD_NAME(self) -> None:
with patch("antlir.rpm.storage.s3_storage.open_url") as open_url:
with self.storage.reader("test/prefix/1234") as _:
pass
open_url.assert_called_with(
"https://antlir-test.s3-test-region.amazonaws.com/" "test/prefix/1234"
) | [
9,
781,
274
] |
def METHOD_NAME(*args, **kwargs):
return AcunetixPlugin(*args, **kwargs) | [
129,
2793
] |
def METHOD_NAME(self, md):
# replace HashHeader/SetextHeader processors with our custom variants
md.parser.blockprocessors.register(
CustomHashHeaderProcessor(md.parser), "hashheader", 70
)
md.parser.blockprocessors.register(
CustomSetextHeaderProcessor(md.parser), "setextheader", 60
)
# the tree processor adds the actual edit links
add_to_registry(
md.treeprocessors,
"editsection",
EditSectionProcessor(self.config, md),
"_end",
) | [
978,
108
] |
def METHOD_NAME(self):
# have to finalize 'plat_name' before 'bdist_base'
if self.plat_name is None:
if self.skip_build:
self.plat_name = get_platform()
else:
self.plat_name = self.get_finalized_command('build').plat_name
# 'bdist_base' -- parent of per-built-distribution-format
# temporary directories (eg. we'll probably have
# "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
if self.bdist_base is None:
build_base = self.get_finalized_command('build').build_base
self.bdist_base = os.path.join(build_base,
'bdist.' + self.plat_name)
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create built distributions "
"on platform %s" % os.name)
if self.dist_dir is None:
self.dist_dir = "dist" | [
977,
1881
] |
def METHOD_NAME(self) -> _Structure:
"""Structure of the structured sampler formatted as a
:func:`~collections.namedtuple` where the 3-tuple values are the
:attr:`.nodelist`, :attr:`.edgelist` and :attr:`.adjacency` attributes.
"""
return _Structure(self.nodelist, self.edgelist, self.adjacency) | [
1011
] |
def METHOD_NAME(self):
credit = self.pop("credit")
if credit is not None and credit.strip():
context = self._get_context()
context["media_credit"][-1]["content"] = credit | [
1798,
1091,
8534
] |
def METHOD_NAME(self, block_index, seg_index, i_start, i_stop,
stream_index, channel_indexes):
stream_id = self.header['signal_streams'][stream_index]['id']
global_channel_indexes, = np.nonzero(self.header['signal_channels']
['stream_id'] == stream_id)
if channel_indexes is None:
channel_indexes = slice(None)
inds = global_channel_indexes[channel_indexes]
raw_signals = self._raw_signals[seg_index][slice(i_start, i_stop), inds]
return raw_signals | [
19,
-1,
464
] |
def METHOD_NAME(self, mac_tag):
"""Verify that a given **binary** MAC (computed by another party)
is valid.
Args:
mac_tag (byte string/byte string/memoryview): the expected MAC of the message.
Raises:
ValueError: if the MAC does not match. It means that the message
has been tampered with or that the MAC key is incorrect.
"""
secret = get_random_bytes(16)
mac1 = BLAKE2s.new(digest_bits=160, key=secret, data=mac_tag)
mac2 = BLAKE2s.new(digest_bits=160, key=secret, data=self.digest())
if mac1.digest() != mac2.digest():
raise ValueError("MAC check failed") | [
1162
] |
def METHOD_NAME(self):
tf.compat.v1.disable_eager_execution()
x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input")
if tf.version.VERSION <= "2.1.0":
x = tf.nn.relu(x)
conv_weights = tf.compat.v1.get_variable(
"weights", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer()
)
conv = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME")
normed = tf.nn.bias_add(
conv,
tf.constant(
[
3.0,
1.2,
1.0,
2,
3,
4,
5,
6,
7,
8,
0,
1,
4.0,
5.2,
8.1,
2,
4,
5,
8,
9,
10,
12,
11,
2,
5.0,
7.2,
3.2,
3,
4,
5,
7,
8,
]
),
)
relu = tf.nn.relu(normed, name="Relu_0")
op_wise_sequences = TensorflowQuery(
local_config_file=os.path.join(os.path.dirname(__file__), "../../neural_compressor/adaptor/tensorflow.yaml")
).get_eightbit_patterns()
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]]
)
output_graph_def = QuantizeGraphHelper.remove_training_nodes(
output_graph_def, protected_nodes=[relu.name.split(":")[0]]
)
inputs = [x.name.split(":")[0]]
outputs = [relu.name.split(":")[0]]
op_wise_config = {
"Conv2D": (False, "minmax", False, 7.0),
}
int8_graph_def, _, _ = QuantizeGraphForIntel(
output_graph_def, inputs, outputs, op_wise_config, op_wise_sequences, "cpu"
).do_transform()
correct_graph_def = BiasCorrection(int8_graph_def, output_graph_def).do_transformation()
self.assertEqual(len(correct_graph_def.node), len(int8_graph_def.node)) | [
9,
1173,
2451,
2228,
58
] |
def METHOD_NAME(event):
end_name = "head" if event.element is head.subject else "tail"
self.update_end_name(builder, end_name, event.element) | [
156,
1519
] |
def METHOD_NAME(graph: GraphRepr) -> Set[str]:
edge_list: List[str] = []
for _, dependent_collections in graph.items():
for _, edges in dependent_collections.items():
if edges:
edge_list.extend(edges)
return set(edge_list) | [
75,
491
] |
async def METHOD_NAME(
protocol, mocker, flow_cls, return_responses, return_class, use_stream
):
r_val = mocker.Mock()
with flow_cls(protocol=protocol, asyncio=True).add() as f:
async for r in f.index(
from_ndarray(np.random.random([num_docs, 4])),
on_done=r_val,
return_responses=return_responses,
stream=use_stream
):
assert isinstance(r, return_class)
validate_callback(r_val, validate) | [
9,
22,
958,
233
] |
def METHOD_NAME(self):
"""Create a test HDF4 file."""
from pyhdf.SD import SD, SDC
h = SD('test.hdf', SDC.WRITE | SDC.CREATE | SDC.TRUNC)
data = np.arange(10. * 100, dtype=np.float32).reshape((10, 100))
v1 = h.create('ds1_f', SDC.FLOAT32, (10, 100))
v1[:] = data
v2 = h.create('ds1_i', SDC.INT16, (10, 100))
v2[:] = data.astype(np.int16)
# Add attributes
h.test_attr_str = 'test_string'
h.test_attr_int = 0
h.test_attr_float = 1.2
# h.test_attr_str_arr = np.array(b"test_string2")
for d in [v1, v2]:
d.test_attr_str = 'test_string'
d.test_attr_int = 0
d.test_attr_float = 1.2
h.end() | [
0,
1
] |
def METHOD_NAME(reports: List[str] = ASSERTIONS, save_new: bool = True):
excludes, indexer_swgr, algod_swgr = tsetup()
"""
For each report in reports:
1. load the pre-existing yaml report into `old_diff`
2. re-generate the equivalent report by comparing `algod_swgr` with `indexer_swgr`
3. compute the `diff_of_diffs` between these two reports
4. assert that there is no diff
"""
if save_new:
save_reports(*reports)
for diff_type in reports:
ypath = get_report_path(diff_type, for_write=False)
with open(ypath, "r") as f:
old_diff = yaml.safe_load(f)
new_diff = generate_diff(algod_swgr, indexer_swgr, excludes, diff_type)
diff_of_diffs = deep_diff(old_diff, new_diff, arraysets=True)
assert (
diff_of_diffs is None
), f"""UNEXPECTED CHANGE IN {ypath}. Differences are: | [
9,
12270
] |
def METHOD_NAME(self) -> None:
ocio.GetCurrentConfig().clearNamedTransforms() | [
537,
1768
] |
def METHOD_NAME(sender, instance, **kwargs):
"""Ensure default_answers are cleanedup."""
instance.default_answer.delete() | [
950,
235,
3485
] |
def METHOD_NAME(expected):
def check(actual):
actual = pd.concat(actual)
if sorted(expected) != sorted(actual):
raise AssertionError('Series not equal: \n%s\n%s\n' % (expected, actual))
return check | [
926,
24,
1315,
4045
] |
def METHOD_NAME(coefficients_functions):
""" For randomly set A, B, C, F, g functions. The generated parameters must equal
those given by equations.
"""
A, B, C, cnst_F, cnst_g, path, inv_dyn = coefficients_functions
constraint = toppra.constraint.SecondOrderConstraint(
inv_dyn, cnst_F, cnst_g, dof=2,
discretization_scheme=toppra.constraint.DiscretizationType.Collocation)
a, b, c, F, g, _, _ = constraint.compute_constraint_params(
path, np.linspace(0, path.duration, 10))
# Correct params
q_vec = path(np.linspace(0, path.duration, 10))
qs_vec = path(np.linspace(0, path.duration, 10), 1)
qss_vec = path(np.linspace(0, path.duration, 10), 2)
for i in range(10):
ai_ = A(q_vec[i]).dot(qs_vec[i])
bi_ = A(q_vec[i]).dot(qss_vec[i]) + np.dot(qs_vec[i].T, B(q_vec[i]).dot(qs_vec[i]))
ci_ = C(q_vec[i])
np.testing.assert_allclose(ai_, a[i])
np.testing.assert_allclose(bi_, b[i])
np.testing.assert_allclose(ci_, c[i])
np.testing.assert_allclose(cnst_F(q_vec[i]), F[i])
np.testing.assert_allclose(cnst_g(q_vec[i]), g[i]) | [
9,
7050
] |
def METHOD_NAME(testerchain):
worker_private_key = os.urandom(32).hex()
address = testerchain.provider.ethereum_tester.add_account(
worker_private_key,
password=INSECURE_DEVELOPMENT_PASSWORD
)
tx = {'to': address,
'from': testerchain.etherbase_account,
'value': Web3.to_wei('1', 'ether')}
txhash = testerchain.client.w3.eth.send_transaction(tx)
_receipt = testerchain.wait_for_receipt(txhash)
yield address | [
2663,
837
] |
def METHOD_NAME(self):
pyfunc = compare_usecase
cfunc = jit(nopython=True)(pyfunc)
for args in self.pairs:
self.assertPreciseEqual(pyfunc(*args), cfunc(*args)) | [
9,
979
] |
def METHOD_NAME(pgconn):
with pytest.raises(psycopg.OperationalError):
pgconn.put_copy_data(b"wat")
pgconn.finish()
with pytest.raises(psycopg.OperationalError):
pgconn.put_copy_data(b"wat") | [
9,
1276,
365,
654,
215
] |
def METHOD_NAME(self, known_commands=None, known_options=None, aliases=None):
return GetoptLexer(CommandLineLexer(),
known_commands=known_commands or self._known_commands,
known_options=known_options,
aliases=aliases or self._aliases) | [
363,
5886
] |
def METHOD_NAME(signor_data_file=None, signor_complexes_file=None):
"""Process Signor interaction data from the web.
This downloads the latest interaction data directly from the Signor
website without an intermediate local file.
Parameters
----------
signor_data_file : Optional[str]
If specified, the interaction data will be written to this file.
signor_complexes_file : Optional[str]
If specified, the complex data will be written to this file.
Returns
-------
indra.sources.signor.SignorProcessor
SignorProcessor containing Statements extracted from the Signor data.
"""
# Get interaction data
data_url = 'https://signor.uniroma2.it/download_entity.php'
res = requests.post(data_url, data={'organism': 'human', 'format': 'csv',
'submit': 'Download'})
data_iter = _handle_response(res, '\t', fname=signor_data_file)
# Get complexes
complexes_url = 'https://signor.uniroma2.it/download_complexes.php'
res = requests.post(complexes_url,
data={'submit': 'Download complex data'})
complexes_iter = _handle_response(res, ';', fname=signor_complexes_file)
return _processor_from_data(data_iter, complexes_iter) | [
356,
280,
2412
] |
def METHOD_NAME(self) -> str:
"""
Contents of the Policy as defined by the format.
"""
return pulumi.get(self, "value") | [
99
] |
def METHOD_NAME(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0) | [
2011
] |
def METHOD_NAME(callback):
"""
Return `True` if the given view callback is a REST framework view/viewset.
"""
# Avoid import cycle on APIView
from rest_framework.views import APIView
cls = getattr(callback, 'cls', None)
return (cls is not None) and issubclass(cls, APIView) | [
137,
58,
1179
] |
def METHOD_NAME(name):
spec = request.args.get('spec')
if spec is None:
return bad_request("No test spec provided")
try:
returncode, stdout, stderr = pscheduler.plugin_invoke(
"test", name, "participants",
stdin=spec,
)
except KeyError:
return bad_request("Invalid spec")
except Exception as ex:
return bad_request(ex)
if returncode != 0:
return bad_request(stderr)
# If this fails because of bad JSON, an exception will be thrown,
# caught and logged.
return ok_json(pscheduler.json_load(stdout, max_schema=1), sanitize=False) | [
450,
156,
8200
] |
def METHOD_NAME(self):
gpf = GaussianProcessFilter()
sine_ts = tg.sine_timeseries(length=30, value_frequency=0.1)
noise_ts = tg.gaussian_timeseries(length=30) * 0.1
ts = sine_ts.stack(noise_ts)
prediction = gpf.filter(ts)
assert prediction.width == 2 | [
9,
4008,
356,
4049
] |