text
stringlengths 15
7.82k
| ids
sequencelengths 1
7
|
---|---|
def METHOD_NAME():
    """Verify nmstate state generation for two bridgeless networks on one NIC.

    TESTNET1 gets a static IPv4 address; TESTNET2 is a VLAN on the same
    interface with both IP stacks disabled.
    """
    networks = {
        TESTNET1: create_network_config(
            'nic',
            IFACE0,
            bridged=False,
            static_ip_configuration=create_static_ip_configuration(
                IPv4_ADDRESS1, IPv4_NETMASK1, None, None
            ),
        ),
        TESTNET2: create_network_config(
            'nic', IFACE0, bridged=False, vlan=VLAN101
        ),
    }
    state = nmstate.generate_state(networks=networks, bondings={})
    # Build the expected interface states by composing the helper fragments.
    bridgeless_state = create_ethernet_iface_state(IFACE0)
    vlan0_state = create_vlan_iface_state(IFACE0, VLAN101)
    ipv4_state = create_ipv4_state(IPv4_ADDRESS1, IPv4_PREFIX1)
    ipv4_disabled_state = create_ipv4_state()
    ipv6_disabled_state = create_ipv6_state()
    bridgeless_state.update(ipv4_state)
    bridgeless_state.update(ipv6_disabled_state)
    vlan0_state.update(ipv4_disabled_state)
    vlan0_state.update(ipv6_disabled_state)
    expected_state = {nmstate.Interface.KEY: [bridgeless_state, vlan0_state]}
    assert expected_state == state
9,
-1,
61,
4838,
7479,
69,
983
] |
async def METHOD_NAME(pipeline_response):
    """Extract one page of elements from a StatisticsListResult response.

    Returns a (continuation_token, AsyncList) pair; the token is always None
    here, i.e. this extractor never requests a further page.
    """
    deserialized = self._deserialize("StatisticsListResult", pipeline_response)
    list_of_elem = deserialized.value
    # `cls` is the optional custom-response hook used by azure-core paging.
    if cls:
        list_of_elem = cls(list_of_elem)
    return None, AsyncList(list_of_elem)
297,
365
] |
def METHOD_NAME(self, name: str, arr: np.ndarray):
    """
    Vapor expects data to be in order='C' with X as the fastest varying dimension.
    You can swap your axes with np.swapaxes(data, 0, -1).
    """
    self.__checkNameValid(name)
    # assert arr.dtype == np.float32
    # Non-contiguous (strided) views must be copied so Vapor sees flat C-order data.
    if arr.__array_interface__['strides']:
        arr = arr.copy()  # Flatten data
    # Data is cast to float32 before handing it to the wrapped Vapor instance.
    self._wrappedInstance.AddRegularData(name, np.float32(arr), arr.shape)
    # TODO: Only clear necessary renderers
    self.ses.ce.ClearAllRenderCaches()
238,
2028,
365
] |
def METHOD_NAME(self):
    """NumpyReader round-trip: a pickled dict saved via np.save is read back intact."""
    test_data = {"test": np.random.randint(0, 256, size=[3, 4, 4])}
    with tempfile.TemporaryDirectory() as tempdir:
        filepath = os.path.join(tempdir, "test_data.npy")
        np.save(filepath, test_data, allow_pickle=True)
        reader = NumpyReader()
        # .item() unwraps the 0-d object array that np.save produces for a dict.
        result = reader.get_data(reader.read(filepath))[0].item()
        np.testing.assert_allclose(result["test"].shape, test_data["test"].shape)
        np.testing.assert_allclose(result["test"], test_data["test"])
9,
9066,
1385
] |
def METHOD_NAME(self):
    """Delete the selected item."""
    selected = self.listBox.selectedItems()
    if not selected:
        return
    # Remove the first selected entry by its row index.
    row = self.listBox.row(selected[0])
    self.listBox.takeItem(row)
74,
34
] |
def METHOD_NAME(cls):
    """Build (once) and cache the AAZ schema describing the HTTP 200 payload.

    The payload is a paged list: `value` holds usage elements (currentValue,
    id, limit, name, unit) and `nextLink` carries the continuation URL.
    """
    if cls._schema_on_200 is not None:
        return cls._schema_on_200
    cls._schema_on_200 = AAZObjectType()
    _schema_on_200 = cls._schema_on_200
    _schema_on_200.next_link = AAZStrType(
        serialized_name="nextLink",
    )
    _schema_on_200.value = AAZListType()
    value = cls._schema_on_200.value
    value.Element = AAZObjectType()
    _element = cls._schema_on_200.value.Element
    _element.current_value = AAZIntType(
        serialized_name="currentValue",
        flags={"required": True},
    )
    _element.id = AAZStrType(
        flags={"read_only": True},
    )
    _element.limit = AAZIntType(
        flags={"required": True},
    )
    _element.name = AAZObjectType(
        flags={"required": True},
    )
    _element.unit = AAZStrType(
        flags={"required": True},
    )
    # `name` is itself a localized-string pair.
    name = cls._schema_on_200.value.Element.name
    name.localized_value = AAZStrType(
        serialized_name="localizedValue",
    )
    name.value = AAZStrType()
    return cls._schema_on_200
56,
135,
69,
1072
] |
def METHOD_NAME():
    """Run the answer-extraction -> question-generation -> QA-filter pipeline.

    First runs on an inline list of passages, then (optionally) on a source
    file given via ``args.source_file``, writing question/answer pairs to
    ``args.doc_dir/generated_qa_pairs.txt`` as tab-separated lines.
    """
    answer_extractor = AnswerExtractor(
        model="uie-base-answer-extractor",
        device=args.device,
        schema=["答案"],
        max_answer_candidates=3,
        position_prob=0.01,
    )
    question_generator = QuestionGenerator(
        model="unimo-text-1.0-question-generation",
        device=args.device,
        num_return_sequences=2,
    )
    qa_filter = QAFilter(
        model="uie-base-qa-filter",
        device=args.device,
        schema=["答案"],
        position_prob=0.1,
    )
    pipe = QAGenerationPipeline(
        answer_extractor=answer_extractor, question_generator=question_generator, qa_filter=qa_filter
    )
    pipeline_params = {"QAFilter": {"is_filter": True}}
    # list example
    meta = [
        "世界上最早的电影院是美国洛杉矶的“电气剧场”,建于1902年。",
        "以脸书为例,2020年时,54%的成年人表示,他们从该平台获取新闻。而现在,这个数字下降到了44%。与此同时,YouTube在过去几年里一直保持平稳,约有三分之一的用户在该平台上获取新闻。",
    ]
    prediction = pipe.run(meta=meta, params=pipeline_params)
    prediction = prediction["filtered_cqa_triples"]
    pprint(prediction)
    # file example
    if args.source_file:
        meta = []
        with open(args.source_file, "r", encoding="utf-8") as rf:
            for line in rf:
                meta.append(line.strip())
        prediction = pipe.run(meta=meta, params=pipeline_params)
        prediction = prediction["filtered_cqa_triples"]
        if not os.path.exists(args.doc_dir):
            os.makedirs(args.doc_dir)
        with open(os.path.join(args.doc_dir, "generated_qa_pairs.txt"), "w", encoding="utf-8") as wf:
            for pair in prediction:
                wf.write(pair["synthetic_question"].strip() + "\t" + pair["synthetic_answer"].strip() + "\n")
1937,
552,
1148
] |
def METHOD_NAME(self):
    """Unknown and error DPs are surfaced via extra_state_attributes."""
    self.dps[UNKNOWN20_DPS] = 20
    self.dps[ERROR_DPS] = 22
    self.assertDictEqual(
        self.subject.extra_state_attributes,
        {"unknown_20": 20, "error": 22},
    )
9,
1967,
551,
177
] |
def METHOD_NAME(shard: str) -> None:
    """Build the integration instance for *shard* and run it.

    NOTE(review): the meaning of the positional ``True`` is not visible here
    (likely a dry-run/commit flag) — confirm against ``run``'s signature.
    """
    sharded_integration = integration.build_integration_instance_for_shard(
        shard
    )
    sharded_integration.run(True)
22,
1911,
2366
] |
def METHOD_NAME(self) -> dict:
    """Return this object's log store, creating it lazily on first access."""
    store = getattr(self, "_log_store", None)
    if store is None:
        store = {}
        self._log_store = store
    return store
19,
390,
1308
] |
def METHOD_NAME():
    """Smoke test: load and run a WASI pipeline module that writes to stdout/stderr."""
    pipeline_path = test_input_dir / 'stdout-stderr-test.wasi.wasm'
    with open(pipeline_path, 'rb') as fp:
        wasm_bytes = fp.read()
    pipeline = Pipeline(wasm_bytes)
    # No inputs: the module only exercises its stdio streams.
    pipeline.run([])
9,
1148,
321
] |
def METHOD_NAME(self) -> List[Tuple[str, str]]:
    """Return (label, url) action links for the admin page; here just the email test view."""
    return [
        (_("Test"), reverse("admin_email_test")),
    ]
19,
200,
1116
] |
def METHOD_NAME(log_dir):
    """Return paths of TensorBoard event files found directly inside *log_dir*."""
    paths = []
    for entry in os.listdir(log_dir):
        full_path = os.path.join(log_dir, entry)
        # Only regular files named like TensorBoard event logs qualify.
        if entry.startswith("events.out.tfevents.") and os.path.isfile(full_path):
            paths.append(full_path)
    return paths
19,
239,
12281
] |
def METHOD_NAME(cpus: Optional[str] = None,
                memory: Optional[str] = None,
                disk_tier: Optional[str] = None) -> Optional[str]:
    """Pick a default OCI instance type matching the requested CPU/memory spec.

    :param cpus: vCPU count spec; defaults to ``DEFAULT_NUM_VCPUS`` suffixed
        with '+' (meaning "at least").
    :param memory: memory in GB; defaults to a CPU-ratio spec (``'<ratio>x'``).
    :param disk_tier: unused for OCI.
    :return: an instance type name, or None if nothing in the catalog matches.
    """
    del disk_tier  # unused
    if cpus is None:
        cpus = f'{oci_conf.DEFAULT_NUM_VCPUS}+'
    if memory is None:
        memory_gb_or_ratio = f'{oci_conf.DEFAULT_MEMORY_CPU_RATIO}x'
    else:
        memory_gb_or_ratio = memory
    # Restrict the catalog to the configured default instance families.
    instance_type_prefix = tuple(
        f'{family}' for family in oci_conf.DEFAULT_INSTANCE_FAMILY)
    df = _get_df()
    df = df[df['InstanceType'].notna()]
    df = df[df['InstanceType'].str.startswith(instance_type_prefix)]
    logger.debug(f'# get_default_instance_type: {df}')
    return common.get_instance_type_for_cpus_mem_impl(df, cpus,
                                                      memory_gb_or_ratio)
19,
235,
89,
44
] |
def METHOD_NAME(revision: str) -> bool:
    """Return True iff *revision* is exactly a full 40-character SHA-1 git commit hash.

    BUG FIX: the original used ``re.match`` with word boundaries, which also
    accepted strings that merely *start* with a hash (e.g. ``"<sha> tail"``).
    ``fullmatch`` requires the whole string to be the hash.
    """
    # match SHA-1 git commit
    return bool(re.fullmatch(r"[0-9a-f]{40}", revision))
137,
1160
] |
def METHOD_NAME():
    """The custom _S gate's unitary matches pyquil's S(0) up to a global phase."""
    u1 = program_unitary(Program(S(0)), n_qubits=1)
    u2 = program_unitary(_S(0), n_qubits=1)
    assert equal_up_to_global_phase(u1, u2, atol=1e-12)
9,
1305
] |
def METHOD_NAME(self) -> Optional[str]:
    """
    The secondary key.
    """
    # Pulumi output property: reads the provider-resolved "secondary_key" attribute.
    return pulumi.get(self, "secondary_key")
3288,
59
] |
def METHOD_NAME(self):
    """get_md5 raises MD5UnavailableError when botocore reports MD5 as unavailable."""
    with mock.patch('botocore.compat.MD5_AVAILABLE', False):
        with self.assertRaises(MD5UnavailableError):
            get_md5()
9,
1731,
45,
168
] |
def METHOD_NAME(self):
    """_quadrant_area matches reference areas for latitude bands in both hemispheres."""
    lats, lons = self._as_bounded_coords(
        [[-80, -70], [0, 10], [70, 80]], [[0, 10], [10, 30]]
    )
    area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
    # Symmetric bands (±70..80) must yield identical areas; the equatorial
    # band is largest.
    self.assertArrayAllClose(
        area,
        [
            [3.19251846e11, 6.38503692e11],
            [1.22880059e12, 2.45760119e12],
            [3.19251846e11, 6.38503692e11],
        ],
    )
9,
690,
107,
-1,
61,
-1
] |
def METHOD_NAME(self):
    """Drop the compiler setting from this package's id computation.

    # NOTE(review): this makes the binary package compiler-agnostic (typical
    # for header-only/tool packages) — confirm that is the intent.
    """
    del self.info.settings.compiler
360,
147
] |
def METHOD_NAME(self):
    """Fetch this object's data and delegate to the adapter's method of the same name.

    Returns {} when no adapter is configured; falsy data is replaced with {}
    before delegation.
    """
    data = self.get_data()
    if not data:
        data = {}
    if not self.adapter:
        return {}
    return self.adapter.METHOD_NAME(data)
924
] |
def METHOD_NAME(self, MainWindow):
    """Build the EIVideo main-window UI: video canvas, paint board, playback
    controls, progress/status bars and the right-hand help/tool panel.

    Follows the Qt Designer ``setupUi`` convention: widgets are created as
    attributes on ``self`` and signals are wired to the window's slots.
    """
    MainWindow.setObjectName("EIVideo")
    MainWindow.resize(1101, 751)
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    self.frame = QtWidgets.QFrame(self.centralwidget)
    self.frame.setGeometry(QtCore.QRect(20, 20, 1271, 771))
    self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame.setObjectName("frame")
    # Playback state containers.
    self.cap = []
    self.all_frames = []
    self.fps = None
    self.timer = QTimer(self.frame)
    self.time_label = QLabel('--/--', self.frame)
    self.progress_slider = QSlider(self.frame)
    self.progress_slider.setEnabled(True)
    self.progress_slider.setOrientation(Qt.Horizontal)
    self.progress_slider.setFixedWidth(710)
    self.progress_slider.setFixedHeight(20)
    self.progress_slider.setSingleStep(1)  # step size per slider change
    self.progress_slider.setValue(0)
    self.progress_slider.sliderReleased.connect(self.update_video_position_func)  # seek on slider drag release
    self.picturelabel = QtWidgets.QLabel(self.frame)
    self.picturelabel.setGeometry(30, 30, 810, 458)
    self.picturelabel.setText("")
    self.picturelabel.setObjectName("picturelabel")
    # Paint board overlays the video canvas at the same geometry.
    self.paintBoard = PaintBoard(self.frame)
    self.paintBoard.setGeometry(30, 30, 810, 458)
    self.cbtn_Eraser = QCheckBox("橡皮擦")
    self.cbtn_Eraser.setParent(self.frame)
    self.cbtn_Eraser.move(950, 40)
    self.cbtn_Eraser.clicked.connect(self.on_cbtn_eraser_clicked)
    self.btn_Clear = QPushButton("清空画板")
    self.btn_Clear.setParent(self.frame)  # parent the button to this frame
    self.btn_Clear.move(950, 60)
    self.btn_Clear.clicked.connect(self.paintBoard.clear)
    self.label_penColor = QLabel(self.frame)
    self.label_penColor.setText("画笔颜色")
    self.label_penColor.move(990, 100)
    # Get the list of color names (strings).
    self.colorList = QColor.colorNames()
    self.comboBox_penColor = QComboBox(self.frame)
    self.fill_color_list(self.comboBox_penColor)  # fill the dropdown with color swatches
    self.comboBox_penColor.move(1080, 80)
    self.comboBox_penColor.currentIndexChanged.connect(
        self.on_pen_color_change)  # connect currentIndexChanged to on_pen_color_change
    self.helplabel = QLabel()
    self.helplabel.setText("Hi,Welcome to use EIVideo\n"
                           "This is a guide for EIVideo,\n"
                           "please check\n"
                           "1. Choose 'Add' for a video\n"
                           "2. Click 'Play' to start playing\n"
                           "3. At this point, all functions \n"
                           "are unlocked\n"
                           "4. Paint and enjoy it!\n")
    # Right-hand panel: help text plus paint tools.
    self.widget2 = QtWidgets.QWidget(self.frame)
    self.widget2.setGeometry(860, 60, 200, 300)
    self.widget2.setObjectName("widget2")
    self.rightLayout = QtWidgets.QVBoxLayout(self.widget2)
    self.rightLayout.setContentsMargins(0, 0, 0, 0)
    self.rightLayout.setObjectName("rightLayout")
    self.rightLayout.addWidget(self.helplabel)
    self.rightLayout.addSpacing(50)
    self.rightLayout.addWidget(self.cbtn_Eraser)
    self.rightLayout.addWidget(self.btn_Clear)
    self.colorLayout = QtWidgets.QHBoxLayout(self.widget2)
    self.colorLayout.setContentsMargins(0, 0, 0, 0)
    self.colorLayout.setObjectName('colorLayout')
    self.colorLayout.addWidget(self.label_penColor)
    self.colorLayout.addWidget(self.comboBox_penColor)
    self.rightLayout.addLayout(self.colorLayout)
    # pushButton_6 -> GO
    self.pushButton_6 = QtWidgets.QPushButton(self.frame)
    self.pushButton_6.setGeometry(870, 600, 150, 90)
    self.pushButton_6.setObjectName("pushButton_6")
    self.pushButton_6.clicked.connect(self.infer)
    # Bottom bar: play/transport buttons plus progress slider.
    self.widget1 = QtWidgets.QWidget(self.frame)
    self.widget1.move(60, 520)
    self.widget1.setObjectName("widget1")
    self.barLayout = QtWidgets.QVBoxLayout(self.widget1)
    self.barLayout.setContentsMargins(0, 0, 0, 0)
    self.barLayout.setObjectName("barLayout")
    self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget1)
    self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
    self.horizontalLayout.setObjectName("horizontalLayout")
    self.timeLayout = QtWidgets.QHBoxLayout(self.widget1)
    self.timeLayout.setContentsMargins(0, 0, 0, 0)
    self.timeLayout.setObjectName("horizontalLayout")
    self.playbtn = QtWidgets.QPushButton(self.widget1)
    self.playbtn.setObjectName("playbtn")
    self.playbtn.clicked.connect(lambda: self.btn_func(self.playbtn))
    self.horizontalLayout.addWidget(self.playbtn)
    self.pushButton_2 = QtWidgets.QPushButton(self.widget1)
    self.pushButton_2.setObjectName("pushButton_2")
    self.pushButton_2.clicked.connect(lambda: self.btn_func(self.pushButton_2))
    self.horizontalLayout.addWidget(self.pushButton_2)
    self.pushButton_4 = QtWidgets.QPushButton(self.widget1)
    self.pushButton_4.setObjectName("pushButton_4")
    self.pushButton_4.clicked.connect(lambda: self.btn_func(self.pushButton_4))
    self.horizontalLayout.addWidget(self.pushButton_4)
    self.timeLayout.addWidget(self.progress_slider)
    self.timeLayout.addWidget(self.time_label)
    self.barLayout.addSpacing(20)
    self.barLayout.addLayout(self.timeLayout)
    self.barLayout.addSpacing(30)
    self.barLayout.addLayout(self.horizontalLayout)
    # Footer: status label + inference progress bar in a splitter.
    self.splitter = QtWidgets.QSplitter(self.frame)
    self.splitter.setGeometry(QtCore.QRect(71, 670, 750, 20))
    self.splitter.setOrientation(QtCore.Qt.Horizontal)
    self.splitter.setObjectName("splitter")
    self.label = QtWidgets.QLabel(self.splitter)
    self.label.setObjectName("label")
    self.progressBar = QtWidgets.QProgressBar(self.splitter)
    self.progressBar.setProperty("value", 0)
    self.progressBar.setObjectName("progressBar")
    MainWindow.setCentralWidget(self.centralwidget)
    self.menubar = QtWidgets.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 1327, 23))
    self.menubar.setObjectName("menubar")
    MainWindow.setMenuBar(self.menubar)
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    self.statusbar.setObjectName("statusbar")
    MainWindow.setStatusBar(self.statusbar)
    self.retranslateUi(MainWindow)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
102,
882
] |
def METHOD_NAME():
    """categorize_versions splits versions using the intersection of affected
    semver ranges (> 1.2 AND <= 2.1 -> only 2.0 and 2.1 are affected)."""
    all_versions = {"1.0", "1.1", "2.0", "2.1", "3.0", "3.1"}
    unaffected_ranges = []
    affected_ranges = [
        VersionRange.from_scheme_version_spec_string("semver", "> 1.2"),
        VersionRange.from_scheme_version_spec_string("semver", "<= 2.1"),
    ]
    resolved_ranges = []
    unaffected_versions, affected_versions = categorize_versions(
        all_versions,
        unaffected_ranges,
        affected_ranges,
        resolved_ranges,
    )
    assert len(unaffected_versions) == 4
    assert "1.0" in unaffected_versions
    assert "1.1" in unaffected_versions
    assert "3.0" in unaffected_versions
    assert "3.1" in unaffected_versions
    assert len(affected_versions) == 2
    assert "2.0" in affected_versions
    assert "2.1" in affected_versions
9,
8689,
295,
41,
246,
859,
2149
] |
def METHOD_NAME(input_string):
    """Return the hexadecimal SHA-256 digest of *input_string*.

    :param input_string: bytes-like input, or a str (encoded as UTF-8 first).
    :return: 64-character lowercase hex digest.

    Generalized backward-compatibly: hashlib requires a bytes-like object, so
    the original raised TypeError when given a str despite its name.
    """
    if isinstance(input_string, str):
        input_string = input_string.encode("utf-8")
    sha256_hash = sha256()
    sha256_hash.update(input_string)
    return sha256_hash.hexdigest()
1593,
9165,
280,
144
] |
def METHOD_NAME(self, decode=None, value=None):
    """Return an Option element of the appropriate class from this option
    number.

    An initial value may be set using the decode or value options, and will
    be fed to the resulting object's decode method or value property,
    respectively.
    """
    # self.format is the Option subclass registered for this option number.
    option = self.format(self)
    if decode is not None:
        option.decode(decode)
    if value is not None:
        option.value = value
    return option
129,
1335
] |
def METHOD_NAME(self):
    """The first key of the container under test is 0."""
    i = list(self.c.keys())[0]
    assert i == 0
9,
84,
219,
245
] |
def METHOD_NAME(self) -> str:
    """
    (Computed) The etag of the IAM policy.
    """
    # Pulumi output property: reads the provider-resolved "etag" attribute.
    return pulumi.get(self, "etag")
431
] |
def METHOD_NAME(self):
    """
    Serialize this package object to a plain dict.

    Returns:
        Dict of elements contained into the object, grouping lifecycle
        commands under 'commands' and inventory queries under 'inventory'.
    """
    return {
        'entity_id': self.getEntity_id(),
        'description': self.getDescription(),
        'sub_packages': self.getSub_packages(),
        'id': self.getUuid(),
        'pk_id': self.getId(),
        # Lifecycle hook commands, each a {command, name} pair.
        'commands': {
            'postCommandSuccess': {
                'command': self.getPostCommandSuccess_command(),
                'name': self.getPostCommandSuccess_name()
            },
            'installInit': {
                'command': self.getInstallInit_command(),
                'name': self.getInstallInit_name()
            },
            "postCommandFailure": {
                "command": self.getPostCommandFailure_command(),
                "name": self.getPostCommandFailure_name(),
            },
            "command": {
                "command": self.getCommand_command(),
                "name": self.getCommand_name(),
            },
            "preCommand": {
                "command": self.getPreCommand_command(),
                "name": self.getPreCommand_name()
            }
        },
        'name': self.getLabel(),
        'targetos': self.getOs(),
        'reboot': self.getReboot(),
        'version': self.getVersion(),
        'inventory': {
            'associateinventory': self.getInventory_associateinventory(),
            'licenses': self.getInventory_licenses(),
            "queries": {
                "Qversion": self.getQversion(),
                "Qvendor": self.getQvendor(),
                "boolcnd": self.getBoolcnd(),
                "Qsoftware": self.getQsoftware()
            },
            "metagenerator": self.getMetaGenerator()
        },
        "pkgs_share_id": self.getpkgs_share_id(),
        "edition_status": self.getedition_status(),
        "conf_json": self.getconf_json(),
        "size": self.getsize()
    }
24,
877
] |
def METHOD_NAME(self, method):
    """Per-test setup: random 3D/5D tagged volumes plus an access-counting
    pass-through operator fed with the 3D volume."""
    g = Graph()
    vol = np.random.random(size=(100, 110, 120))
    self.vol = vigra.taggedView(vol, axistags="xyz")
    vol5d = np.random.random(size=(3, 100, 110, 120, 7))
    self.vol5d = vigra.taggedView(vol5d, axistags="cxyzt")
    piper = OpArrayPiperWithAccessCount(graph=g)
    piper.Input.setValue(self.vol)
    self.g = g
    self.piper = piper
102,
103
] |
def METHOD_NAME(url, filename, data=None, timeout=300):
    """
    Retrieve a file from given url.

    :param url: source URL.
    :param filename: destination path.
    :param data: (optional) data to post.
    :param timeout: (optional) default timeout in seconds.
    :return: `None`.
    :raises OSError: if the download times out or the worker process fails.
    """
    process = Process(target=_url_download, args=(url, filename, data))
    log.info("Fetching %s -> %s", url, filename)
    process.start()
    process.join(timeout)
    if process.is_alive():
        process.terminate()
        process.join()
        raise OSError("Aborting downloading. Timeout was reached.")
    # BUG FIX: a crashed or failing worker previously went unnoticed and the
    # call "succeeded" with a missing/partial file; surface the failure.
    if process.exitcode != 0:
        raise OSError(
            "Downloading %s failed (worker exit code %s)." % (url, process.exitcode)
        )
274,
136
] |
def METHOD_NAME(iredisrc):
    """Load layered configuration into the global ``config`` object.

    Precedence (lowest to highest): packaged default iredisrc, system config
    file, the *iredisrc* argument, then the config in the current directory —
    each later file's keys override earlier ones. Returns the merged ConfigObj.
    """
    global config
    with path(project_data, "iredisrc") as p:
        config_obj = ConfigObj(str(p))
    for _file in [system_config_file, iredisrc, pwd_config_file]:
        _config = read_config_file(_file)
        if bool(_config) is True:
            config_obj.merge(_config)
            config_obj.filename = _config.filename
    # Copy the parsed values onto the runtime config singleton.
    config.raw = config_obj["main"].as_bool("raw")
    config.completer_max = config_obj["main"].as_int("completer_max")
    config.retry_times = config_obj["main"].as_int("retry_times")
    config.newbie_mode = config_obj["main"].as_bool("newbie_mode")
    config.rainbow = config_obj["main"].as_bool("rainbow")
    config.socket_keepalive = config_obj["main"].as_bool("socket_keepalive")
    config.no_info = config_obj["main"].as_bool("no_info")
    config.bottom_bar = config_obj["main"].as_bool("bottom_bar")
    config.warning = config_obj["main"].as_bool("warning")
    config.decode = config_obj["main"]["decode"]
    config.log_location = config_obj["main"]["log_location"]
    config.completion_casing = config_obj["main"]["completion_casing"]
    config.history_location = config_obj["main"]["history_location"]
    config.alias_dsn = config_obj["alias_dsn"]
    config.shell = config_obj["main"].as_bool("shell")
    config.pager = config_obj["main"].get("pager")
    config.enable_pager = config_obj["main"].as_bool("enable_pager")
    config.prompt = config_obj["main"].get("prompt")
    return config_obj
557,
200,
1537
] |
def METHOD_NAME(self):
    """GeoPolygon coordinates round-trip between lat/lon and UTM zone 6S CRSes.

    Lat/lon comparisons use 7 decimals; UTM (meters) uses 2.
    """
    modules.load_known_modules()
    p_ll = gp.GeoPolygon(Polygon([self.loc_ll]), self.crs_ll)
    p_utm = gp.GeoPolygon(Polygon([self.loc_utm]), self.crs_utm_6s)
    conv_loc_utm = p_ll.polygon(p_utm.crs()).at(0)
    conv_loc_ll = p_utm.polygon(p_ll.crs()).at(0)
    np.testing.assert_array_almost_equal(
        p_ll.polygon().at(0), conv_loc_ll, decimal=7
    )
    np.testing.assert_array_almost_equal(
        p_utm.polygon().at(0), conv_loc_utm, decimal=2
    )
    np.testing.assert_array_almost_equal(self.loc_ll, conv_loc_ll, decimal=7)
    np.testing.assert_array_almost_equal(self.loc_utm, conv_loc_utm, decimal=2)
9,
1719
] |
def METHOD_NAME(text):
    """
    Determine if a string value is either None or an empty string.

    :param text: the string to test
    :return: True, if the string has no content, False otherwise
    """
    if text is None:
        return True
    return len(text) == 0
137,
35
] |
def METHOD_NAME(self):
    """Creating and opening a DBSequence succeeds and is bound to our DB handle."""
    self.seq = db.DBSequence(self.d, flags=0)
    # open() returns None on success with DB_CREATE.
    self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
    self.assertEqual(self.d, self.seq.get_dbp())
9,
19,
13088
] |
def METHOD_NAME(self, mock_get_default_issuer):
    """resolve_issuer consults the default-issuer lookup exactly once."""
    IssuanceLine.resolve_issuer()
    mock_get_default_issuer.assert_called_once()
9,
1014,
5500
] |
def METHOD_NAME(connection, module):
    """Gather IAM user info by name, group membership, or path prefix.

    Resolution order: exact name lookup (when neither group nor path is
    given), then group listing, then path listing — with the name used as a
    post-filter for group/path results. Exits the module with the described
    users; missing entities are treated as an empty result, other AWS errors
    fail the module.
    """
    name = module.params.get("name")
    group = module.params.get("group")
    path = module.params.get("path")
    params = dict()
    iam_users = []
    if not group and not path:
        if name:
            params["UserName"] = name
        try:
            iam_users.append(connection.get_user(**params)["User"])
        except is_boto3_error_code("NoSuchEntity"):
            pass
        except (ClientError, BotoCoreError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg=f"Couldn't get IAM user info for user {name}")
    if group:
        params["GroupName"] = group
        try:
            iam_users = list_iam_users_with_backoff(connection, "get_group", **params)["Users"]
        except is_boto3_error_code("NoSuchEntity"):
            pass
        except (ClientError, BotoCoreError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg=f"Couldn't get IAM user info for group {group}")
        if name:
            # Narrow group members to the requested user name.
            iam_users = [user for user in iam_users if user["UserName"] == name]
    if path and not group:
        params["PathPrefix"] = path
        try:
            iam_users = list_iam_users_with_backoff(connection, "list_users", **params)["Users"]
        except is_boto3_error_code("NoSuchEntity"):
            pass
        except (ClientError, BotoCoreError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg=f"Couldn't get IAM user info for path {path}")
        if name:
            # Narrow path matches to the requested user name.
            iam_users = [user for user in iam_users if user["UserName"] == name]
    module.exit_json(iam_users=[describe_iam_user(user) for user in iam_users])
245,
1694,
3467
] |
def METHOD_NAME(client, start, stop):
    """ibis array slicing matches NumPy slicing semantics for the same bounds."""
    raw_value = np.array([-11, 42, 10])
    value = ibis.array(raw_value)
    expr = value[start:stop]
    result = client.execute(expr)
    expected = raw_value[start:stop]
    nt.assert_array_equal(result, expected)
9,
877,
55,
1997
] |
def METHOD_NAME(self):
    """Remove the 'atoms' and 'lattice' entries from the job's input settings,
    if present.

    BUG FIX: the original deleted ``self.settings.input.lattic`` (typo) after
    checking for ``'lattice'``, so the lattice entry was never removed.
    """
    if 'atoms' in self.settings.input:
        del self.settings.input.atoms
    if 'lattice' in self.settings.input:
        del self.settings.input.lattice
188,
4077
] |
def METHOD_NAME(sim, threads=1, singles_name="Singles"):
    """Configure a Biograph PET + NECR-phantom simulation on *sim*.

    Sets world/physics/source/digitizer/timing; activity is split across
    *threads*. Returns nothing — *sim* is mutated in place.
    """
    # main options
    sim.user_info.visu = False
    sim.user_info.number_of_threads = threads
    sim.user_info.random_seed = 123456789
    # units
    m = gate.g4_units("m")
    mm = gate.g4_units("mm")
    Bq = gate.g4_units("Bq")
    MBq = Bq * 1e6
    sec = gate.g4_units("second")
    # change world size
    world = sim.world
    world.size = [2 * m, 2 * m, 2 * m]
    world.material = "G4_AIR"
    # add a PET Biograph
    pet = pet_biograph.add_pet(sim, "pet")
    singles = pet_biograph.add_digitizer(
        sim, pet.name, paths.output / f"test049_pet.root", singles_name=singles_name
    )
    # add NECR phantom
    phantom = phantom_necr.add_necr_phantom(sim, "phantom")
    # physics
    sim.physics_manager.physics_list_name = "G4EmStandardPhysics_option4"
    sim.physics_manager.global_production_cuts.all = 1 * m
    sim.physics_manager.set_production_cut(phantom.name, "all", 10 * mm)
    sim.physics_manager.set_production_cut(f"{pet.name}_crystal", "all", 0.1 * mm)
    # default source for tests
    source = phantom_necr.add_necr_source(sim, phantom)
    total_yield = gate.get_rad_yield("F18")
    print("Yield for F18 (nb of e+ per decay) : ", total_yield)
    source.activity = 3000 * Bq * total_yield
    # Per-thread activity so total activity is independent of thread count.
    source.activity = 1787.914158 * MBq * total_yield / sim.user_info.number_of_threads
    # source.n = 50000
    source.half_life = 6586.26 * sec
    source.energy.type = "F18_analytic"  # WARNING not ok, but similar to previous Gate
    # source.energy.type = "F18"  # this is the correct F18 e+ source
    # add stat actor
    s = sim.add_actor("SimulationStatisticsActor", "Stats")
    s.track_types_flag = True
    # timing
    sec = gate.g4_units("second")
    sim.run_timing_intervals = [[0, 0.00005 * sec]]
    # sim.run_timing_intervals = [[0, 0.00005 * sec]]
    # set user hook to dump production cuts from G4
    sim.user_fct_after_init = check_production_cuts
129,
3036
] |
def METHOD_NAME(
    self,
) -> None:
    """Keyframe rejects non-numeric duration strings such as '500ms'."""
    with self.assertRaises(ValueError):
        Keyframe(duration="500ms")
9,
4537,
220,
24,
673,
7680,
5602
] |
def METHOD_NAME(self, inputs):
    """Build a lower-triangular causal attention mask tiled across the batch.

    Returns an int32 tensor of shape (batch, seq_len, seq_len) where entry
    (i, j) is 1 iff i >= j, i.e. each position may attend only to itself and
    earlier positions.
    """
    input_shape = tf.shape(inputs)
    batch_size, sequence_length = input_shape[0], input_shape[1]
    i = tf.range(sequence_length)[:, tf.newaxis]
    j = tf.range(sequence_length)
    mask = tf.cast(i >= j, dtype="int32")
    mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
    # Tile the single (1, L, L) mask along the batch dimension only.
    mult = tf.concat(
        [
            tf.expand_dims(batch_size, -1),
            tf.constant([1, 1], dtype=tf.int32),
        ],
        axis=0,
    )
    return tf.tile(mask, mult)
19,
11558,
3998,
361
] |
def METHOD_NAME(self):
    """Every malformed model card raises ValueError — and the loop runs them all."""
    cntr = 0
    for fake_model_card in self.WRONG_VERSION_FAKE_MODELS:
        with self.assertRaises(ValueError):
            get_version_from_modelcard(fake_model_card)
        cntr += 1
    # Guard against the loop silently exiting early.
    self.assertEqual(cntr, len(self.WRONG_VERSION_FAKE_MODELS))
9,
216,
15034,
578,
5427,
41,
3534
] |
def METHOD_NAME(ib: CellBasis, **kwargs) -> SvgPlot:
    """Draw the boundary of a basis' mesh, refined for smoother rendering.

    ``nrefs`` in *kwargs* (default 2) sets the number of refinement passes
    used by ``refinterp``; remaining kwargs are forwarded to ``draw``.
    """
    nrefs = kwargs["nrefs"] if "nrefs" in kwargs else 2
    m, _ = ib.refinterp(ib.mesh.p[0], nrefs=nrefs)
    return draw(m, boundaries_only=True, **kwargs)
1100,
1189
] |
def METHOD_NAME(salt_call_cli):
    """
    Test syncing all ModuleCase with whitelist and blacklist.

    Blacklisting the same module that is whitelisted must cancel it out,
    so every category syncs nothing.
    """
    expected_return = {
        "engines": [],
        "clouds": [],
        "grains": [],
        "beacons": [],
        "utils": [],
        "returners": [],
        "executors": [],
        "modules": [],
        "renderers": [],
        "log_handlers": [],
        "matchers": [],
        "states": [],
        "sdb": [],
        "proxymodules": [],
        "output": [],
        "thorium": [],
        "serializers": [],
    }
    ret = salt_call_cli.run(
        "saltutil.sync_all",
        extmod_whitelist={"modules": ["runtests_decorators"]},
        extmod_blacklist={"modules": ["runtests_decorators"]},
    )
    assert ret.returncode == 0
    assert ret.data
    assert ret.data == expected_return
9,
164,
75,
3301,
61,
7842
] |
# Typed stub (requests-style iter_content signature): yields the response body
# in chunks; implementation lives elsewhere.
def METHOD_NAME(self, chunk_size: int | None = 1, decode_unicode: bool = False) -> Iterator[Any]: ...
84,
459
] |
def METHOD_NAME(param) -> pyows_types.OutputDescription:
    """Encode a process output parameter as an OWS OutputDescription.

    NOTE(review): only LiteralData is handled; any other param type falls
    through and returns None despite the annotation — confirm this is
    intentional before relying on the return type.
    """
    if isinstance(param, LiteralData):
        return pyows_types.OutputDescription(
            identifier=param.identifier,
            data_description=pyows_types.LiteralDataDescription(
                # TODO: probably similar to encode_input()
                domains=[],
                formats=[],
            ),
            title=param.title,
            abstract=param.abstract,
        )
421,
146
] |
def METHOD_NAME(self, metadata: dict) -> dict:
    """
    Normalize metadata to ensure it is within the size limit and doesn't contain complex objects.
    """
    normalized = {}
    used = 0
    limit = self.max_metadata_size
    for key, value in metadata.items():
        is_simple = isinstance(value, (str, int, float, bool))
        is_str_list = isinstance(value, list) and all(isinstance(item, str) for item in value)
        if not (is_simple or is_str_list):
            continue  # drop complex/nested objects
        # Size is measured on the stringified key and value.
        entry_size = len(str(key)) + len(str(value))
        if limit is None or used + entry_size <= limit:
            normalized[key] = value
            used += entry_size
    return normalized
5419,
773
] |
def METHOD_NAME(self, X, y=None):
    """Delegate to the wrapped model's method of the same name, with or
    without targets.

    Returns self, following the scikit-learn fit-chaining convention.
    """
    if y is not None:
        self._wrapped_model.METHOD_NAME(X, y)
    else:
        self._wrapped_model.METHOD_NAME(X)
    return self
90
] |
def METHOD_NAME(cls, command):
    """Join *command* into one shell string, escaping characters the shell
    would otherwise expand (history '!', quotes, backticks, '$')."""
    replacements = [
        # escape ! as \!
        ('!', "\\!"),
        # see Shell.join()
        ('"', '"\'"\'"'),
        # similar to above, but for backticks
        ('`', '"\'`\'"'),
        # escape $ if not part of a valid var ref like $FOO, ${FOO}
        (re.compile(r"\$([^a-zA-Z{])"), '"\'$\'"\\1'),
        (re.compile(r"\$$"), '"\'$\'"')  # edge case, $ at end
    ]
    return shlex_join(command, replacements=replacements)
2831
] |
def METHOD_NAME(self, resources):
    """Delete each FIS experiment template in *resources*, retrying transient errors."""
    client = local_session(self.manager.session_factory).client('fis')
    for r in resources:
        self.manager.retry(client.delete_experiment_template, id=r['id'])
356
] |
def METHOD_NAME(self, inputs: list):
    """
    Called when the execution of a network starts
    inputs: list of Tensor
    """
    # The inputs are not inspected; this hook only announces the event.
    message = "Network execution started."
    print(message)
447,
1228,
2046
] |
f METHOD_NAME(self): | [
9,
1171
] |
def METHOD_NAME(self):
    """POSTing /register forwards op_url, redirect_uris and extra params to ClientHandler."""
    first_value = 'https://op'
    second_value = ['https://client.com.br/oidc_callback']
    third_value = {'scope': 'openid email profile'}
    self.client.post(url_for('register'), json={
        'op_url': first_value,
        'redirect_uris': second_value,
        'additional_params': third_value
    })
    # ClientHandler.__init__ is patched elsewhere in this test class.
    ClientHandler.__init__.assert_called_once_with(first_value, second_value, third_value)
9,
841,
427,
1437,
490,
434
] |
def METHOD_NAME(self):
    """HPy_AsPyObject(ctx, HPy_NULL) must yield a C NULL PyObject pointer."""
    # The embedded C module returns True iff the conversion produced NULL.
    mod = self.make_module("""
#include <Python.h>
HPyDef_METH(f, "f", HPyFunc_NOARGS)
static HPy f_impl(HPyContext *ctx, HPy self)
{
PyObject *o = HPy_AsPyObject(ctx, HPy_NULL);
if (o == NULL) {
return HPy_Dup(ctx, ctx->h_True);
}
else {
return HPy_Dup(ctx, ctx->h_False);
}
}
@EXPORT(f)
@INIT
""")
    assert mod.f()
9,
-1,
1051
] |
def METHOD_NAME(soln_stk, container_config):
    """
    Whether the solution stack runs Docker containers.

    :param soln_stk: SolutionStack: the solution stack
    :param container_config: dict: container_config.json as dict
    :return: bool
    """
    # True for both preconfigured Docker stacks and the generic Docker stack.
    return (is_preconfigured(soln_stk, container_config) or
            is_generic(soln_stk, container_config))
137,
224
] |
def METHOD_NAME():
    """Teardown hook: remove cosbench artifacts created during the test run."""
    cosbench.cleanup()
1843
] |
def METHOD_NAME(tree: ast.AST, defn_env: dict, phi_name: str = "phi"):
    """Rewrite *tree* into SSA form and lower its returns to output assignments.

    Each original ``return`` becomes an assignment to ``O`` (or ``O0..On`` for
    tuple returns), selected with *phi_name* calls when the return was
    conditional. Returns (transformed tree, SSA argument list).
    """
    # tree = MoveReturn().visit(tree)
    # tree.body.append(
    #     ast.Return(ast.Name("__magma_ssa_return_value", ast.Load())))
    ssa_visitor = SSAVisitor(phi_name)
    tree = ssa_visitor.visit(tree)
    return_transformer = TransformReturn()
    tree = return_transformer.visit(tree)
    num_return_values = len(ssa_visitor.return_values)
    # Walk return sites last-to-first so later (unconditional) values win.
    for i in reversed(range(num_return_values)):
        conds = ssa_visitor.return_values[i]
        name = f"__magma_ssa_return_value_{i}"
        if i == num_return_values - 1 or not conds:
            # Final or unconditional return: plain assignment to the outputs.
            if isinstance(tree.returns, ast.Tuple):
                tree.body.append(ast.Assign(
                    [ast.Tuple([ast.Name(f"O{i}", ast.Store())
                                for i in range(len(tree.returns.elts))], ast.Store())],
                    ast.Name(name, ast.Load())
                ))
            else:
                tree.body.append(ast.Assign([ast.Name("O", ast.Load)],
                                            ast.Name(name, ast.Load())))
        else:
            # Conditional return: AND together the path conditions and mux
            # the candidate value against the current output via phi.
            cond = conds[-1]
            for c in conds[:-1]:
                cond = ast.BinOp(cond, ast.BitAnd(), c)
            if isinstance(tree.returns, ast.Tuple):
                for i in range(len(tree.returns.elts)):
                    tree.body.append(ast.Assign(
                        [ast.Name(f"O{i}", ast.Store())],
                        ast.Call(ast.Name(phi_name, ast.Load()), [
                            ast.List([
                                ast.Name(f"O{i}", ast.Load()),
                                ast.Subscript(ast.Name(name, ast.Load()),
                                              ast.Index(ast.Num(i)),
                                              ast.Load())
                            ], ast.Load()),
                            cond], []))
                    )
            else:
                tree.body.append(ast.Assign(
                    [ast.Name("O", ast.Store())],
                    ast.Call(ast.Name(phi_name, ast.Load()), [
                        ast.List([ast.Name("O", ast.Load()), ast.Name(name, ast.Load())],
                                 ast.Load()), cond], []))
                )
    return tree, ssa_visitor.args
197,
151,
24,
3692
] |
def METHOD_NAME(self, system_frame):
    """After setting operating conditions and initializing, the feed stream
    carries the expected component mass flows."""
    m = system_frame
    set_operating_conditions(m)
    initialize_system(m)
    # test feed water flow
    assert value(m.fs.feed.properties[0].flow_mass_comp["H2O"]) == pytest.approx(
        13127.656969, rel=1e-3
    )
    assert value(m.fs.feed.properties[0].flow_mass_comp["tss"]) == pytest.approx(
        3.193938, rel=1e-5
    )
    assert value(m.fs.feed.properties[0].flow_mass_comp["cod"]) == pytest.approx(
        3.52794065556, abs=1e-10
    )
    assert value(m.fs.feed.properties[0].flow_mass_comp["oxygen"]) == pytest.approx(
        9.3989169, rel=1e-5
    )
    assert value(
        m.fs.feed.properties[0].flow_mass_comp["carbon_dioxide"]
    ) == pytest.approx(1.3143778e-5, abs=1e-10)
9,
0,
6213,
1626
] |
def METHOD_NAME():
    """Season/episode parsing copes with subf2m's duplicated release titles."""
    assert subf2m._get_episode_from_release(
        "Vinland Saga Season 2 - 05 [Crunchyroll][Crunchyroll] Vinland Saga Season 2 - 05"
    ) == {"season": [2], "episode": [5]}
9,
19,
3188,
280,
586
] |
def METHOD_NAME(container: dagger.Container) -> dagger.Container:
    """Install the Alpine build dependencies into *container*.

    Packages are grouped under the virtual name ``.build-deps`` so they can
    later be removed in a single ``apk del .build-deps`` step.
    """
    return container.with_exec(
        [
            "apk",
            "add",
            "--no-cache",
            "--virtual",
            ".build-deps",
            "ca-certificates",
            "coreutils",
            "cyrus-sasl-dev",
            "gcc",
            "libc-dev",
            "libevent-dev",
            "linux-headers",
            "make",
            "openssl",
            "openssl-dev",
            "perl",
            "perl-io-socket-ssl",
            "perl-utils",
        ]
    )
0,
2410
] |
def METHOD_NAME(output_path: str) -> dict:
    """Build a powerbi-report-server -> file-sink ingestion recipe writing to *output_path*."""
    return {
        "run_id": "powerbi-report-server-test",
        "source": {
            "type": "powerbi-report-server",
            "config": {
                **default_source_config(),
            },
        },
        "sink": {
            "type": "file",
            "config": {"filename": output_path},
        },
    }
19,
235,
3912
] |
def METHOD_NAME(argv: Optional[List[str]] = None) -> None:
    """
    Handle command line arguments and get things started.

    Prints the learned weights (and intercepts) of a saved model, optionally
    filtered by sign, truncated to the top-k, or grouped by class label.

    Parameters
    ----------
    argv : Optional[List[str]], default=None
        List of arguments, as if specified on the command-line.
        If ``None``, ``sys.argv[1:]`` is used instead.
        Defaults to ``None``.
    """
    # --- command-line interface ---
    parser = argparse.ArgumentParser(
        description="Prints out the weights of a" " given model.",
        conflict_handler="resolve",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("model_file", help="model file to load")
    # --k and --sort_by_labels are mutually exclusive: top-k truncation only
    # applies in the flat (non-grouped) output mode.
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "--k", help="number of top features to print (0 for all)", type=int, default=50
    )
    group.add_argument(
        "--sort_by_labels",
        "-s",
        action="store_true",
        default=False,
        help="order the features by classes",
    )
    parser.add_argument(
        "--sign",
        choices=["positive", "negative", "all"],
        default="all",
        help="show only positive, only negative or all weights",
    )
    parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")
    args = parser.parse_args(argv)
    # Make warnings from built-in warnings module get formatted more nicely
    logging.captureWarnings(True)
    logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - " "%(message)s")
    # k == 0 means "print all features".
    k = args.k if args.k > 0 else None
    learner = Learner.from_file(args.model_file)
    (weights, intercept) = learner.model_params
    multiclass = False
    model = learner._model
    # Detect multiclass linear models, whose weight keys combine a label and
    # a feature name (used by the --sort_by_labels branch below).
    if (
        isinstance(model, LinearSVC)
        or (isinstance(model, LogisticRegression) and len(learner.label_list) > 2)
        or (isinstance(model, SVC) and model.kernel == "linear")
    ):
        multiclass = True
    weight_items = iter(weights.items())
    # Optionally keep only one sign of weights.
    if args.sign == "positive":
        weight_items = (x for x in weight_items if x[1] > 0)
    elif args.sign == "negative":
        weight_items = (x for x in weight_items if x[1] < 0)
    if intercept is not None:
        # subclass of LinearModel
        if "_intercept_" in intercept:
            # Some learners (e.g. LinearSVR) may return an array of intercepts but
            # sometimes that array is of length 1 so we don't need to print that
            # as an array/list. First, let's normalize these cases.
            model_intercepts = intercept["_intercept_"]
            intercept_is_array = isinstance(model_intercepts, np.ndarray)
            num_intercepts = len(model_intercepts) if intercept_is_array else 1
            if intercept_is_array and num_intercepts == 1:
                model_intercepts = model_intercepts[0]
                intercept_is_array = False
            # now print out the intercepts
            print(f"intercept = {model_intercepts:.12f}")
        else:
            # One intercept per label.
            print("== intercept values ==")
            for label, val in intercept.items():
                print(f"{val:.12f}\t{label}")
            print()
    print("Number of nonzero features:", len(weights), file=sys.stderr)
    weight_by_class: DefaultDict[str, Dict[str, float]] = defaultdict(dict)
    if multiclass and args.sort_by_labels:
        # Group weights by label; each key looks like "<label> <feature>".
        for label_feature, weight in weight_items:
            label, feature = label_feature.split()
            weight_by_class[label][feature] = weight
        # Within each label, print features by descending absolute weight.
        for label in sorted(weight_by_class):
            for feat, val in sorted(weight_by_class[label].items(), key=lambda x: -abs(x[1])):
                print(f"{val:.12f}\t{label}\t{feat}")
    else:
        # Flat mode: top-k features by descending absolute weight.
        for feat, val in sorted(weight_items, key=lambda x: -abs(x[1]))[:k]:
            print(f"{val:.12f}\t{feat}")
57
] |
f METHOD_NAME(
self, mock_hparams, mock_model_options): | [
9,
129,
8866,
61,
578,
1881,
217
] |
def METHOD_NAME(s):
    """Parse dates in the format returned by the Github API.

    Falsy input (None/empty string) maps to the Unix epoch.
    """
    if not s:
        return datetime.fromtimestamp(0)
    return datetime.strptime(s, ISO8601)
214,
884
] |
def METHOD_NAME(project):
    """A semipublic project reports neither public nor private visibility."""
    # Same attribute-access order as the individual checks below rely on.
    assert not project.is_public
    assert not project.is_private
    assert project.is_semipublic
9,
137,
8268
] |
def METHOD_NAME(dt: datetime.datetime) -> datetime.datetime:
    """Convert datetime.datetime object in UTC timezone to local timezone

    Args:
        dt: datetime.datetime object

    Returns:
        datetime.datetime in local timezone

    Raises:
        TypeError if dt is not a datetime.datetime object
        ValueError if dt is not in UTC timezone
    """
    # Exact-type check is intentional: datetime subclasses (e.g. pandas
    # Timestamp) are rejected, matching the original contract; `is not`
    # is the idiomatic form of that comparison.
    if type(dt) is not datetime.datetime:
        raise TypeError(f"dt must be type datetime.datetime, not {type(dt)}")
    # Accept any tzinfo whose UTC offset is zero (e.g. pytz.UTC), not only
    # the datetime.timezone.utc singleton. Naive datetimes have a None
    # offset and are still rejected, as before.
    if dt.utcoffset() != datetime.timedelta(0):
        raise ValueError(f"{dt} must be in UTC timezone: timezone = {dt.tzinfo}")
    return dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None)
884,
1166,
24,
125
] |
def METHOD_NAME(self):
    """The userbar on a draft preview must link to the page editor."""
    self.login()
    draft_url = reverse("wagtailadmin_pages:view_draft", args=(self.child_page.id,))
    response = self.client.get(draft_url)
    # The userbar should show an edit link for the previewed page.
    # https://github.com/wagtail/wagtail/issues/10002
    self.assertContains(response, "Edit this page")
    edit_url = reverse("wagtailadmin_pages:edit", args=(self.child_page.id,))
    self.assertContains(response, edit_url)
9,
697,
2004,
548,
623,
-1
] |
def METHOD_NAME(self):
    # Conan layout hook: sources live in the "src" subfolder.
    basic_layout(self, src_folder="src")
571
] |
f METHOD_NAME(self, dir=-1): | [
829,
137,
6178
] |
def METHOD_NAME():
    """Run the jit-script/patching test case in an isolated subprocess."""
    # Run test case in a separate process to track patching of torch by NNCF
    run_pytest_case_function_in_separate_process(test_jit_script_exception_preserves_patching_isolated)
9,
3821,
782,
442,
9875,
7954
] |
def METHOD_NAME():
    """Return {language_code: [source_text, translated_text]} test pairs,
    including an HTML-formatted pair for Finnish."""
    html_source_data = """<font color="#848484" face="arial, tahoma, verdana, sans-serif">
    <span style="font-size: 11px; line-height: 16.9px;">Test Data</span></font>"""
    html_translated_data = """<font color="#848484" face="arial, tahoma, verdana, sans-serif">
    <span style="font-size: 11px; line-height: 16.9px;"> testituloksia </span></font>"""
    pairs = {}
    pairs["hr"] = ["Test data", "Testdaten"]
    pairs["ms"] = ["Test Data", "ujian Data"]
    pairs["et"] = ["Test Data", "testandmed"]
    pairs["es"] = ["Test Data", "datos de prueba"]
    pairs["en"] = ["Quotation", "Tax Invoice"]
    pairs["fi"] = [html_source_data, html_translated_data]
    return pairs
19,
2518,
365
] |
def METHOD_NAME(params,
                g_t,
                m,
                lr,
                alpha=1.0,
                beta=0.9,
                py_sign_decay_fn=None,
                t=None):
    """NumPy reference implementation of a single sign-based update step.

    Returns the updated parameters and the new momentum value.
    """
    # Exponential moving average of the gradient.
    m_t = beta * m + (1 - beta) * g_t
    # Optional decay applied to the sign-agreement term.
    sign_decayed = 1.0 if py_sign_decay_fn is None else py_sign_decay_fn(t - 1)
    # Scale the step by agreement between gradient and momentum signs.
    multiplier = alpha + sign_decayed * np.sign(g_t) * np.sign(m_t)
    return params - lr * multiplier * g_t, m_t
-1,
86,
2028
] |
def METHOD_NAME():
    """Write the spec keyset under spec_base_key, refusing to overwrite."""
    with kdb.KDB() as db:
        ks = kdb.KeySet(10)
        db.get(ks, spec_base_key)
        # Abort if any keys already exist below the spec base key.
        if len(ks.cut(kdb.Key(spec_base_key))) > 0:
            print("ERROR: Couldn't setup spec, keys exist!", file=sys.stderr)
            exit(1)
        ks.extend(spec)
        db.set(ks, spec_base_key)
102,
1457
] |
def METHOD_NAME(self, mains):
    """In-memory disaggregation.

    Parameters
    ----------
    mains : pd.Series

    Returns
    -------
    appliance_powers : pd.DataFrame where each column represents a
        disaggregated appliance. Column names are the integer index
        into `self.model` for the appliance in question.
    """
    '''if not self.model:
        raise RuntimeError(
            "The model needs to be instantiated before"
            " calling `disaggregate`. The model"
            " can be instantiated by running `train`.")'''
    print("...............CO disaggregate_chunk running.............")
    # sklearn produces lots of DepreciationWarnings with PyTables
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    # Because CombinatorialOptimisation could have been trained using
    # either train() or train_on_chunk(), we must
    # set state_combinations here.
    self._set_state_combinations_if_necessary()
    """
    # Add vampire power to the model
    if vampire_power is None:
        vampire_power = get_vampire_power(mains)
    if vampire_power > 0:
        print("Including vampire_power = {} watts to model..."
              .format(vampire_power))
        n_rows = self.state_combinations.shape[0]
        vampire_power_array = np.zeros((n_rows, 1)) + vampire_power
        state_combinations = np.hstack(
            (self.state_combinations, vampire_power_array))
    else:
        state_combinations = self.state_combinations
    """
    state_combinations = self.state_combinations
    # Total power demand for each combination of appliance states.
    summed_power_of_each_combination = np.sum(state_combinations, axis=1)
    # summed_power_of_each_combination is now an array where each
    # value is the total power demand for each combination of states.
    # Start disaggregation
    test_prediction_list = []
    for test_df in mains:
        appliance_powers_dict = {}
        # Nearest combination (by total power) for every mains sample.
        indices_of_state_combinations, residual_power = find_nearest(
            summed_power_of_each_combination, test_df.values)
        for i, model in enumerate(self.model):
            print("Estimating power demand for '{}'"
                  .format(model['appliance_name']),end="\r")
            # Column i of the matched combination is appliance i's power.
            predicted_power = state_combinations[
                indices_of_state_combinations, i].flatten()
            column = pd.Series(
                predicted_power, index=test_df.index, name=i)
            appliance_powers_dict[self.model[i]['appliance_name']] = column
        appliance_powers = pd.DataFrame(
            appliance_powers_dict, dtype='float32')
        test_prediction_list.append(appliance_powers)
    return test_prediction_list
-1,
464
] |
def METHOD_NAME(pillar_include_tree, salt_call_cli):
    """
    Test pillar include when a pillar file
    has already been included.
    """
    ret = salt_call_cli.run("pillar.items")
    assert ret.returncode == 0
    data = ret.data
    assert data
    assert "element" in data
    element = data["element"]
    assert "d" in element
    assert element["d"] == {"c": ["Entry C"]}
9,
5005,
1872,
997,
3904
] |
def METHOD_NAME(location: Optional[pulumi.Input[str]] = None,
                name: Optional[pulumi.Input[str]] = None,
                project: Optional[pulumi.Input[Optional[str]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetKMSKeyRingResult]:
    """
    Provides access to Google Cloud Platform KMS KeyRing. For more information see
    [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_ring)
    and
    [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings).
    A KeyRing is a grouping of CryptoKeys for organizational purposes. A KeyRing belongs to a Google Cloud Platform Project
    and resides in a specific location.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_gcp as gcp

    my_key_ring = gcp.kms.get_kms_key_ring(location="us-central1",
        name="my-key-ring")
    ```

    :param str location: The Google Cloud Platform location for the KeyRing.
        A full list of valid locations can be found by running `gcloud kms locations list`.
        - - -
    :param str name: The KeyRing's name.
        A KeyRing name must exist within the provided location and match the regular expression `[a-zA-Z0-9_-]{1,63}`
    :param str project: The project in which the resource belongs. If it
        is not provided, the provider project is used.
    """
    # Stub body: the actual invoke is supplied by the Pulumi runtime/codegen.
    ...
19,
1666,
59,
5402,
146
] |
def METHOD_NAME(sentences, mentions, output_file=None):
    """Pair each sentence with its mentions and present the pairs in order."""
    for sentence, sentence_mentions in sentence_mentions_pairs(sentences, mentions):
        present_sentence_mentions(sentence, sentence_mentions, output_file)
2541,
9109,
6394
] |
def METHOD_NAME(self):
    # Exercise check_roundtrip on the `with_simple` sample.
    self.check_roundtrip(with_simple)
9,
41,
53
] |
def METHOD_NAME(message, decoded_signals):
    """Format each decoded signal of *message* as 'name: value[ unit]'.

    Signals of the message that are absent from *decoded_signals* are skipped.
    """
    formatted = []
    for signal in message.signals:
        if signal.name not in decoded_signals:
            continue
        value = decoded_signals[signal.name]
        # Append the unit only for plain numeric values that have one.
        unitless = (
            signal.unit is None
            or isinstance(value, NamedSignalValue)
            or isinstance(value, str)
        )
        if unitless:
            formatted.append(f'{signal.name}: {value}')
        else:
            formatted.append(f'{signal.name}: {value} {signal.unit}')
    return formatted
275,
7958
] |
def METHOD_NAME(
    column, preprocessing_parameters: PreprocessingConfigDict, backend, is_input_feature: bool
) -> FeatureMetadataDict:
    # No column statistics needed: echo back the preprocessing config only.
    return {"preprocessing": preprocessing_parameters}
19,
964,
1094
] |
def METHOD_NAME(self) -> str:
    """Return the ``app_insights_instrumentation_key`` attribute of this resource."""
    return pulumi.get(self, "app_insights_instrumentation_key")
991,
1689,
4584,
59
] |
def METHOD_NAME(pos=(0, 0), size=(0, 0), png=None, backcolor=None, backcolor_sel=None, flags=0):
    # Build an eListbox multi-content tuple for an alpha-tested pixmap entry:
    # (type, x, y, w, h, pixmap, backcolor, selected backcolor, flags).
    return eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, int(pos[0]), int(pos[1]), int(size[0]), int(size[1]), __resolvePixmap(png), __resolveColor(backcolor), __resolveColor(backcolor_sel), flags
457,
459,
475,
9103,
1139,
9
] |
def METHOD_NAME(self) -> None:
    """A 'personal' TeamCity payload produces a direct message to the user."""
    expected_message = "Your personal build for Project :: Compile build 5535 - CL 123456 is broken with status Exit code 1 (new)! :thumbs_down: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
    # Round-trip through orjson to normalize the fixture JSON.
    payload = orjson.dumps(
        orjson.loads(self.webhook_fixture_data(self.WEBHOOK_DIR_NAME, "personal"))
    )
    self.client_post(self.url, payload, content_type="application/json")
    msg = self.get_last_message()
    self.assertEqual(msg.content, expected_message)
    # Personal builds notify via a direct (personal) message, not a stream.
    self.assertEqual(msg.recipient.type, Recipient.PERSONAL)
9,
9993,
8757
] |
def METHOD_NAME(self):
    """Returns the string representation of the model"""
    model_dict = self.to_dict()
    return pprint.pformat(model_dict)
24,
3
] |
def METHOD_NAME(self):
    """A custom max_z_score of 2 yields one remove-outliers filter suggestion."""
    test_data = self.__create_test_data()
    result = RemoveOutliers(
        test_data['df'],
        test_data['column_types'],
        test_data['statistics'],
        custom_config=dict(max_z_score=2),
    ).evaluate()
    # The suggestion is "not_applied": it only proposes the filter action.
    self.assertEqual(
        result,
        [
            dict(
                title='Remove outliers',
                message='Remove 1 outlier(s) and null values to reduce the amount of '
                'noise in this column.',
                status='not_applied',
                action_payload=dict(
                    action_type='filter',
                    action_arguments=['number3'],
                    action_code='number3 <= 1491.0900000000001 and'
                    ' number3 >= 339.40999999999997',
                    action_options={},
                    action_variables={},
                    axis='row',
                    outputs=[],
                ),
            ),
        ],
    )
9,
1195,
41,
343,
200
] |
def METHOD_NAME(obj):
    """Attempt to use `tolist` method to convert to normal Python list."""
    tolist = getattr(obj, "tolist", None)
    if tolist is None:
        # No tolist() available: signal that this object cannot be encoded.
        raise NotEncodable
    return tolist()
] |
def METHOD_NAME(self, typingctx, other):
    """
    Unify this with the *other* USMNdArray.

    Returns a unified array type when *other* is a compatible USMNdArray,
    otherwise falls through and implicitly returns None (no unification).
    """
    # If other is array and the ndim, usm_type, address_space, and device
    # attributes match
    if (
        isinstance(other, USMNdArray)
        and other.ndim == self.ndim
        and self.device == other.device
        and self.addrspace == other.addrspace
        and self.usm_type == other.usm_type
    ):
        # If dtype matches or other.dtype is undefined (inferred)
        if other.dtype == self.dtype or not other.dtype.is_precise():
            # Layouts must agree to be preserved; otherwise fall back to
            # "A" (any layout).
            if self.layout == other.layout:
                layout = self.layout
            else:
                layout = "A"
            # The unified type is writable only if both sides are mutable,
            # and aligned only if both sides are aligned.
            readonly = not (self.mutable and other.mutable)
            aligned = self.aligned and other.aligned
            return type(self)(
                dtype=self.dtype,
                ndim=self.ndim,
                layout=layout,
                readonly=readonly,
                aligned=aligned,
                usm_type=self.usm_type,
                device=self.device,
                addrspace=self.addrspace,
            )
2969
] |
def METHOD_NAME(dom_xml, vhostmd_conf,
                sap_agent):
    """Running the vhostmd before-hook must leave the domain XML unchanged."""
    original_xml = hooking.read_domxml().toxml()
    vhostmd_before.main(vhostmd_conf)
    assert hooking.read_domxml().toxml() == original_xml
9,
944,
41,
6364,
1849,
1295,
427
] |
def METHOD_NAME(duthost, config_source="config_db", wait=60, start_bgp=False):
    """Reload the DUT configuration from the given source, optionally start
    BGP, then wait *wait* seconds for the device to settle."""
    # Map each supported source to its reload command.
    commands = {
        "config_db": "config reload -y",
        "minigraph": "config load_minigraph -y",
    }
    if config_source not in commands:
        raise Exception("Unknown config source")
    cmd = commands[config_source]
    log_debug("config_reload cmd: {}".format(cmd))
    ret = duthost.shell(cmd)
    assert ret["rc"] == 0, "failed to run err:{}".format(str(ret["stderr"]))
    if start_bgp:
        duthost.shell("config bgp startup all")
        log_debug("config_reload started BGP")
    log_debug("wait for {}".format(wait))
    time.sleep(wait)
    log_debug("config_reload complete")
200,
1372
] |
def METHOD_NAME(self):
    """A server with ssl_enforcement_enabled = false must fail the check."""
    hcl_res = hcl2.loads("""
    resource "azurerm_mysql_server" "example" {
      name                = "example-mysqlserver"
      location            = azurerm_resource_group.example.location
      resource_group_name = azurerm_resource_group.example.name
      administrator_login          = "mysqladminun"
      administrator_login_password = "H@Sh1CoR3!" # checkov:skip=CKV_SECRET_80 test secret
      sku_name   = "B_Gen5_2"
      storage_mb = 5120
      version    = "5.7"
      auto_grow_enabled                 = true
      backup_retention_days             = 7
      geo_redundant_backup_enabled      = true
      infrastructure_encryption_enabled = true
      public_network_access_enabled     = false
      ssl_enforcement_enabled           = false
      ssl_minimal_tls_version_enforced  = "TLS1_2"
      threat_detection_policy {
        enabled = false
      }
    }
    """)
    resource_conf = hcl_res['resource'][0]['azurerm_mysql_server']['example']
    scan_result = check.scan_resource_conf(conf=resource_conf)
    # SSL enforcement is disabled above, so the scan must report FAILED.
    self.assertEqual(CheckResult.FAILED, scan_result)
] |
f METHOD_NAME(self): | [
34,
1537,
61,
1190
] |
async def METHOD_NAME(self, dashboard_id, cell_id, **kwargs): ...  # API stub; implementation provided elsewhere
34,
4774,
147,
383,
147,
958
] |
def METHOD_NAME(self, label, transformation=None, **kwargs):
    """Identity hook: return *label* unchanged; *transformation* and any
    extra keyword arguments are accepted but ignored."""
    return label
3725,
636
] |
def METHOD_NAME(self, task_id, label_mapping, **kwargs): ...  # API stub; implementation provided elsewhere
72,
620,
147,
415
] |
def METHOD_NAME(f):
    """Decorator to check if current user is an owner of the record."""
    @wraps(f)
    def inner(self, record, user, *args, **kwargs):
        # 401 for anonymous users, 403 for authenticated non-owners.
        if not current_user.is_authenticated:
            abort(401)
        if not current_user.id in record['_deposit']['owners']:
            # Log who the actual owners are before rejecting the request.
            current_app.logger.warning(
                "OWN-API: User is not allowed to change ownership. skipping..", exc_info=True)
            current_app.logger.warning("OWN-API: Owners are: {}".format("\n ".join([str(User.query.filter(
                User.id.in_([i])).all()[0].email) for i in record['_deposit']['owners']])), exc_info=True)
            abort(403)
        return f(self, record, user, *args, **kwargs)
    return inner
403,
148,
3982
] |
def METHOD_NAME():
    """describe_user for an unknown username raises NotFoundException."""
    client = boto3.client("mq", region_name="us-east-2")
    broker_id = client.create_broker(
        AutoMinorVersionUpgrade=False,
        BrokerName="testbroker",
        DeploymentMode="dm",
        EngineType="ACTIVEMQ",
        EngineVersion="version",
        HostInstanceType="hit",
        PubliclyAccessible=True,
        Users=[],
    )["BrokerId"]
    # The broker was created with no users, so any lookup must fail.
    with pytest.raises(ClientError) as exc:
        client.describe_user(BrokerId=broker_id, Username="unknown")
    err = exc.value.response["Error"]
    assert err["Code"] == "NotFoundException"
    assert (
        err["Message"]
        == "Can't find requested user [unknown]. Make sure your user exists."
    )
9,
2517,
21,
46
] |
def METHOD_NAME(
    df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame],
    column: str = "",
) -> Union[bool, pd.Series, pd.DataFrame]:
    """
    Validate if a data cell is VAT in a DataFrame column. For each cell, return True or False.

    Parameters
    ----------
    df
        A pandas or Dask DataFrame containing the data to be validated.
    col
        The name of the column to be validated.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(vat.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        # With a column name, validate that column; otherwise every cell.
        if column != "":
            return df[column].apply(vat.is_valid)
        return df.applymap(vat.is_valid)
    # Scalar input: validate the single value directly.
    return vat.is_valid(df)
187,
4093,
3205
] |
def METHOD_NAME(self, mock_send_new_pending_email):
    """With signals disconnected, the command creates the models without
    sending the new-pending email."""
    # Neither model exists before the command runs.
    self.assert_models_exist(False, False)
    call_command(self.command, self.user.username, create_config=True, disconnect_signals=True)
    # Both models exist afterwards...
    self.assert_models_exist(True, True)
    # ...and no notification email was triggered.
    assert not mock_send_new_pending_email.called
9,
129,
58,
1089,
377,
7958,
7959
] |
def METHOD_NAME(self):
    # The scraped recipe should report a yield of "12 servings".
    self.assertEqual("12 servings", self.harvester_class.yields())
9,
4119
] |
def METHOD_NAME():
    """Assert that the FeatureSetSelector returns transformed X based on test feature list 1."""
    ds = FeatureSetSelector(subset_list="tests/subset_test.csv", sel_subset="test_subset_1")
    ds.fit(test_X, y=None)
    transformed_X = ds.transform(test_X)
    # Same number of rows, but a reduced (5-column) feature subset.
    assert transformed_X.shape[0] == test_X.shape[0]
    assert transformed_X.shape[1] != test_X.shape[1]
    assert transformed_X.shape[1] == 5
    # The selected columns match the selector's feature list exactly.
    assert np.array_equal(transformed_X, test_X[ds.feat_list].values)
9,
964,
0,
5169,
1170
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.