Columns:
text: string (lengths 15 to 7.82k)
ids: integer sequence (lengths 1 to 7)
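A minimal sketch of reading rows with this two-column schema (text: a code snippet; ids: a short integer sequence), assuming the dump comes from a Hugging Face dataset; the path "user/dataset" is a hypothetical placeholder, not the real source.

# Minimal loading sketch (assumption: rows live in a Hugging Face dataset;
# "user/dataset" is a hypothetical path, not the real source).
from datasets import load_dataset

ds = load_dataset("user/dataset", split="train")
for row in ds.select(range(3)):          # peek at the first three rows
    print(len(row["text"]), row["ids"])  # string length and id sequence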
def METHOD_NAME(self):
    # objects such as deques, sets, and dictionaries enforce
    # length immutability during iteration
    it = self.it
    self.assertEqual(length_hint(it), n)
    next(it)
    self.assertEqual(length_hint(it), n-1)
    self.mutate()
    self.assertRaises(RuntimeError, next, it)
    self.assertEqual(length_hint(it), 0)
[ 9, 478, 4217, 2478 ]
def METHOD_NAME(self):
    if not self.thread_started:
        self.thread.METHOD_NAME()
    logger.debug('Starting Kite HTTP session...')
    self.endpoint = requests.Session()
    self.languages = self.get_languages()
    self.sig_client_started.emit(self.languages)
[ 447 ]
def METHOD_NAME(*args):
    return {feature[PROC_COLUMN]: feature for features in args for feature in features}
[ 19, 2305, 2247, 280, 50 ]
def METHOD_NAME(x):
    return float(x / 1e6)
[ 24, 17300 ]
def METHOD_NAME(self):
    """
    drop the current collection
    """
    try:
        self.current_database.METHOD_NAME(self.collection_name)
    except Exception, e:
        assert False, " ERROR - Deleting a collection %s in MongoDB...\n %s" % (self.current_collection, str(e))
[ 1050, 1098 ]
def METHOD_NAME(
    graph: Graph,
    updateString: str,
    initBindings: Optional[Mapping[str, Identifier]] = None,
    initNs: Optional[Mapping[str, Any]] = None,
    base: Optional[str] = None,
) -> None:
    """
    Process a SPARQL Update Request

    returns Nothing on success or raises Exceptions on error
    """
    evalUpdate(
        graph, translateUpdate(parseUpdate(updateString), base, initNs), initBindings
    )
[ 356, 86 ]
def METHOD_NAME(self, nop_span):
    nop_span.set_tag("span.type", "db")
    assert nop_span._dd_span.span_type == "db"
[ 9, 82, 1244, 44 ]
def METHOD_NAME(self, entity):
    """Override method to delete entity"""
    raise NotImplementedError()
[ 74, 34 ]
def METHOD_NAME(
    self,
    net_size: Decimal,
) -> PositionSide:
    if net_size > 0:
        return PositionSide.LONG
    elif net_size < 0:
        return PositionSide.SHORT
    else:
        return PositionSide.FLAT
[ 214, 4259, 195, 142 ]
async def METHOD_NAME(self):
    self._triton_client = httpclient.InferenceServerClient(url="localhost:8000")
[ 958, 0, 1 ]
def METHOD_NAME(self, symbol: str) -> None: ...
[ 356 ]
def METHOD_NAME(hThread: int, ThreadAffinityMask): ...
[ 0, 600, 10343, 361 ]
def METHOD_NAME(self, c_args):
    """
    Call this Callable with a string of C-style arguments.

    :param str c_args: C-style arguments.
    :return: The return value from the call.
    :rtype: claripy.Ast
    """
    c_args = c_args.strip()
    if c_args[0] != "(":
        c_args = "(" + c_args
    if c_args[-1] != ")":
        c_args += ")"

    # Parse arguments
    content = "int main() { func%s; }" % c_args
    ast = pycparser.CParser().parse(content)

    if not ast.ext or not isinstance(ast.ext[0], pycparser.c_ast.FuncDef):
        raise AngrCallableError("Error in parsing the given C-style argument string.")

    if not ast.ext[0].body.block_items or not isinstance(ast.ext[0].body.block_items[0], pycparser.c_ast.FuncCall):
        raise AngrCallableError(
            "Error in parsing the given C-style argument string: Cannot find the expected function call."
        )

    arg_exprs = ast.ext[0].body.block_items[0].args.exprs

    args = []
    for expr in arg_exprs:
        if isinstance(expr, pycparser.c_ast.Constant):
            # string
            if expr.type == "string":
                args.append(expr.value[1:-1])
            elif expr.type == "int":
                args.append(int(expr.value))
            else:
                raise AngrCallableError("Unsupported expression type %s." % expr.type)
        else:
            raise AngrCallableError("Unsupported expression type %s." % type(expr))

    return self.__call__(*args)
[ 128, 2629 ]
def METHOD_NAME(path, hadoop_bin=None, fs_name=None, fs_ugi=None):
    """hadoop mkdir directory"""
    hadoop_bin, fs_name, fs_ugi = parse_account(hadoop_bin, fs_name, fs_ugi)
    path = check_hadoop_path(path, hadoop_bin, fs_name, fs_ugi)
    cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
    cmd += " -mkdir %s" % path
    cmd += " 2>%s" % ERR_LOG
    ret = os.system(cmd)
    return ret
[ 3456 ]
def METHOD_NAME(self, parent=None):
    """
    Clone this object.
    @param parent: The parent for the clone.
    @type parent: L{element.Element}
    @return: A copy of this object assigned to the new parent.
    @rtype: L{Attribute}
    """
    a = Attribute(self.qname(), self.value)
    a.parent = parent
    return a
[ 670 ]
def METHOD_NAME(self, mocker: MockerFixture) -> None:
    """Test handle."""
    args = ArgsDataModel()
    calculate_char_set = mocker.patch.object(
        RandomStringLookup, "calculate_char_set", return_value="char_set"
    )
    ensure_has_one_of = mocker.patch.object(
        RandomStringLookup, "ensure_has_one_of", return_value=True
    )
    format_results = mocker.patch.object(
        RandomStringLookup, "format_results", return_value="success"
    )
    generate_random_string = mocker.patch.object(
        RandomStringLookup, "generate_random_string", return_value="random string"
    )
    assert RandomStringLookup.handle("12", Mock()) == format_results.return_value
    calculate_char_set.assert_called_once_with(args)
    generate_random_string.assert_called_once_with(
        calculate_char_set.return_value, 12
    )
    ensure_has_one_of.assert_called_once_with(
        args, generate_random_string.return_value
    )
    format_results.assert_called_once_with(generate_random_string.return_value)
[ 9, 276 ]
def METHOD_NAME(f):
    # Checks if path exists and readable
    if not os.path.exists(f) or not os.access(f, os.R_OK):
        PBinCLIError("Error accessing path: {}".format(f))
[ 250, 6056 ]
def METHOD_NAME():
    with pytest.raises(ValueError):
        qgan = models.StyleQGAN(latent_dim=2)

    circuit = models.Circuit(2)
    with pytest.raises(ValueError):
        qgan = models.StyleQGAN(latent_dim=2, layers=2, circuit=circuit)

    with pytest.raises(ValueError):
        qgan = models.StyleQGAN(latent_dim=2, circuit=circuit)

    with pytest.raises(ValueError):
        qgan = models.StyleQGAN(latent_dim=2, layers=2, set_parameters=lambda x: x)

    reference_distribution = generate_distribution(10)
    qgan = models.StyleQGAN(latent_dim=2, circuit=circuit, set_parameters=lambda x: x)
    with pytest.raises(ValueError):
        qgan.fit(reference_distribution, save=False)

    initial_params = np.random.uniform(-0.15, 0.15, 18)
    qgan = models.StyleQGAN(latent_dim=2, layers=2)
    with pytest.raises(ValueError):
        qgan.fit(reference_distribution, initial_params=initial_params, save=False)
[ 9, -1, 1096 ]
def METHOD_NAME(self):
    # ModulePackage.getNameStream()
    self.assertHasAttr(self.modulePackage, "getNameStream")
    self.modulePackage.getNameStream()
[ 9, 19, 156, 919 ]
def METHOD_NAME(self):
    l = eventlet.listen(('localhost', 0))
    x = eventlet.with_timeout(
        0.01, eventlet.serve, l, lambda c, a: None,
        timeout_value="timeout")
    self.assertEqual(x, "timeout")
[ 9, 5999 ]
def METHOD_NAME(self, v, min_val, max_val):
    def constrain_internal(x):
        return min(max(x, min_val), max_val)

    return np.array([constrain_internal(x) for x in v])
[ 10835 ]
def METHOD_NAME(rotkehlchen_api_server):
    # Malformed address
    response = requests.get(
        api_url_for(
            rotkehlchen_api_server,
            'per_address_avalanche_transactions_resource',
            address='0xasdasd',
        ),
    )
    assert_error_response(
        response=response,
        contained_in_msg='address": ["Given value 0xasdasd is not an ethereum address',
        status_code=HTTPStatus.BAD_REQUEST,
    )
    # Malformed from_timestamp
    response = requests.get(
        api_url_for(
            rotkehlchen_api_server,
            'per_address_avalanche_transactions_resource',
            address='0xaFB7ed3beBE50E0b62Fa862FAba93e7A46e59cA7',
        ),
        json={'from_timestamp': 'foo'},
    )
    assert_error_response(
        response=response,
        contained_in_msg='Failed to deserialize a timestamp entry from string foo',
        status_code=HTTPStatus.BAD_REQUEST,
    )
    # Malformed to_timestamp
    response = requests.get(
        api_url_for(
            rotkehlchen_api_server,
            'per_address_avalanche_transactions_resource',
            address='0xaFB7ed3beBE50E0b62Fa862FAba93e7A46e59cA7',
        ),
        json={'to_timestamp': 'foo'},
    )
    assert_error_response(
        response=response,
        contained_in_msg='Failed to deserialize a timestamp entry from string foo',
        status_code=HTTPStatus.BAD_REQUEST,
    )
[ 9, 539, 1465, 1096 ]
def METHOD_NAME(self, request, project_id, cluster_id, node_ip):
    """Node metrics overview"""
    params = self.params_validate(FetchMetricOverviewSLZ)
    # container_count and pod_count are included by default
    response_data = {'container_count': '0', 'pod_count': '0'}
    container_pod_count = prom.get_container_pod_count(cluster_id, node_ip, bk_biz_id=request.project.cc_app_id)
    for count in container_pod_count.get('result') or []:
        for k, v in count['metric'].items():
            if k == 'metric_name' and count['value']:
                response_data[v] = count['value'][1]
    # Use all dimensions by default; if specified, use only the given ones
    dimensions = params.get('dimensions') or [dim for dim in constants.MetricDimension]
    for dimension in dimensions:
        if dimension not in constants.NODE_DIMENSIONS_FUNC:
            raise error_codes.APIError(_("节点指标维度 {} 不合法").format(dimension))
        dimension_func = constants.NODE_DIMENSIONS_FUNC[dimension]
        response_data[dimension] = dimension_func(cluster_id, node_ip, bk_biz_id=request.project.cc_app_id)
    return Response(response_data)
[ 4512 ]
def METHOD_NAME(cls):
    cls.histogram = Histogram()
[ 0, 1, 2 ]
def METHOD_NAME(self, request, dependent_scenario_name, target_sat):
    """After upgrade, Update, delete and clone should work on existing hostgroup(created before upgrade)

    :id: postupgrade-79958754-94b6-4bfe-af12-7d4031cd2dd2

    :steps:
        1. After upgrade, check hostgroup entities.
        2. Update existing hostgroup with new entities
        3. Clone hostgroup.
        4. Delete hostgroup, parent hostgroup, cloned hostgroup, domain, subnet, os, loc, org.

    :expectedresults: After upgrade
        1- Hostgroup remain same.
        2- Hostgroup entities update should work.
        3- Hostgroup cloned should work.
        4- Cloned hostgroup should be the subset of original hostgroup.
        5- Hostgroup entities deletion should work
    """
    pre_test_name = dependent_scenario_name
    # verify host-group is intact after upgrade
    org = target_sat.api.Organization().search(query={'search': f'name="{pre_test_name}_org"'})[0]
    request.addfinalizer(org.delete)
    loc = target_sat.api.Location().search(query={'search': f'name="{pre_test_name}_loc"'})[0]
    request.addfinalizer(loc.delete)
    proxy = target_sat.api.SmartProxy().search(
        query={'search': f'url = {target_sat.url}:9090'}
    )[0]
    hostgrp = target_sat.api.HostGroup().search(
        query={'search': f'name={pre_test_name}_host_grp'}
    )[0]
    request.addfinalizer(hostgrp.parent.delete)
    request.addfinalizer(hostgrp.delete)
    assert f"{pre_test_name}_host_grp" == hostgrp.name
    assert proxy.id == hostgrp.puppet_proxy.id
    assert proxy.id == hostgrp.puppet_ca_proxy.id
    domain = target_sat.api.Domain().search(query={'search': f'name={pre_test_name}_domain'})[0]
    assert domain.id == hostgrp.domain.id
    request.addfinalizer(domain.delete)
    subnet = target_sat.api.Subnet().search(query={'search': f'name={pre_test_name}_subnet'})[0]
    assert subnet.id == hostgrp.subnet.id
    request.addfinalizer(subnet.delete)
    parent = target_sat.api.HostGroup().search(
        query={'search': f'name={pre_test_name}_parent_host_grp'}
    )[0]
    assert parent.id == hostgrp.parent.id
    os = target_sat.api.OperatingSystem().search(query={'search': f'name={pre_test_name}_os'})[0]
    assert os.id == hostgrp.operatingsystem.id
    request.addfinalizer(os.delete)

    # update host-group after upgrade
    new_name = gen_string('alpha')
    hostgrp.name = new_name
    hostgrp.update(['name'])
    assert new_name == hostgrp.name

    new_subnet = target_sat.api.Subnet().create()
    hostgrp.subnet = new_subnet
    hostgrp.update(['subnet'])
    assert new_subnet.id == hostgrp.subnet.id

    new_domain = target_sat.api.Domain().create()
    hostgrp.domain = new_domain
    hostgrp.update(['domain'])
    assert new_domain.id == hostgrp.domain.id

    new_os = target_sat.api.OperatingSystem().create()
    hostgrp.operatingsystem = new_os
    hostgrp.update(['operatingsystem'])
    assert new_os.id == hostgrp.operatingsystem.id

    # clone hostgroup
    hostgroup_cloned_name = gen_string('alpha')
    hostgroup_cloned = target_sat.api.HostGroup(id=hostgrp.id).clone(
        data={'name': hostgroup_cloned_name}
    )
    hostgroup_search = target_sat.api.HostGroup().search(
        query={'search': f'name={hostgroup_cloned_name}'}
    )
    assert len(hostgroup_search) == 1
    hostgroup_cloned_object = hostgroup_search[0]
    request.addfinalizer(hostgroup_cloned_object.delete)
    hostgroup_origin = hostgrp.read_json()
    # remove unset values before comparison
    unset_keys = set(hostgroup_cloned) - set(hostgroup_origin)
    for key in unset_keys:
        del hostgroup_cloned[key]
    # remove unique values before comparison
    for key in ('updated_at', 'created_at', 'title', 'id', 'name'):
        del hostgroup_cloned[key]
    assert hostgroup_cloned.items() <= hostgroup_origin.items()
[ 9, 72, 7441, 10475 ]
def METHOD_NAME(fs: fsspec.AbstractFileSystem):
    with TemporaryDirectory() as tmpdir:
        writer = TransactionalFile(tmpdir, fs)
        # Ensure we can lock on a directory
        writer.acquire_lock()
        # Ensure we cannot write to a directory scoped lock
        with pytest.raises(RuntimeError):
            writer.write(b"test 1")
        writer.release_lock()
[ 9, 171, 1853, 2851 ]
def METHOD_NAME(self, event: Union[RunEvent, DatasetEvent | JobEvent]) -> None:  # noqa: UP007
    if not (isinstance(event, (RunEvent, DatasetEvent, JobEvent))):
        msg = "`emit` only accepts RunEvent, DatasetEvent, JobEvent classes"
        raise ValueError(msg)  # noqa: TRY004
    if not self.transport:
        log.error("Tried to emit OpenLineage event, but transport is not configured.")
        return
    if log.isEnabledFor(logging.DEBUG):
        val = Serde.to_json(event).encode("utf-8")
        log.debug("OpenLineageClient will emit event %s", val)
    if self._filters and self.filter_event(event) is None:
        return
    if event:
        self.transport.METHOD_NAME(event)
[ 2648 ]
def METHOD_NAME(self, other):
    """
    Return True if *self* and *other* are of the same type, False otherwise.
    """
    return other.value.__class__ is self.value.__class__
[ 250, 1101, 44 ]
def METHOD_NAME(running_app):
    valid_full = {
        "title": {"en": "Creative Commons Attribution 4.0 International"},
        "description": {"en": "A description"},
        "link": "https://creativecommons.org/licenses/by/4.0/",
    }
    assert valid_full == RightsSchema().load(valid_full)
[ 9, 1205, 324, 3712, 526 ]
f METHOD_NAME(self):
[ 9, 3655, 24, 188, 1097, 188, 245 ]
def METHOD_NAME():
    for e in self.exprs:
        yield from e.make_generator()()
[ 2963, 370 ]
def METHOD_NAME(
    trainer_controller_with_start_learning_mocks,
):
    tc, trainer_mock = trainer_controller_with_start_learning_mocks
    tc.train_model = False

    env_mock = MagicMock()
    env_mock.close = MagicMock()
    env_mock.reset = MagicMock()
    env_mock.training_behaviors = MagicMock()

    tc.start_learning(env_mock)
    env_mock.reset.assert_called_once()
    assert tc.advance.call_count == 11
    tc._save_models.assert_not_called()
[ 9, 447, 4960, 5692, 2245, 217, 654 ]
def METHOD_NAME() -> Path:
    p = TEST_DIR / 'gunw_test_data' / 'S1-GUNW-A-R-064-tops-20210723_20210711-015001-35393N_33512N-PP-6267-v2_0_4.json'
    return p
[ 9, 12070, 763, 157 ]
def METHOD_NAME(mixed, attr):
    if isinstance(mixed, InstrumentedAttribute):
        return getattr(
            mixed.property.mapper.class_, attr
        )
    else:
        return getattr(mixed, attr)
[ 19, 864 ]
def METHOD_NAME(self):
    """test_version_comparator_compare

    When a version_comparator has successfully parsed its given expression,
    then calling the compare method executes the described comparison,
    returning a boolean value.
    """
    c = version_comparator('foo == 1.2.3')
    self.assertIsNotNone(c.parse())
    self.assertTrue(c.compare('1.2.3'))
    self.assertFalse(c.compare('1.2.7'))

    c = version_comparator('foo < 1.2.3')
    self.assertIsNotNone(c.parse())
    self.assertTrue(c.compare('1.2.2'))
    self.assertTrue(c.compare('1.1.3'))
    self.assertTrue(c.compare('0.2.3'))
    self.assertFalse(c.compare('1.2.3'))
    self.assertFalse(c.compare('1.2.4'))
    self.assertFalse(c.compare('1.3.3'))
    self.assertFalse(c.compare('2.2.3'))

    c = version_comparator('foo >= 1.2.3')
    self.assertIsNotNone(c.parse())
    self.assertTrue(c.compare('1.2.3'))
    self.assertTrue(c.compare('1.2.10'))
    self.assertFalse(c.compare('1.2.2'))
    self.assertTrue(c.compare('1.10.3'))
[ 9, 281, 2403, 979 ]
def METHOD_NAME(cls):
    """generic_func.dispatch(cls) -> <function implementation>

    Runs the dispatch algorithm to return the best available implementation
    for the given *cls* registered on *generic_func*.

    """
    nonlocal cache_token
    if cache_token is not None:
        current_token = get_cache_token()
        if cache_token != current_token:
            dispatch_cache.clear()
            cache_token = current_token
    try:
        impl = dispatch_cache[cls]
    except KeyError:
        try:
            impl = registry[cls]
        except KeyError:
            impl = _find_impl(cls, registry)
        dispatch_cache[cls] = impl
    return impl
[ 2506 ]
def METHOD_NAME(self) -> Optional[Mapping[str, 'outputs.SAPDiskConfigurationResponse']]:
    """
    The disk configuration for the db volume. For HANA, Required volumes are: ['hana/data', 'hana/log', 'hana/shared', 'usr/sap', 'os'], Optional volume : ['backup'].
    """
    return pulumi.get(self, "volume_configurations")
[ 2276, 4880 ]
def METHOD_NAME(self):
    n = 20
    for k in range(2):
        m = random([n, n])+1j*random([n, n])
        for i in range(n):
            m[i, i] = 20*(.1+abs(m[i, i]))
        a = dot(transpose(conjugate(m)), m)
        c = cholesky(a)
        a1 = dot(transpose(conjugate(c)), c)
        assert_array_almost_equal(a, a1)
        c = transpose(c)
        a = dot(c, transpose(conjugate(c)))
        assert_array_almost_equal(cholesky(a, lower=1), c)
[ 9, 236, 2587 ]
def METHOD_NAME(text: str) -> Tuple[str, str]:
    text_list = text.split()
    return text, ' '.join(text_list[:-2] + ['<mask>', text_list[-1]])
[ 238, 361, 24, 679, 2236 ]
def METHOD_NAME():
    nest.ResetKernel()
[ 656 ]
def METHOD_NAME() -> QPalette:
    # 'Zion Reversed' color scheme from KDE.
    window = QColor(16, 16, 16)
    return _make_palette(
        text=QColor(Qt.white),
        text_disabled=QColor(85, 85, 85),
        window=window,
        base=QColor(Qt.black),
        highlight=QColor(0, 49, 110),
        highlight_disabled=window,
        link=QColor(128, 181, 255),
        light=QColor(174, 174, 174),
        mid=QColor(89, 89, 89),
        dark=QColor(118, 118, 118),
        shadow=QColor(141, 141, 141),
    )
[ -1, 4536 ]
def METHOD_NAME(self, qspec: TFQuantizerSpec, op_name: str) -> Quantizer:
    quantizer_cls = NNCF_QUANTIZATION_OPERATIONS.get(qspec.mode)
    return quantizer_cls(op_name, qspec)
[ 129, 6085 ]
def METHOD_NAME(session=None):
    """
    Check whether a given session, or the current session if left blank,
    is logged in to Pavlovia.

    Returns:
        status : bool
            True if logged in after process is completed, False otherwise.
    """
    if session is None:
        # Substitute default session
        session = pavlovia.getCurrentSession()
    if session and session.user:
        # If logged in, return positive
        return True
    else:
        # If not logged in, prompt to login
        dlg = wx.MessageDialog(None, message=_translate(
            "You are not logged in to Pavlovia. Please log in to continue."
        ), style=wx.ICON_AUTH_NEEDED | wx.OK | wx.CANCEL)
        dlg.SetOKLabel(_translate("Login..."))
        if dlg.ShowModal() == wx.ID_OK:
            # If they click Login, open login screen
            user = logInPavlovia(None)
            # Return positive or negative based on whether login succeeded
            return bool(user)
        else:
            # If they cancel out of login prompt, return negative
            return False
[ 250, 273 ]
def METHOD_NAME():
    with patch(
        "salt.utils.napalm.get_device",
        MagicMock(return_value=napalm_test_support.MockNapalmDevice()),
    ):
        ret = napalm_network.config_control()
        assert ret == (True, "")
[ 9, 200, 401 ]
def METHOD_NAME(self, driver, bus, addr, times, node):
    """
    Register the hwmon device multiple times to fix loading issue
    Param:
        string: driver name like "fsp550"
        int: bus, decimal format like 11
        hex: addr, hex format like 0x59
        string: node like 'fan1_input'
    Returns:
        bool: true for success , false for fail
    """
    count = 0
    while count < times:
        self.new_i2c_device(driver, addr, bus)
        ret = os.system("ls /sys/bus/i2c/devices/i2c-%d/%d-%4.4x/hwmon/hwmon*/ | grep %s > /dev/null" % (bus, bus, addr, node))
        if ret == 0:
            return True
        os.system("echo 0x%4.4x > /sys/bus/i2c/devices/i2c-%d/delete_device" % (addr, bus))
        count = count + 1
    return False
[ 372, -1, 457, 3148 ]
def METHOD_NAME(self):
    cleaned_data = super().METHOD_NAME()
    if not cleaned_data["title"]:
        raise ValidationError("Title field is required")
    return cleaned_data
[ 1356 ]
def METHOD_NAME(func):
    func.description = description
    func.recommendation = recommendation
    func.skip_on_failed = skip_on_failed
    func.post_fail_fix_hook = post_fail_fix_hook
    HEALTH_CHECKS.append(func)
    return func
[ 972 ]
def METHOD_NAME(input_data):
    expiry_date = input_data.get("expiry_date")
    if expiry_date and not is_date_in_future(expiry_date):
        raise ValidationError(
            {
                "expiry_date": ValidationError(
                    "Expiry date cannot be in the past.",
                    code=GiftCardErrorCode.INVALID.value,
                )
            }
        )
[ 1356, 3191, 153 ]
def METHOD_NAME(self):
    self.assertRaises(ValueError, AbstractPickleTests.METHOD_NAME, self)
[ 9, 2203, 457 ]
def METHOD_NAME(self):
    self.pre_operations()
    yield self.AmlFilesystemsDelete(ctx=self.ctx)()
    self.post_operations()
[ 750, 710 ]
def METHOD_NAME(self):
    pass
[ 709, 710 ]
def METHOD_NAME(
    mocker, set_environment_variables, file_path, expected_output
):
    """
    Validates that the context of a log file is extracted correctly.
    """
    mocker.patch.object(DefaultApi, "download_file", return_value=file_path)
    runner = Runner("Query", json_to_outputs=True)
    temp = runner._return_context_dict_from_log(["123"])
    assert temp == expected_output
[ 9, 1413, 772, 141, 280, 390 ]
def METHOD_NAME(_dict, name):
    """Verify the potential template is properly formatted."""
    assert (
        "name" in _dict
    ), f"Key name not found in the potential template {name}.json"
    assert (
        "expression" in _dict
    ), f"Key expression not found in the potential template {name}.json"
    assert (
        "independent_variables" in _dict
    ), f"Key independent_variables not found in the potential template {name}.json"
    if str(name) != _dict["name"]:
        raise GMSOError(
            f'Mismatch between Potential name {name} and {_dict["name"]}'
        )
[ 1162, 2993, 671, 219 ]
def METHOD_NAME(self):
    self.pkgdesc = f"Cross-toolchain LLVM libunwind ({an} static library)"
    self.depends = [f"libunwind-cross-{an}={pkgver}-r{pkgrel}"]
    return [f"usr/{at}/usr/lib/libunwind.a"]
[ -1 ]
def METHOD_NAME(buffer):
    pool = buffer.get_connection_pool()
    assert pool.connection_kwargs["host"] == BROKER_URL_HOST
    assert pool.connection_kwargs["socket_timeout"] == buffer.connection_timeout
    assert (
        pool.connection_kwargs["socket_connect_timeout"]
        == buffer._socket_connect_timeout
    )
    assert pool.connection_kwargs["client_name"] == buffer._client_name
[ 9, 19, 550, 1567 ]
def METHOD_NAME(string):
    return binascii.a2b_hex(string)
[ 275, 144 ]
def METHOD_NAME(self):
    """
    :return: common directory
    """
    ll = [j.path.split(os.sep) for j in self]
    common = os.path.commonprefix(ll)
    if common:
        return os.sep.join(common + [""])
[ -1 ]
f METHOD_NAME():
[ 214, 1441, 1155 ]
def METHOD_NAME(self) -> str:
    """
    The current state of dnc controller resource.
    """
    return pulumi.get(self, "provisioning_state")
[ 1994, 551 ]
def METHOD_NAME(self):
    return self.provider and self.provider.startswith("integrations:")
[ 220, 1911, 2275 ]
def METHOD_NAME(cls, result: str):
    eth_value, epoch = result.split(":")
    epoch_timestamp = datetime.fromtimestamp(float(epoch), timezone.utc)
    return cls(float(eth_value), epoch_timestamp)
[ 280, 144 ]
def METHOD_NAME(self):
    """Playlist administrators cannot delete playlists with Markdown document."""
    user = factories.UserFactory()
    playlist = factories.PlaylistFactory()
    factories.PlaylistAccessFactory(
        user=user, playlist=playlist, role=models.ADMINISTRATOR
    )
    MarkdownDocumentFactory(playlist=playlist)
    jwt_token = UserAccessTokenFactory(user=user)

    self.assertEqual(models.Playlist.objects.count(), 1)

    response = self.client.delete(
        f"/api/playlists/{playlist.id}/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.json(), "Resources are still attached to playlist")
    self.assertEqual(models.Playlist.objects.count(), 1)
[ 9, 34, 556, 41, 108, 352, 604 ]
def METHOD_NAME(tmpdir):
    """
    Test that a query generated from Maraboupy can be saved and loaded correctly and return timeout.
    This query is expected to be UNSAT but is currently unsolvable within one second.
    If future improvements allow the query to be solved within a second, then this test will need to be updated.
    """
    network = load_acas_network()

    # Set output constraint
    outputVars = network.outputVars[0].flatten()
    outputVar = outputVars[0]
    minOutputValue = 1500.0
    network.setLowerBound(outputVar, minOutputValue)

    # Save this query to a temporary file, and reload the query
    queryFile = tmpdir.mkdir("query").join("query.txt").strpath
    network.saveQuery(queryFile)
    ipq = Marabou.load_query(queryFile)

    # Solve the query loaded from the file and compare to the solution of the original query
    opt = Marabou.createOptions(verbosity = 0, timeoutInSeconds = 1)
    exitCode_net, vals_net, stats_net = network.solve(options = opt)
    exitCode_ipq, vals_ipq, stats_ipq = Marabou.solve_query(ipq, options = opt)

    # Assert timeout
    assert stats_net.hasTimedOut()
    assert stats_ipq.hasTimedOut()
    assert(exitCode_net == "TIMEOUT" and exitCode_ipq == "TIMEOUT")
[ 9, 24, 539 ]
def METHOD_NAME(private_endpoint_connection_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                workspace_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspacePrivateEndpointConnectionResult:
    """
    Gets the specified private endpoint connection associated with the workspace.
    Azure REST API version: 2023-02-28.

    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
    :param str resource_group_name: The name of the resource group that contains the service instance.
    :param str workspace_name: The name of workspace resource.
    """
    __args__ = dict()
    __args__['privateEndpointConnectionName'] = private_endpoint_connection_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:healthcareapis:getWorkspacePrivateEndpointConnection', __args__, opts=opts, typ=GetWorkspacePrivateEndpointConnectionResult).value

    return AwaitableGetWorkspacePrivateEndpointConnectionResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
        private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
[ 19, 1976, 547, 841, 550 ]
def METHOD_NAME(servicer, server):
    rpc_method_handlers = {
        'GetServerResponse': grpc.unary_unary_rpc_method_handler(
            servicer.GetServerResponse,
            request_deserializer=test__pb2.Message.FromString,
            response_serializer=test__pb2.MessageResponse.SerializeToString,
        ),
        'GetServerResponseAbort': grpc.unary_unary_rpc_method_handler(
            servicer.GetServerResponseAbort,
            request_deserializer=test__pb2.Message.FromString,
            response_serializer=test__pb2.MessageResponse.SerializeToString,
        ),
        'GetServerResponseUnavailable': grpc.unary_unary_rpc_method_handler(
            servicer.GetServerResponseUnavailable,
            request_deserializer=test__pb2.Message.FromString,
            response_serializer=test__pb2.MessageResponse.SerializeToString,
        ),
        'GetServerResponseException': grpc.unary_unary_rpc_method_handler(
            servicer.GetServerResponseException,
            request_deserializer=test__pb2.Message.FromString,
            response_serializer=test__pb2.MessageResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'test.TestService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
[ 238, 9, 549, 2711, 24, 163 ]
def METHOD_NAME(lbann):
    """Construct Protobuf message for Python data reader.

    The Python data reader will import the current Python file to
    access the sample access functions.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Note: The training data reader should be removed when
    # https://github.com/LLNL/lbann/issues/1098 is resolved.
    message = lbann.reader_pb2.DataReader()
    message.reader.extend([
        tools.create_python_data_reader(
            lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'train'
        )
    ])
    message.reader.extend([
        tools.create_python_data_reader(
            lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'test'
        )
    ])
    return message
[ 363, 365, 781 ]
def METHOD_NAME(self):
    return self.contributor_type == self.CONTRIBUTOR_AUTHOR
[ 220, 3399, 2997 ]
def METHOD_NAME(a: _[0:X], b: _[0:Y], c: _[1:Z], d: _[2:W - 2], e: _[0:U]):
    input << A[i, j, k, l, a, b, c, d, e]
    output >> B(1, lambda a, b: a + b)[i, j, k, l]
    output = input
[ 573 ]
def METHOD_NAME(self):
    if isinstance(self.options, SCOptions):
        options = self.options()
    else:
        options = SCOptions()(self.options)
    if options.linalg.solver is None:
        options.linalg.solver = parapint.linalg.MPISchurComplementLinearSolver(
            subproblem_solvers={ndx: parapint.linalg.InteriorPointMA27Interface(cntl_options={1: 1e-6})
                                for ndx in range(len(self.all_scenario_names))},
            schur_complement_solver=parapint.linalg.InteriorPointMA27Interface(cntl_options={1: 1e-6}))
    status = parapint.algorithms.ip_solve(interface=self.interface, options=options)
    if status != parapint.algorithms.InteriorPointStatus.optimal:
        raise RuntimeError('Schur-Complement Interior Point algorithm did not converge')
    self.interface.load_primals_into_pyomo_model()
    return status
[ 283 ]
def METHOD_NAME():
    pass
[ 175, 717 ]
def METHOD_NAME(self):
    info = super(ARCWorkflowProxy, self).METHOD_NAME()
    info["ce"] = "ce: {}".format(",".join(self.task.arc_ce))
    info = self.task.arc_destination_info(info)
    return info
[ 3836, 100 ]
def METHOD_NAME(project):
    """Return service account ID for project."""
    # From
    # cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/create:
    #
    # The account id that is used to generate the service account email address
    # and a stable unique id. It is unique within a project, must be 6-30
    # characters long, and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])
    # to comply with RFC1035.
    account_id = _ACCOUNT_PREFIX + project.replace('_', '-')
    if not account_id[-1].isalnum():
        # Must end in '[a-z][0-9]'.
        account_id += '0'

    if len(account_id) < _MIN_LEN:
        # Must be at least |min_len| in length.
        account_id = account_id.ljust(_MIN_LEN, '0')

    # Use a hash prefix as the service account name if the project name is too
    # long.
    if len(account_id) > _MAX_LEN:
        account_id = _ACCOUNT_PREFIX + utils.string_hash(project)[:_HASH_PREFIX_LEN]

    assert len(account_id) >= _MIN_LEN and len(account_id) <= _MAX_LEN
    return account_id
[ 549, 598, 147 ]
def METHOD_NAME(read=False, write=False, execute=False):
    pl = ['-', '-', '-']
    if read:
        pl[0] = 'r'
    if write:
        pl[1] = 'w'
    if execute:
        pl[2] = 'x'
    return pl
[ 2878, 245 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_url_param(
            "catalogName", self.ctx.args.catalog_name,
            required=True,
        ),
        **self.serialize_url_param(
            "devCenterName", self.ctx.args.dev_center_name,
            required=True,
        ),
        **self.serialize_url_param(
            "resourceGroupName", self.ctx.args.resource_group,
            required=True,
        ),
        **self.serialize_url_param(
            "subscriptionId", self.ctx.subscription_id,
            required=True,
        ),
    }
    return parameters
[ 274, 386 ]
async def METHOD_NAME(
    aiohttp_server: _TestServerFactory, make_client: _MakeClient
) -> None:
    async def handler(request: web.Request) -> web.Response:
        data = await request.json()
        assert data == {
            "key": "name",
            "value": base64.b64encode(b"data").decode("ascii"),
            "org_name": "test-org",
            "project_name": "test-project",
        }
        raise web.HTTPCreated

    app = web.Application()
    app.router.add_post("/secrets", handler)
    srv = await aiohttp_server(app)

    async with make_client(srv.make_url("/")) as client:
        await client.secrets.add("name", b"data", org_name="test-org")
[ 9, 238, 41, 3411 ]
def METHOD_NAME(self, section, option):
    """
    Args:
        section (str): The section str to search for.
        option (str): The option str to search for.

    Returns:
        str: Returns the value of the option in the specified section.
    """
    if section not in self.data:
        raise NoSectionError(section)

    header = self.data.METHOD_NAME(section)
    if option not in header:
        raise NoOptionError(section, option)
    return header.METHOD_NAME(option)
[ 19 ]
def METHOD_NAME(self, http_context):
    """
    List all available managers.

    :param http_context: HttpContext
    :type http_context: HttpContext
    :return: List of managers, one manager per dict
    :rtype: list of dict
    """
    return [
        {
            'id': mgr.id,
            'name': mgr.name,
        }
        for mgr in self.managers.values()
    ]
[ 276, 58, 11849 ]
def METHOD_NAME(self) -> bool:
    return all((self.base_port, self.own_ip, self.node_ips))
[ 137, 4196 ]
def METHOD_NAME(op, data, key, default, **kwargs):
    defaultiter = iter(default.values)
    return key.map(
        lambda k, data=data, defaultiter=defaultiter: safe_get(
            data, k, next(defaultiter)
        )
    )
[ 422, 19, 553, 4045, 4045 ]
def METHOD_NAME(self):
    promise = self.pool.new()
    producer = promise()
    assert isinstance(producer, Producer)
    self.connections.acquire.assert_called_with(block=True)
[ 9, 80 ]
def METHOD_NAME(self) -> tuple[AbstractSweep, ...]:
    return self._sweeps
[ 14994 ]
def METHOD_NAME(cls):
    cls.bot_reference = ShadowserverParserBot
    cls.default_input_message = EXAMPLE_REPORT
[ 0, 1227 ]
def METHOD_NAME(s):
    for artifact, desired in _special_case_mapping.items():
        s = s.replace(artifact, desired)
    return s
[ 276, 341, 331 ]
def METHOD_NAME(m):
    if len(list(m.children())) > 0:
        return
    m.register_buffer('total_ops', paddle.zeros([1], dtype='int64'))
    m.register_buffer('total_params', paddle.zeros([1], dtype='int64'))
    m_type = type(m)

    flops_fn = None
    if m_type in custom_ops:
        flops_fn = custom_ops[m_type]
        if m_type not in types_collection:
            print("Customize Function has been applied to {}.".format(
                m_type))
    elif m_type in register_hooks:
        flops_fn = register_hooks[m_type]
        if m_type not in types_collection:
            print("{}'s FLOPs has been counted.".format(m_type))
    else:
        if m_type not in types_collection:
            print(
                "Cannot find suitable count function for {}. Treat it as zero FLOPs."
                .format(m_type))

    if flops_fn is not None:
        flops_handler = m.register_forward_post_hook(flops_fn)
        handler_collection.append(flops_handler)
    params_handler = m.register_forward_post_hook(count_parameters)
    io_handler = m.register_forward_post_hook(count_io_info)
    handler_collection.append(params_handler)
    handler_collection.append(io_handler)
    types_collection.add(m_type)
[ 238, 4602 ]
def METHOD_NAME(self, loss_params, metric_params, handler_params, expected_avg):
    loss_fn = DiceLoss(**loss_params)
    metric_fn = LossMetric(loss_fn=loss_fn, **metric_params)
    ignite_metric = IgniteMetricHandler(metric_fn=metric_fn, **handler_params)

    def _val_func(engine, batch):
        pass

    engine = Engine(_val_func)
    ignite_metric.attach(engine=engine, name="ignite_dice_loss")
    y_pred = torch.tensor([[[[0.0, 1.0]], [[1.0, 0.0]]]])
    y = torch.tensor([[[[0.0, 1.0]], [[0.0, 1.0]]]])
    engine.state.output = {"pred": y_pred, "label": y}
    engine.fire_event(Events.ITERATION_COMPLETED)

    y_pred = torch.tensor([[[[0.0, 1.0]], [[1.0, 0.0]]]])
    y = torch.tensor([[[[0.0, 1.0]], [[1.0, 0.0]]]])
    engine.state.output = {"pred": y_pred, "label": y}
    engine.fire_event(Events.ITERATION_COMPLETED)

    engine.fire_event(Events.EPOCH_COMPLETED)
    assert_allclose(engine.state.metrics["ignite_dice_loss"], expected_avg, atol=1e-4, rtol=1e-4, type_test=False)
[ 9, 1341, 667 ]
def METHOD_NAME(
    self,
    app: Flask,
    user_1: User,
    sport_1_cycling: Sport,
    user_sport_1_preference: UserSportPreference,
) -> None:
    user_sport_1_preference.stopped_speed_threshold = 0.5
    db.session.commit()

    serialized_sport = sport_1_cycling.serialize(
        sport_preferences=user_sport_1_preference.serialize()
    )

    assert serialized_sport['id'] == 1
    assert serialized_sport['label'] == 'Cycling'
    assert serialized_sport['is_active'] is True
    assert serialized_sport['is_active_for_user'] is True
    assert serialized_sport['color'] is None
    assert serialized_sport['stopped_speed_threshold'] == 0.5
    assert 'has_workouts' not in serialized_sport
[ 9, 8053, 578, 41, 853, 3216 ]
def METHOD_NAME(liquidity_table: pd.DataFrame) -> pd.DataFrame:
    liquidity_table = liquidity_table.dropna()
    liquidity_table.contracts = liquidity_table.contracts.astype(int)
    liquidity_table.risk = liquidity_table.risk.round(2)
    return liquidity_table
[ 5742, 275, 7679, 410 ]
def METHOD_NAME(self) -> Optional[Sequence['outputs.DatabaseBackupSettingResponse']]:
    """
    Databases included in the backup.
    """
    return pulumi.get(self, "databases")
[ 5938 ]
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
    """
    The system metadata relating to this resource.
    """
    return pulumi.get(self, "system_data")
[ 112, 365 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_header_param(
            "Accept", "application/json",
        ),
    }
    return parameters
[ 572, 386 ]
def METHOD_NAME():
    form = CSVValidationMixin()

    # wrap CSV up in SimpleUploadedFile to mirror how a Django view would
    # handle it
    csv_data = "id,description\n0301012A0AA,Adrenaline (Asthma)"
    upload_file = csv_builder(csv_data)
    uploaded_file = SimpleUploadedFile("our csv", upload_file.read())

    form.cleaned_data = {"csv_data": uploaded_file, "coding_system_id": "bnf"}

    with pytest.raises(ValidationError) as e:
        form.clean_csv_data()

    assert len(e.value.messages) == 1
    assert (
        e.value.messages[0]
        == "Expected code header not found: 'dmd_id' or 'code' required"
    )
[ 9, 17093, 654, 544, 572 ]
def METHOD_NAME(self, request, activity, session):
    """
    This method is called when the dashboard of an activity is requested for a student.

    :return: A rendered template of the student dashboard
    """
    raise PermissionDenied()
[ 1316, 3029 ]
async def METHOD_NAME(session_manager, session):
    """Test that adding a session also makes that new session active"""
    assert session_manager._active.active_id == session.meta.identifier
[ 9, 238, 4213, 240 ]
f METHOD_NAME(self, mock_warning):
[ 9, 2497, 559, 841 ]
METHOD_NAME(self):
[ 29 ]
def METHOD_NAME(escaped_str, result):
    """
    Run echo with escaped shell string, return True if output match
    with virt-admin result, else return False.

    :param escaped_str: escaped shell string
    :param result: virt-admin echo output with the escaped string
    :return: True or False due to match of the output
    """
    cmd = "echo %s" % escaped_str
    cmd_result = process.run(cmd, ignore_status=True, shell=True)
    output = cmd_result.stdout_text.strip()
    logging.debug("Shell echo result is: %s", output)
    return (output == result)
[ 250, 1605, 2770 ]
def METHOD_NAME(self, input):
    if isinstance(input, str):
        with open(input, 'rb') as f:
            im_read = f.read()
        data = np.frombuffer(im_read, dtype='uint8')
        im = cv2.imdecode(data, 1)  # BGR mode, but need RGB mode
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    else:
        im = input
    return im
[ 1268, 660 ]
def METHOD_NAME(examples):
    # Tokenize the texts
    texts = (
        (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
    )
    result = tokenizer(*texts, max_length=model_args.max_seq_length, truncation=True)

    if "label" in examples:
        # In all cases, rename the column to labels because the model will expect that.
        result["labels"] = examples["label"]
    return result
[ 666, 559 ]
def METHOD_NAME(lines: Tuple[str]) -> Set[str]:
    """Creates a set that removes empty lines and comments."""
    lines = [ln.strip() for ln in lines]
    # removes first `/` character for posix and `\\` for windows
    lines = [ln.lstrip("/").lstrip("\\") for ln in lines if ln != "" and not ln.startswith("#")]
    # convert to path and converting back to string to sanitize the pattern
    return {str(Path(ln)) for ln in lines}
[ 214, 12426 ]
def METHOD_NAME(
    self, client: MLClient, rand_batch_name: Callable[[], str], rand_batch_deployment_name: Callable[[], str]
) -> None:
    endpoint_yaml = "./tests/test_configs/endpoints/batch/batch_endpoint_deployment_component.yaml"
    endpoint_name = rand_batch_name("endpoint_name")
    endpoint = load_batch_endpoint(endpoint_yaml)
    endpoint.name = endpoint_name

    # Create deployment using local files
    deployment_yaml = "./tests/test_configs/deployments/batch/batch_deployment_component.yaml"
    deployment_name = rand_batch_deployment_name("deployment_name")
    deployment = load_batch_deployment(deployment_yaml)
    deployment.endpoint_name = endpoint_name
    deployment.name = deployment_name

    # create the batch endpoint
    endpoint = client.batch_endpoints.begin_create_or_update(endpoint).result()
    # create a deployment
    client.batch_deployments.begin_create_or_update(deployment).result()

    # Batch endpoint invoke using different supported inputs
    inputs_dict = {
        "input_1": Input(path="azureml:list_data_v2_test:2", type="uri_folder"),
        "input_2": Input(path="azureml:list_data_v2_test:2", type="uri_folder"),
    }
    job = client.batch_endpoints.invoke(
        endpoint_name=endpoint.name,
        deployment_name=deployment.name,
        inputs=inputs_dict,
    )
    assert job
[ 9, 2277, 1007 ]