text: string (lengths 15 to 7.82k)
ids: sequence (lengths 1 to 7)
def METHOD_NAME(args: T.List[str]) -> T.List[str]:
    final_args: T.List[str] = []
    for i in args:
        if i.endswith('.js') and not i.startswith('-'):
            final_args += ['--js-library', i]
        else:
            final_args += [i]
    return final_args
[ 503, 3382, 3403 ]
def METHOD_NAME(
    cls,
    db: Session,
    client_id_byte_length: int,
    client_secret_byte_length: int,
    *,
    scopes: list[str] | None = None,
    fides_key: str = None,
    user_id: str = None,
    encoding: str = "UTF-8",
    roles: list[str] | None = None,
    systems: list[str] | None = None,
) -> tuple["ClientDetail", str]:
    """Creates a ClientDetail and returns that along with the unhashed secret
    so it can be returned to the user on create
    """
    client_id = generate_secure_random_string(client_id_byte_length)
    secret = generate_secure_random_string(client_secret_byte_length)

    if not scopes:
        scopes = DEFAULT_SCOPES
    if not roles:
        roles = DEFAULT_ROLES
    if not systems:
        systems = DEFAULT_SYSTEMS

    salt = generate_salt()
    hashed_secret = hash_with_salt(
        secret.encode(encoding),
        salt.encode(encoding),
    )
    client = super().create(
        db,
        data={
            "id": client_id,
            "salt": salt,
            "hashed_secret": hashed_secret,
            "scopes": scopes,
            "fides_key": fides_key,
            "user_id": user_id,
            "roles": roles,
            "systems": systems,
        },
    )
    return client, secret  # type: ignore
[ 129, 340, 61, 444 ]
def METHOD_NAME(reps, expected):
    """Test various run times. Values taken from Weber November 2021."""
    qubits = cirq.GridQubit.rect(2, 5)
    circuit = cirq.testing.random_circuit(qubits, n_moments=10, op_density=1.0)
    runtime = runtime_estimator.estimate_run_time(circuit, repetitions=reps)
    _assert_about_equal(runtime, expected)
[ 9, 918, 22, 104, 5970, 16309 ]
def METHOD_NAME(self, p: int) -> int:
    NN = self.number_of_nodes()
    NE = self.number_of_edges()
    NC = self.number_of_cells()
    return NN + (p-1)*NE + (p-2)*(p-1)//2*NC
[ 106, 47, 285, 12175 ]
def METHOD_NAME(ctx):
    return "strict"
[ 2982, 2131, 3678, 854 ]
def METHOD_NAME(self, invalid_form_errors):
    """
    Helper method to test the valid form and an invalid form.
    Input the expected form error of the invalid form.
    Remember, this method name cannot begin with 'test'
    """
    self.assertTrue(self.valid_form.is_valid())
    self.assertFalse(self.invalid_form.is_valid())
    self.assertEqual(self.invalid_form.errors, invalid_form_errors)
[ 22, 9, 1725 ]
def METHOD_NAME(self) -> PaginatedList[github.Repository.Repository]:
    """
    :calls: `GET /installation/repositories <https://docs.github.com/en/rest/reference/integrations/installations#list-repositories>`_
    """
    url_parameters: dict[str, Any] = {}
    return PaginatedList(
        contentClass=github.Repository.Repository,
        requester=self._requester,
        firstUrl="/installation/repositories",
        firstParams=url_parameters,
        headers=INTEGRATION_PREVIEW_HEADERS,
        list_item="repositories",
    )
[ 19, 4822 ]
def METHOD_NAME(username, org_id=None):
    if org_id is None:
        owned_repos = seafile_api.get_owned_repo_list(username)
    else:
        owned_repos = seafile_api.get_org_owned_repo_list(org_id, username)
    return owned_repos
[ 19, 5118, 4822 ]
def METHOD_NAME(self):
    return [
        self.InputItem(
            name=_("业务 ID"),
            key="biz_cc_id",
            type="string",
            schema=StringItemSchema(description=_("当前操作所属的 CMDB 业务 ID")),
        ),
        self.InputItem(
            name=_("填参方式"),
            key="cc_module_select_method",
            type="string",
            schema=StringItemSchema(description=_("模块填入方式,拓扑(topo),层级文本(text)"), enum=["topo", "text"]),
        ),
        self.InputItem(
            name=_("主机内网 IP"),
            key="cc_host_ip",
            type="string",
            schema=StringItemSchema(description=_("待转移的主机内网 IP,多个用英文逗号 `,` 分隔")),
        ),
        self.InputItem(
            name=_("拓扑-模块"),
            key="cc_module_select_topo",
            type="array",
            schema=ArrayItemSchema(
                description=_("转移目标模块 ID 列表"), item_schema=IntItemSchema(description=_("模块 ID"))
            ),
        ),
        self.InputItem(
            name=_("文本路径-模块"),
            key="cc_module_select_text",
            type="string",
            schema=StringItemSchema(description=_("请输入完整路径,从业务拓扑开始,如`业务A>集群B>模块C`,多个目标模块用换行分隔")),
        ),
        self.InputItem(
            name=_("转移方式"),
            key="cc_is_increment",
            type="string",
            schema=StringItemSchema(description=_("主机转移方式,覆盖(false)或追加(true)"), enum=["false", "true"]),
        ),
    ]
[ 1461, 275 ]
def METHOD_NAME(self):
    mock_session = mock.create_autospec(Session)
    data = self.deployment_v2()
    mock_session.post.return_value = self.response_200(data)
    return mock_session
[ 240, 5792, 1072 ]
def METHOD_NAME(self, next_header):
    self.set_byte(6, next_header)
[ 0, 243, 572 ]
def METHOD_NAME(self):
    'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
    try:
        return self.maps[0].METHOD_NAME()
    except KeyError:
        raise KeyError('No keys found in the first mapping.')
[ 7568 ]
def METHOD_NAME(analysis_context):
    """
    Define the function to create pipeline for parsing the log data.
    It has also to define an AtomizerFactory to instruct aminer how to process incoming data
    streams to create log atoms from them.
    """
    # Build the parsing model:
    from aminer.parsing.FirstMatchModelElement import FirstMatchModelElement
    from aminer.parsing.SequenceModelElement import SequenceModelElement
    from aminer.parsing.DateTimeModelElement import DateTimeModelElement
    from aminer.parsing.FixedDataModelElement import FixedDataModelElement
    from aminer.parsing.DelimitedDataModelElement import DelimitedDataModelElement
    from aminer.parsing.AnyByteDataModelElement import AnyByteDataModelElement

    service_children_disk_upgrade = [
        DateTimeModelElement('Date', b'%d.%m.%Y %H:%M:%S'),
        FixedDataModelElement('UName', b' ubuntu '),
        DelimitedDataModelElement('User', b' '),
        FixedDataModelElement('HD Repair', b' System rebooted for hard disk upgrade')]

    service_children_home_path = [
        FixedDataModelElement('Pwd', b'The Path of the home directory shown by pwd of the user '),
        DelimitedDataModelElement('Username', b' '),
        FixedDataModelElement('Is', b' is: '),
        AnyByteDataModelElement('Path')]

    parsing_model = FirstMatchModelElement('model', [
        SequenceModelElement('Disk Upgrade', service_children_disk_upgrade),
        SequenceModelElement('Home Path', service_children_home_path)])

    # Some generic imports.
    from aminer.analysis import AtomFilters

    # Create all global handler lists here and append the real handlers later on.
    # Use this filter to distribute all atoms to the analysis handlers.
    atom_filter = AtomFilters.SubhandlerFilter(None)

    from aminer.events.StreamPrinterEventHandler import StreamPrinterEventHandler
    stream_printer_event_handler = StreamPrinterEventHandler(None)
    anomaly_event_handlers = [stream_printer_event_handler]

    # Now define the AtomizerFactory using the model. A simple line based one is usually sufficient.
    from aminer.input.SimpleByteStreamLineAtomizerFactory import SimpleByteStreamLineAtomizerFactory
    analysis_context.atomizer_factory = SimpleByteStreamLineAtomizerFactory(
        parsing_model, [atom_filter], anomaly_event_handlers, default_timestamp_path_list=[''])

    # Just report all unparsed atoms to the event handlers.
    from aminer.analysis.UnparsedAtomHandlers import SimpleUnparsedAtomHandler
    atom_filter.add_handler(SimpleUnparsedAtomHandler(anomaly_event_handlers), stop_when_handled_flag=True)

    from aminer.analysis.NewMatchPathDetector import NewMatchPathDetector
    new_match_path_detector = NewMatchPathDetector(analysis_context.aminer_config, anomaly_event_handlers, learn_mode=True)
    analysis_context.register_component(new_match_path_detector, component_name=None)
    atom_filter.add_handler(new_match_path_detector)

    from aminer.analysis.NewMatchPathValueComboDetector import NewMatchPathValueComboDetector
    new_match_path_value_combo_detector = NewMatchPathValueComboDetector(analysis_context.aminer_config, [
        '/model/Home Path/Username', '/model/Home Path/Path'], anomaly_event_handlers, learn_mode=True)
    analysis_context.register_component(new_match_path_value_combo_detector, component_name=None)
    atom_filter.add_handler(new_match_path_value_combo_detector)

    # Include the e-mail notification handler only if the configuration parameter was set.
    from aminer.events.DefaultMailNotificationEventHandler import DefaultMailNotificationEventHandler
    if DefaultMailNotificationEventHandler.CONFIG_KEY_MAIL_TARGET_ADDRESS in analysis_context.aminer_config.config_properties:
        mail_notification_handler = DefaultMailNotificationEventHandler(analysis_context)
        analysis_context.register_component(mail_notification_handler, component_name=None)
        anomaly_event_handlers.append(mail_notification_handler)
[ 56, 689, 1148 ]
def METHOD_NAME(self, partial_key, queryset):
    return queryset.order_by("-round__start_date")
[ 527, -1, 43, 4510 ]
def METHOD_NAME(__self__,
                resource_name: str,
                opts: Optional[pulumi.ResourceOptions] = None,
                email: Optional[pulumi.Input[str]] = None,
                __props__=None):
    opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = EmailIdentityArgs.__new__(EmailIdentityArgs)

        if email is None and not opts.urn:
            raise TypeError("Missing required property 'email'")
        __props__.__dict__["email"] = email
        __props__.__dict__["arn"] = None
    super(EmailIdentity, __self__).__init__(
        'aws:ses/emailIdentity:EmailIdentity',
        resource_name,
        __props__,
        opts)
[ 2026, 176 ]
def METHOD_NAME(*args):
    """
    Joins given arguments into an url. Trailing but not leading slashes are
    stripped for each argument.

    :return: string
    """
    return '/'.join(arg.strip('/') for arg in args)
[ 8387 ]
def METHOD_NAME(self, *keyvalues):
    for k, v in keyvalues:
        self.assertEquals(v, self.testdict[k])
[ -1 ]
def METHOD_NAME(self):
    pass
[ 128, 11384 ]
def METHOD_NAME(self, command_args):
    super().METHOD_NAME(command_args)
    return self.build_lro_poller(self._execute_operations, None)
[ 1519 ]
def METHOD_NAME():
    # TODO: right now we will simply check if the operations work
    # for some boring inputs. For some of these, we will want to
    # test corner cases in the future.

    # Math operations
    ops = [
        "add",
        # "divmod",
        "equal",
        "fmax",
        "fmin",
        "greater",
        "greater_equal",
        # "heaviside",
        # "ldexp",
        "less",
        "less_equal",
        "logical_and",
        "logical_or",
        "logical_xor",
        "maximum",
        "minimum",
        "multiply",
        "not_equal",
        "subtract",
        "true_divide",
    ]
    # We want to test array-array, array-scalar, and scalar-array cases
    arrs = (
        np.random.randint(3, 10, size=(4, 5)).astype("I"),
        np.random.uniform(size=(4, 5)).astype("F"),
    )
    scalars = (
        np.uint64(2),
        np.int64(-3),
        np.random.randn(1)[0],
        np.complex64(1 + 1j),
    )
    for arr1, arr2 in product(arrs, arrs):
        check_ops(ops, (arr1, arr2))
    for arr, scalar in product(arrs, scalars):
        check_ops(ops, (arr, scalar))
        check_ops(ops, (scalar, arr))
    for scalar1, scalar2 in product(scalars, scalars):
        check_ops(ops, (scalar1, scalar2))

    ops = [
        "arctan2",
        "copysign",
        "floor_divide",
        "fmod",
        "hypot",
        "logaddexp",
        "logaddexp2",
        "nextafter",
    ]
    for arr1, arr2 in product(arrs[:-1], arrs[:-1]):
        check_ops(ops, (arr1, arr2))
    for arr, scalar in product(arrs[:-1], scalars[:-1]):
        check_ops(ops, (arr, scalar))
        check_ops(ops, (scalar, arr))
    for scalar1, scalar2 in product(scalars[:-1], scalars[:-1]):
        check_ops(ops, (scalar1, scalar2))

    ops = [
        "power",
        "float_power",
    ]
    for arr1, arr2 in product(arrs, arrs):
        check_ops(ops, (arr1, arr2))
    for arr in arrs:
        check_ops(ops, (arr, scalars[0]))
        check_ops(ops, (scalars[0], arr))
        check_ops(ops, (arr, scalars[3]))
    check_ops(ops, (scalars[3], scalars[3]))
    check_ops(ops, (scalars[0], scalars[3]))
    check_ops(ops, (scalars[3], scalars[0]))

    ops = [
        "remainder",
    ]
    for arr1, arr2 in product(arrs[:1], arrs[:1]):
        check_ops(ops, (arr1, arr2))
    for arr, scalar in product(arrs[:1], scalars[:-2]):
        check_ops(ops, (arr, scalar))
        check_ops(ops, (scalar, arr))
    for scalar1, scalar2 in product(scalars[:-2], scalars[:-2]):
        check_ops(ops, (scalar1, scalar2))

    ops = [
        "bitwise_and",
        "bitwise_or",
        "bitwise_xor",
        "gcd",
        "left_shift",
        "lcm",
        "right_shift",
    ]
    check_ops(ops, (arr1[0], arr2[0]))
    check_ops(ops, (arrs[0], scalars[0]))
    check_ops(ops, (arrs[0], scalars[1]))
    check_ops(ops, (scalars[0], arrs[0]))
    check_ops(ops, (scalars[1], arrs[0]))
    check_ops(ops, (scalars[0], scalars[0]))
[ 9, 75 ]
def METHOD_NAME(request, event):
    billing_service = request.find_service(IBillingService, context=None)
    subscription_service = request.find_service(ISubscriptionService, context=None)
    match event["type"]:
        # Occurs when a Checkout Session has been successfully completed.
        case "checkout.session.completed":
            checkout_session = event["data"]["object"]
            # Get expanded checkout session object
            checkout_session = billing_service.get_checkout_session(
                checkout_session["id"],
                # Provide mock_checkout_session used by MockStripeBillingService only.
                mock_checkout_session=checkout_session,
            )
            status = checkout_session["status"]
            customer_id = checkout_session["customer"]["id"]
            billing_email = checkout_session["customer"]["email"]
            subscription_id = checkout_session["subscription"]["id"]
            if status != "complete":
                raise HTTPBadRequest(f"Invalid checkout session status '{status}'")
            if not customer_id:
                raise HTTPBadRequest("Invalid customer ID")
            if not subscription_id:
                raise HTTPBadRequest("Invalid subscription ID")
            if id := subscription_service.find_subscriptionid(subscription_id):
                # Set subscription status to active.
                subscription_service.update_subscription_status(
                    id, StripeSubscriptionStatus.Active
                )
            else:
                # Get expanded subscription object
                subscription_items = checkout_session["subscription"]["items"]["data"]
                # Activate subscription for customer.
                for subscription_item in subscription_items:
                    subscription_service.add_subscription(
                        customer_id,
                        subscription_id,
                        subscription_item["id"],
                        billing_email,
                    )
        # Occurs whenever a customer’s subscription ends.
        case "customer.subscription.deleted":
            subscription = event["data"]["object"]
            status = subscription["status"]
            customer_id = subscription["customer"]
            subscription_id = subscription["id"]
            if not status or not StripeSubscriptionStatus.has_value(status):
                raise HTTPBadRequest(f"Invalid subscription status '{status}'")
            if not customer_id:
                raise HTTPBadRequest("Invalid customer ID")
            if not subscription_id:
                raise HTTPBadRequest("Invalid subscription ID")
            if id := subscription_service.find_subscriptionid(subscription_id):
                # Set subscription status to canceled.
                subscription_service.update_subscription_status(
                    id, StripeSubscriptionStatus.Canceled
                )
        # Occurs whenever a subscription changes e.g. status changes.
        case "customer.subscription.updated":
            subscription = event["data"]["object"]
            status = subscription["status"]
            customer_id = subscription["customer"]
            subscription_id = subscription["id"]
            if not status or not StripeSubscriptionStatus.has_value(status):
                raise HTTPBadRequest(f"Invalid subscription status '{status}'")
            if not customer_id:
                raise HTTPBadRequest("Invalid customer ID")
            if not subscription_id:
                raise HTTPBadRequest("Invalid subscription ID")
            if id := subscription_service.find_subscriptionid(subscription_id):
                # Update subscription status.
                subscription_service.update_subscription_status(id, status)
        # Occurs whenever a customer is deleted.
        case "customer.deleted":
            customer = event["data"]["object"]
            customer_id = customer["id"]
            if not customer_id:
                raise HTTPBadRequest("Invalid customer ID")
            if subscription_service.get_subscriptions_by_customer(customer_id):
                # Delete the customer and all associated subscription data
                subscription_service.delete_customer(customer_id)
        # Occurs whenever a customer is updated.
        case "customer.updated":
            customer = event["data"]["object"]
            customer_id = customer["id"]
            billing_email = customer["email"]
            if not customer_id:
                raise HTTPBadRequest("Invalid customer ID")
            if not billing_email:
                raise HTTPBadRequest("Invalid billing email")
            # Update customer email
            subscription_service.update_customer_email(customer_id, billing_email)
[ 276, 4094, 12, 417 ]
def METHOD_NAME(replay_id: str, project_id: str, segment_id: int, segment):
    f = File.objects.create(name=f"rr:{segment_id}", type="replay.recording")
    f.putfile(BytesIO(compress(dumps_htmlsafe(segment).encode())))
    ReplayRecordingSegment.objects.create(
        replay_id=replay_id,
        project_id=project_id,
        segment_id=segment_id,
        file_id=f.id,
    )
[ 1308, 2826, 1690 ]
def METHOD_NAME(self):
    return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.
        """
[ 1302, 1303 ]
def METHOD_NAME(val: bool):
    old_value = torchdynamo.config.optimize_ddp
    try:
        torchdynamo.config.optimize_ddp = val
        yield
    finally:
        torchdynamo.config.optimize_ddp = old_value
[ 5107, 8613, 3890 ]
def METHOD_NAME(list, recursive, toplevel):
    import sys
    import os
    for filename in list:
        if os.path.isdir(filename):
            print filename + '/:',
            if recursive or toplevel:
                print 'recursing down:'
                import glob
                names = glob.glob(os.path.join(filename, '*'))
                METHOD_NAME(names, recursive, 0)
            else:
                print '*** directory (use -r) ***'
        else:
            print filename + ':',
            sys.stdout.flush()
            try:
                print what(filename)
            except IOError:
                print '*** not found ***'
[ -1 ]
def METHOD_NAME(capsys):
    msg = "\U0010FFFF"
    msg = "123" + msg * (1024 // len(msg) + 1)
    m.captured_output_default(msg)
    stdout, stderr = capsys.readouterr()
    assert stdout == msg
    assert stderr == ""
[ 9, 1181, 3690, 6275, 10947 ]
def METHOD_NAME(self):
    template = " and ".join([self.template, self.template])
    self.assertEqual(self.render(template), "some-text and some-text")
[ 9, 338, 529, 377, 1414, 2769 ]
def METHOD_NAME(
    self, name: str, value: Union[int, float] = 1, tags: Optional[Tags] = None
) -> None:
    # sentry metrics backend uses `incr` instead of `increment`
    self.__backend.incr(key=self.__merge_name(name), amount=value, tags=self.__merge_tags(tags))
[ 2978 ]
def METHOD_NAME(self):
    response = self.client.get(reverse(views.callback))
    self.assertTemplateUsed(response, "socialaccount/authentication_error.html")
[ 9, 1076, 654, 2433, 452 ]
def METHOD_NAME(
    self, dataset: USAVars, mock_missing_module: str
) -> None:
    package = mock_missing_module
    if package == "pandas":
        with pytest.raises(
            ImportError,
            match=f"{package} is not installed and is required to use this dataset",
        ):
            USAVars(dataset.root)
[ 9, 248, 1038, 298 ]
def METHOD_NAME(__seconds: int) -> int: ...
[ 6722 ]
def METHOD_NAME(key, local_name, eng_name, rank):
    rank_pos = 0
    # Automatically keep the arrays sorted by rank
    for index, item in enumerate(lang_rank):
        rank_pos = index
        if (rank > item):
            break
    lang_keys.insert(rank_pos, key)
    lang_local_names.insert(rank_pos, local_name)
    lang_eng_names.insert(rank_pos, eng_name)
    lang_rank.insert(rank_pos, rank)
[ 238, 1767 ]
def METHOD_NAME(logarithm_scale_setting_1, logarithm_scale_setting_2, quantization_type):
    for logarithm_scales in [[False, True], [True, False]]:
        for symmetric in [False, True]:
            model0, _ = create_compressed_model_and_algo_for_test(
                TwoConvTestModel(),
                get_config_for_logarithm_scale(
                    logarithm_scale=logarithm_scale_setting_1, quantization_type=quantization_type
                ),
            )
            model1, _ = create_compressed_model_and_algo_for_test(
                TwoConvTestModel(),
                get_config_for_logarithm_scale(
                    logarithm_scale=logarithm_scale_setting_2, quantization_type=quantization_type
                ),
            )
            sd0 = model0.state_dict()
            model1.load_state_dict(sd0)
            sd1 = model1.state_dict()
            for k, v0 in sd0.items():
                v1 = sd1[k]  # pylint: disable=E1136
                diff = (v1 - v0).abs().sum().item() / v1.numel()
                assert diff < 1e-6, "symmetric {} logarithm_scales {} param {} is corrupted mean({}-{})={}".format(
                    symmetric, logarithm_scales, k, v0, v1, diff
                )
[ 9, 7316, 930, 511 ]
def METHOD_NAME(state, section_size=2):
    """
    Synthesize linear reversible circuits for all-to-all architecture
    using Patel, Markov and Hayes method.

    This function is an implementation of the Patel, Markov and Hayes algorithm from [1]
    for optimal synthesis of linear reversible circuits for all-to-all architecture,
    as specified by an n x n matrix.

    Args:
        state (list[list] or ndarray): n x n boolean invertible matrix, describing the state
            of the input circuit
        section_size (int): the size of each section, used in the Patel–Markov–Hayes
            algorithm [1]. section_size must be a factor of num_qubits.

    Returns:
        QuantumCircuit: a CX-only circuit implementing the linear transformation.

    Raises:
        QiskitError: when variable "state" isn't of type numpy.ndarray

    References:
        1. Patel, Ketan N., Igor L. Markov, and John P. Hayes,
           *Optimal synthesis of linear reversible circuits*,
           Quantum Information & Computation 8.3 (2008): 282-294.
           `arXiv:quant-ph/0302002 [quant-ph] <https://arxiv.org/abs/quant-ph/0302002>`_
    """
    if not isinstance(state, (list, np.ndarray)):
        raise QiskitError(
            "state should be of type list or numpy.ndarray, "
            "but was of the type {}".format(type(state))
        )
    state = np.array(state)
    # Synthesize lower triangular part
    [state, circuit_l] = _lwr_cnot_synth(state, section_size)
    state = np.transpose(state)
    # Synthesize upper triangular part
    [state, circuit_u] = _lwr_cnot_synth(state, section_size)
    circuit_l.reverse()
    for i in circuit_u:
        i.reverse()
    # Convert the list into a circuit of C-NOT gates
    circ = QuantumCircuit(state.shape[0])
    for i in circuit_u + circuit_l:
        circ.cx(i[0], i[1])
    return circ
[ 11574, 9615, 29, 324, -1 ]
def METHOD_NAME(default=NO_DEFAULT):
    title = self._html_search_meta(
        ('fulltitle', 'title'), webpage, default=None)
    if not title or title == "c't":
        title = self._search_regex(
            r'<div[^>]+class="videoplayerjw"[^>]+data-title="([^"]+)"',
            webpage, 'title', default=None)
    if not title:
        title = self._html_search_regex(
            r'<h1[^>]+\bclass=["\']article_page_title[^>]+>(.+?)<',
            webpage, 'title', default=default)
    return title
[ 297, 2893 ]
def METHOD_NAME(self):
    with EnvVars(self.env_vars):
        command = get_agent_service_cmd(self.agent_version, self.platform, 'start')
        return run_command(command, capture=True)
[ 447, 1849 ]
def METHOD_NAME(self) -> None:
    if get_main_window().tk.call("winfo", "exists", ".choose_line_ending"):
        get_main_window().tk.call("focus", ".choose_line_ending")
        return

    old_value = self._tab.settings.get("line_ending", settings.LineEnding)
    self._tab.settings.set("line_ending", utils.ask_line_ending(old_value))
[ 7367, 534, 4768 ]
def METHOD_NAME() -> argparse.Namespace:
    argparser = argparse.ArgumentParser(description="Poetry2nix CLI")

    subparsers = argparser.add_subparsers(dest="subcommand")
    subparsers.required = True

    parser_lock = subparsers.add_parser("lock", help="Generate overrides for git hashes",)
    parser_lock.add_argument(
        "--lock", default="poetry.lock", help="Path to input poetry.lock",
    )
    parser_lock.add_argument(
        "--out", default="poetry-git-overlay.nix", help="Output file",
    )

    return argparser.METHOD_NAME()
[ 214, 335 ]
def METHOD_NAME(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=notebooksCallTransformer(),
[ 1112, 1537 ]
def METHOD_NAME(self):
    self.canvas.delete("all")
    for cmd in self.display_list:
        if cmd.top > self.scroll + HEIGHT: continue
        if cmd.bottom < self.scroll: continue
        cmd.execute(self.scroll, self.canvas)
[ 1100 ]
def METHOD_NAME(epoch, epoch_size, num_updates, iterations_in_epoch):
    tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1)
    tokens_ds = data.TokenBlockDataset(
        tokens,
        sizes=[tokens.size(-1)],
        block_size=1,
        pad=0,
        eos=1,
        include_targets=False,
    )
    trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
    dataset = data.LanguagePairDataset(tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False)
    epoch_itr = data.EpochBatchIterator(
        dataset=dataset,
        collate_fn=dataset.collater,
        batch_sampler=[[i] for i in range(epoch_size)],
    )
    return trainer, epoch_itr
[ 19, 3492, 61, 1165, 6475 ]
def METHOD_NAME(value, nl=re.compile('\r\n|\n\r').search):
    value = field2string(value)
    match_object = nl(value)
    if match_object is None:
        return value
    length = match_object.start(0)
    result = []
    start = 0
    while length >= start:
        result.append(value[start:length])
        start = length + 2
        match_object = nl(value, start)
        if match_object is None:
            length = -1
        else:
            length = match_object.start(0)
    result.append(value[start:])
    return '\n'.join(result)
[ -1 ]
def METHOD_NAME(self, requestParameters):
    return None
[ 19, 1970, 103, 8267 ]
def METHOD_NAME(self): """Get the transformed coordinate set.""" if self.coords is None or self.reference_coords is None: raise Exception("No coordinates set.") if self.rot is None: raise Exception("Nothing superimposed yet.") if self.transformed_coords is None: self.transformed_coords = dot(self.coords, self.rot) + self.tran return self.transformed_coords
[ 19, 514 ]
def METHOD_NAME(builder, documentation):
    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(documentation), 0)
[ 1206, 1259, 238, 1200 ]
def METHOD_NAME(processor, stream):
    stream.feed('\x1bhello')

    assert len(processor.keys) == 1 + len('hello')
    assert processor.keys[0].key == Keys.Escape
    assert processor.keys[1].key == 'h'
    assert processor.keys[0].data == '\x1b'
    assert processor.keys[1].data == 'h'
[ 9, 4748 ]
def METHOD_NAME(self):
[ 102 ]
def METHOD_NAME(): """ Cleanup log_exports csv files that does not have related reocord in GenerateCSVLogRequest model """ logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export") if not os.path.isdir(logs_dir): return valid_filenames_set = get_valid_filenames() for filename in os.listdir(logs_dir): if filename not in valid_filenames_set: os.remove(os.path.join(logs_dir, filename))
[ 390, 4494, 950 ]
def METHOD_NAME(request: httpx.Request) -> httpx.Response:
    if request.url.path == "/echo_cookies":
        data = {"cookies": request.headers.get("cookie")}
        return httpx.Response(200, json=data)
    elif request.url.path == "/set_cookie":
        return httpx.Response(200, headers={"set-cookie": "example-name=example-value"})
    else:
        raise NotImplementedError()  # pragma: no cover
[ 19, 61, 0, 880 ]
def METHOD_NAME(name, deps = [], nonportable_deps = [], **kwargs):
    pytype_library(
        name = name,
        deps = deps + nonportable_deps,
        **kwargs
    )
[ -1, 13817, 8603, 3106 ]
def METHOD_NAME():
    _test_config(
        dedent(
            """
            project: foo
            registry: "registry.db"
            provider: local
            online_store:
                type: sqlite
                that_field_should_not_be_here: yes
                path: "online_store.db"
            """
        ),
        expect_error="__root__ -> online_store -> that_field_should_not_be_here\n"
        "  extra fields not permitted (type=value_error.extra)",
    )
[ 9, 1967, 101 ]
def METHOD_NAME(srcdir, config, pkg_name):
    """Scan the project directory for things we can use to guess a description and summary."""
    targets = ["copyright", "copyright.txt", "apache-2.0", "artistic.txt", "libcurllicense",
               "gpl.txt", "gpl2.txt", "gplv2.txt", "notice", "copyrights", "about_bsd.txt"]
    # look for files that start with copying or licen[cs]e (but are
    # not likely scripts) or end with licen[cs]e
    target_pat = re.compile(r"^((copying)|(licen[cs]e)|(e[dp]l-v\d+))|(licen[cs]e)(\.(txt|xml))?$")
    for dirpath, dirnames, files in os.walk(srcdir):
        for name in files:
            if name.lower() in targets or target_pat.search(name.lower()):
                license_from_copying_hash(os.path.join(dirpath, name), srcdir, config, pkg_name)
            # Also search for license texts in project trees that are
            # REUSE-compliant, or are in process of adopting this standard (for
            # example, KDE ecosystem packages). See https://reuse.software for
            # details. At a basic level, this layout requires a toplevel
            # `LICENSES` directory that includes separate files (with .txt
            # extension) for each license text that covers source code, data,
            # etc elsewhere in the project tree. A variant layout is currently
            # seen in the DPDK 20.11.3 tree, where the `LICENSES` directory is
            # named `license` instead.
            dirbase = os.path.basename(dirpath)
            if re.search(r'^(LICENSES|license)$', dirbase) and re.search(r'\.txt$', name):
                license_from_copying_hash(os.path.join(dirpath, name), srcdir, config, pkg_name)

    if not licenses:
        print_fatal(" Cannot find any license or a valid {}.license file!\n".format(pkg_name))
        sys.exit(1)

    print("Licenses : ", " ".join(sorted(licenses)))
[ 793, 43, 4087 ]
def METHOD_NAME(capfd, mocker):
    set_module_args({'login_host': 'localhost',
                     'login_user': 'root',
                     'login_password': 'secret',
                     'key': 'foo',
                     '_ansible_check_mode': False})
    mocker.patch('redis.Redis.get', return_value='bar')
    with pytest.raises(SystemExit):
        redis_data_info.main()
    out, err = capfd.readouterr()
    print(out)
    assert not err
    assert json.loads(out)['exists']
    assert json.loads(out)['value'] == 'bar'
[ 9, 2485, 365, 100, 1153, 59 ]
def METHOD_NAME(self):
    self._test_job_worker_start_multi(uri='/test_job_worker_start_multi_without_shared_data',
                                      jobs_cnt=2,
                                      with_shared_data=False)
[ 9, 202, 1794, 447, 457, 529, 1644 ]
def METHOD_NAME(self):
[ 9, 564, 9385, 134, 216 ]
def METHOD_NAME(self, bug, data):
    bugid = str(bug["id"])
    cc_count = len(bug["cc"])
    dups_count = len(bug["duplicates"])
    votes_count = bug["votes"]
    see_also_count = len(bug["see_also"])

    data[bugid] = {
        "creation": utils.get_human_lag(bug["creation_time"]),
        "last_change": utils.get_human_lag(bug["last_change_time"]),
        "severity": bug["severity"],
        "dups_count": dups_count,
        "votes": votes_count,
        "cc_count": cc_count,
        "see_also_count": see_also_count,
    }

    factors = []
    if dups_count >= self.ndups:
        factors.append(f"{dups_count} duplicates")
    if votes_count >= self.votes:
        factors.append(f"{votes_count} votes")
    if cc_count >= self.cc:
        factors.append(f"{cc_count} CCs")
    if see_also_count >= self.see_also:
        factors.append(f"{see_also_count} See Also bugs")

    self.extra_ni[bugid] = {
        "severity": bug["severity"],
        "factors": utils.english_list(factors),
    }

    return bug
[ 276, 4178 ]
def METHOD_NAME(self): """Initialize the test environment and builds request/result encoding pairs.""" arguments = { "read_address": 1, "read_count": 5, "write_address": 1, } self.value = 0xABCD self.values = [0xA, 0xB, 0xC] self.request_read = { ReadRegistersRequestBase(1, 5): b"\x00\x01\x00\x05", ReadHoldingRegistersRequest(1, 5): b"\x00\x01\x00\x05", ReadInputRegistersRequest(1, 5): b"\x00\x01\x00\x05", ReadWriteMultipleRegistersRequest( write_registers=[0x00] * 5, **arguments, ): b"\x00\x01\x00\x05\x00\x01\x00" b"\x05\x0a\x00\x00\x00\x00\x00" b"\x00\x00\x00\x00\x00", ReadWriteMultipleRegistersRequest( write_registers=0xAB, **arguments, ): b"\x00\x01\x00\x05\x00\x01\x00" b"\x01\x02\x00\xAB", } self.response_read = { ReadRegistersResponseBase(self.values): TEST_MESSAGE, ReadHoldingRegistersResponse(self.values): TEST_MESSAGE, ReadInputRegistersResponse(self.values): TEST_MESSAGE, ReadWriteMultipleRegistersResponse(self.values): TEST_MESSAGE, }
[ 102, 103 ]
def METHOD_NAME(
    anonymizer_with_consistent_salt: Anonymizer,
):
    with pytest.raises(AssertionError) as e:
        anonymizer_with_consistent_salt._anonymize_object_info(
            anonymized_info_dict={},
            object_=None,
            object_class=None,
            object_config=None,
        )

    assert "Must pass either" in str(e.value)
[ 9, 4131, 279, 100, 41, 1038, 335 ]
def METHOD_NAME(self):
    # Test that permuting an array of objects will not cause
    # a segfault on garbage collection.
    # See gh-7719
    np.random.seed(1234)
    a = np.array([np.arange(1), np.arange(4)], dtype=object)

    for _ in range(1000):
        np.random.shuffle(a)

    # Force Garbage Collection - should not segfault.
    import gc
    gc.collect()
[ 9, 1124, 47, 877, 47, 635 ]
def METHOD_NAME(self):
    self.load_phases()
    self.assertEqual(len(self.widget.cake_widget.phases), 6)
[ 9, 2309, 1401, 12005 ]
def METHOD_NAME(self) -> bool:
    return self.FALSE_POSITIVE in self.tags
[ 137, 1168, 2302 ]
def METHOD_NAME():
    ubq = UpdateByQuery()
    assert {} == ubq.to_dict()

    ubq = ubq.query("match", f=42)
    assert {"query": {"match": {"f": 42}}} == ubq.to_dict()

    assert {"query": {"match": {"f": 42}}, "size": 10} == ubq.to_dict(size=10)

    ubq = UpdateByQuery(extra={"size": 5})
    assert {"size": 5} == ubq.to_dict()

    ubq = UpdateByQuery(extra={"extra_q": Q("term", category="conference")})
    assert {"extra_q": {"term": {"category": "conference"}}} == ubq.to_dict()
[ 9, -1, 24, 553 ]
def METHOD_NAME(self, func):
    self._shutdown_handlers.append(func)
[ 372, 158, 1519 ]
def METHOD_NAME(test_binary):
    """
    Run a single test coverage by copying to docker, save exitcode, stdout and stderr
    """
    src_dir = os.path.abspath('.')
    test_binary_basename = os.path.basename(test_binary)
    coverage_output = f'target/cov0/{test_binary_basename}'
    subprocess.check_output(f'mkdir -p {coverage_output}', shell=True)
    coverage_output = os.path.abspath(coverage_output)
    if not os.path.isfile(test_binary):
        return -1, '', f'{test_binary} does not exist'
    p = subprocess.Popen([
        'docker', 'run', '--rm', '--security-opt', 'seccomp=unconfined',
        '-u', f'{os.getuid()}:{os.getgid()}',
        '-v', f'{test_binary}:{test_binary}',
        '-v', f'{src_dir}:{src_dir}',
        '-v', f'{coverage_output}:{coverage_output}',
        'nearprotocol/near-coverage-runtime',
        'bash', '-c',
        f'/usr/local/bin/kcov --include-pattern=nearcore --exclude-pattern=.so --verify {coverage_output} {test_binary}'
    ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    stdout, stderr = p.communicate()
    return (p.returncode, stdout, stderr)
[ 2332 ]
def METHOD_NAME(self, size):
    """
    Method called when the slits gap changes

    Args:
        size (tuple): two floats indicating beam size in microns
    """
    self._beam_size_dict["slits"] = size
    self.evaluate_beam_info()
    self.emit_beam_info_change()
[ 16667, 6699, 1180 ]
def METHOD_NAME(
    module_subscribe_satellite,
    module_enroll_idm_and_configure_external_auth,
    configure_realm,
    module_fake_proxy,
    request,
    module_target_sat,
):
    """Test realm info functionality

    :id: 2e3e92df-61f3-4c6b-98b9-dc9c2f8d140c

    :expectedresults: Realm information obtained by name is correct
    """
    realm = module_target_sat.cli_factory.make_realm(
        {
            'name': gen_string('alpha', random.randint(1, 30)),
            'realm-proxy-id': module_fake_proxy.id,
            'realm-type': 'Red Hat Identity Management',
            'locations': module_fake_proxy.locations.to_list(),
        }
    )
    request.addfinalizer(lambda: module_target_sat.cli.Realm.delete({'id': realm['id']}))
    info = module_target_sat.cli.Realm.info({'name': realm['name']})
    for key in info.keys():
        assert info[key] == realm[key]
[ 9, 2302, 2440, 100, 156 ]
def METHOD_NAME():
    # Only downgrade those that can be nullable
    op.alter_column(
        "account",
        "save_raw_messages",
        server_default=sa.sql.expression.null(),
        existing_type=sa.Boolean,
        existing_server_default=sa.sql.expression.true(),
        existing_nullable=None,
    )
    op.alter_column(
        "block",
        "is_inboxapp_attachment",
        server_default=sa.sql.expression.null(),
        existing_type=sa.Boolean,
        existing_server_default=sa.sql.expression.false(),
        existing_nullable=None,
    )
[ 1502 ]
def METHOD_NAME(input_file, max_pred_length, args, data_holders, worker_init=None, places=None):
    train_data = PretrainingDataset(input_file=input_file, max_pred_length=max_pred_length)
    train_batch_sampler = paddle.io.BatchSampler(train_data, batch_size=args.batch_size, shuffle=True)

    def _collate_data(data, stack_fn=Stack()):
        num_fields = len(data[0])
        out = [None] * num_fields
        # input_ids, segment_ids, input_mask, masked_lm_positions,
        # masked_lm_labels, next_sentence_labels, mask_token_num
        for i in (0, 1, 2, 5):
            out[i] = stack_fn([x[i] for x in data])
        _, seq_length = out[0].shape
        size = sum(len(x[3]) for x in data)
        # Padding for divisibility by 8 for fp16 or int8 usage
        if size % 8 != 0:
            size += 8 - (size % 8)
        # masked_lm_positions
        # Organize as a 1D tensor for gather or use gather_nd
        out[3] = np.full(size, 0, dtype=np.int32)
        # masked_lm_labels
        out[4] = np.full([size, 1], -1, dtype=np.int64)
        mask_token_num = 0
        for i, x in enumerate(data):
            for j, pos in enumerate(x[3]):
                out[3][mask_token_num] = i * seq_length + pos
                out[4][mask_token_num] = x[4][j]
                mask_token_num += 1
        # mask_token_num
        out.append(np.asarray([mask_token_num], dtype=np.float32))
        if args.use_amp and args.use_pure_fp16:
            # cast input_mask to fp16
            out[2] = out[2].astype(np.float16)
            # cast masked_lm_scale to fp16
            out[-1] = out[-1].astype(np.float16)
        return out

    train_data_loader = DataLoader(
        dataset=train_data,
        places=places,
        feed_list=data_holders,
        batch_sampler=train_batch_sampler,
        collate_fn=_collate_data,
        num_workers=0,
        worker_init_fn=worker_init,
        return_list=False,
    )
    return train_data_loader, input_file
[ 129, 6952, 126 ]
def METHOD_NAME(config, server_parameters):
    if server_parameters.security_key is None:
        server_parameters.security_key = config.SECURITY_KEY

    if not isinstance(server_parameters.security_key, (bytes, str)):
        raise RuntimeError(
            "No security key was found for this instance of thumbor. "
            + "Please provide one using the conf file or a security key file."
        )

    if config.ENGINE or config.USE_GIFSICLE_ENGINE:
        # Error on Image.open when image pixel count is above MAX_IMAGE_PIXELS
        warnings.simplefilter("error", Image.DecompressionBombWarning)

    if config.USE_GIFSICLE_ENGINE:
        server_parameters.gifsicle_path = which("gifsicle")
        if server_parameters.gifsicle_path is None:
            raise RuntimeError(
                "If using USE_GIFSICLE_ENGINE configuration to True,"
                " the `gifsicle` binary must be in the PATH "
                "and must be an executable."
            )
[ 187, 200 ]
def METHOD_NAME(tmp_path):
    return tmp_path / "cache_dir"
[ 4136, 596, 1190 ]
def METHOD_NAME(self, args = None, namespace = None):
[ 214, 335 ]
def METHOD_NAME(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Create a schedule with a learning rate that decreases linearly after linearly increasing
    during a warmup period.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
[ 19, 1783, 507, 41, 5910 ]
def METHOD_NAME(self):
    class TestUseSpanException(Exception):
        pass

    test_span = TestSpan(trace.INVALID_SPAN_CONTEXT)

    with self.assertRaises(TestUseSpanException):
        with trace.use_span(test_span):
            raise TestUseSpanException("test error")

    self.assertEqual(
        test_span.recorded_status.status_code, StatusCode.ERROR
    )
    self.assertEqual(
        test_span.recorded_status.description,
        "TestUseSpanException: test error",
    )
[ 9, 1080, 1244, 0, 452 ]
def METHOD_NAME(self, role: str) -> Tuple[str, AccessKey]:
    account_id_match = re.search(r"arn:aws:iam::([0-9]+).+", role)
    if account_id_match:
        account_id = account_id_match.group(1)
    else:
        account_id = self.account_id
    iam_backend = iam_backends[account_id]["global"]
    return account_id, iam_backend.create_temp_access_key()
[ 129, 1089, 59 ]
def METHOD_NAME(value): """ 将内置系统变量(_system.xxx)转换为可用于mako渲染统计的变量(_system点xxx) """ if isinstance(value, dict): for k, v in value.items(): value[k] = METHOD_NAME(v) if isinstance(value, list): for i, v in enumerate(value): value[i] = METHOD_NAME(v) if isinstance(value, str): return value.replace("_system.", "_system点") if "_system." in value else value return value
[ 112, 891, 24, 12900, 3 ]
def METHOD_NAME(self):
    return any(
        [
            self.options.with_opengl,
            self.options.with_opencl,
            self.options.with_cuda,
            self.options.get_safe("with_dx"),
            self.options.get_safe("with_metal"),
        ]
    )
[ 7207, 1667, 1111 ]
def METHOD_NAME(self, entity_type: str, entity_name: str, check_id: str, file_path: str) -> str:
    return f'{entity_type}.{entity_name}.{check_id}'
[ 19, 59 ]
def METHOD_NAME(self, stream):
    try:
        self._number_of_seqs
    except AttributeError:
        return
    names, seqs = self._read_file(stream)
    seqs = ["".join(seq) for seq in seqs]
    if len(seqs) != self._number_of_seqs:
        raise ValueError(
            "Found %i records in this alignment, told to expect %i"
            % (len(seqs), self._number_of_seqs)
        )
    for seq in seqs:
        if len(seq) != self._length_of_seqs:
            raise ValueError(
                "Expected all sequences to have length %d; found %d"
                % (self._length_of_seqs, len(seq))
            )
        if "." in seq:
            raise ValueError("PHYLIP format no longer allows dots in sequence")
    coordinates = Alignment.infer_coordinates(seqs)
    seqs = [seq.replace("-", "") for seq in seqs]
    records = [
        SeqRecord(Seq(seq), id=name, description="")
        for (name, seq) in zip(names, seqs)
    ]
    alignment = Alignment(records, coordinates)
    del self._number_of_seqs
    del self._length_of_seqs
    return alignment
[ 203, 243, 5508 ]
def METHOD_NAME(layout, curves, name, data_type, domain):
    exists = curves.attributes.get(name) is not None

    col = layout.column()
    col.enabled = not exists
    col.operator_context = 'EXEC_DEFAULT'
    props = col.operator("geometry.attribute_add", text=name)
    props.name = name
    props.data_type = data_type
    props.domain = domain
[ 238, 2356, 309 ]
def METHOD_NAME(self):
    h = [2, 0]
    h_sifted = [0, 2]
    q = self._make_mapped_queue(h)
    q._siftup(0)
    assert q.heap == h_sifted
    self._check_map(q)
[ 9, 14025, 206, 186 ]
async def METHOD_NAME(request: web.BaseRequest):
    context: AdminRequestContext = request["context"]
    profile = context.profile

    storage_srv = context.inject_or(SchemaStorageService)

    records = await storage_srv.sync_created(profile)
    results = [record.serialize() for record in records]

    return web.json_response({"results": results})
[ 135, 948, 164, 152 ]
def METHOD_NAME(nvram):
    nvram = bytearray(nvram)

    # extract startup config
    offset = 0
    if len(nvram) < offset + 36:
        raise ValueError('invalid length')
    if get_uint16(nvram, offset + 0) != 0xABCD:
        raise ValueError('no startup config')
    format = get_uint16(nvram, offset + 2)
    length = get_uint32(nvram, offset + 16)
    offset += 36
    if len(nvram) < offset + length:
        raise ValueError('invalid length')
    startup = nvram[offset:offset + length]

    # compressed startup config
    if format == 2:
        try:
            startup = uncompress_LZC(startup)
        except ValueError as err:
            raise ValueError('uncompress startup: ' + str(err))

    offset += length
    # alignment to multiple of 4
    offset = (offset + 3) & ~3
    # check for additional offset of 4
    if len(nvram) >= offset + 8 and \
            get_uint16(nvram, offset + 4) == 0xFEDC and \
            get_uint16(nvram, offset + 6) == 1:
        offset += 4

    # extract private config
    private = None
    if len(nvram) >= offset + 16 and get_uint16(nvram, offset + 0) == 0xFEDC:
        length = get_uint32(nvram, offset + 12)
        offset += 16
        if len(nvram) >= offset + length:
            private = nvram[offset:offset + length]

    return (startup, private)
[ 2884, 294 ]
def METHOD_NAME(self, generate_all: bool = False) -> str:
    """Generate a summary of all resources for an agent."""
    agent_config_resource_summary = self.session.query(AgentConfiguration). \
        filter(AgentConfiguration.agent_id == self.agent_id,
               AgentConfiguration.key == "resource_summary").first()
    resources = self.session.query(Resource).filter(Resource.agent_id == self.agent_id,
                                                    Resource.channel == 'INPUT').all()
    if not resources:
        return

    resource_summary = " ".join([resource.name for resource in resources])

    agent_last_resource = self.session.query(AgentConfiguration). \
        filter(AgentConfiguration.agent_id == self.agent_id,
               AgentConfiguration.key == "last_resource_time").first()

    if agent_config_resource_summary is not None:
        agent_config_resource_summary.value = resource_summary
    else:
        agent_config_resource_summary = AgentConfiguration(agent_id=self.agent_id,
                                                           key="resource_summary",
                                                           value=resource_summary)
        self.session.add(agent_config_resource_summary)
    if agent_last_resource is not None:
        agent_last_resource.value = str(resources[-1].updated_at)
    else:
        agent_last_resource = AgentConfiguration(agent_id=self.agent_id,
                                                 key="last_resource_time",
                                                 value=str(resources[-1].updated_at))
        self.session.add(agent_last_resource)
    self.session.commit()
[ 567, 1849, 2718 ]
def METHOD_NAME(self):
    ts.T_LONGLONG = LLONG_MAX
    self.assertEqual(ts.T_LONGLONG, LLONG_MAX)
    ts.T_LONGLONG = LLONG_MIN
    self.assertEqual(ts.T_LONGLONG, LLONG_MIN)

    ts.T_ULONGLONG = ULLONG_MAX
    self.assertEqual(ts.T_ULONGLONG, ULLONG_MAX)

    ## make sure these will accept a plain int as well as a long
    ts.T_LONGLONG = 3
    self.assertEqual(ts.T_LONGLONG, 3)
    ts.T_ULONGLONG = 4
    self.assertEqual(ts.T_ULONGLONG, 4)
[ 9, 10501 ]
def METHOD_NAME(self):
[ 9, 1442, 1590, 152, 604, 1056, 21 ]
def METHOD_NAME(self): "Return a message for this exception." return self._msg
[ 19, 277 ]
def METHOD_NAME():
    ad = AttrDict()
    ad["one"] = "one"
    ad[1] = 1
    ad[("a", 2)] = ("a", 2)
    assert list(ad.keys()) == ["one", 1, ("a", 2)]
    assert len(ad) == 3

    plk = pickle.dumps(ad)
    pad = pickle.loads(plk)
    assert list(pad.keys()) == ["one", 1, ("a", 2)]
    assert len(pad) == 3

    ad2 = ad.copy()
    assert list(ad2.keys()) == list(ad.keys())
    assert ad.get("one", None) == "one"
    assert ad.get("two", False) is False

    k, v = ad.popitem()
    assert k == "one"
    assert v == "one"

    items = ad.items()
    assert (1, 1) in items
    assert (("a", 2), ("a", 2)) in items
    assert len(items) == 2

    values = ad.values()
    assert 1 in values
    assert ("a", 2) in values
    assert len(values) == 2

    ad2 = AttrDict()
    ad2[1] = 3
    ad2["one"] = "one"
    ad2["a"] = "a"
    ad.update(ad2)
    assert ad[1] == 3
    assert "a" in ad

    ad.__str__()
    with pytest.raises(AttributeError):
        ad.__private_dict__ = None
    with pytest.raises(AttributeError):
        ad.some_other_key
    with pytest.raises(KeyError):
        ad["__private_dict__"] = None

    del ad[1]
    assert 1 not in ad.keys()
    ad.new_value = "new_value"
    assert "new_value" in ad.keys()
    assert ad.new_value == ad["new_value"]

    for key in ad.keys():
        if isinstance(key, str):
            assert key in dir(ad)

    new_value = ad.pop("new_value")
    assert new_value == "new_value"

    del ad.one
    assert "one" not in ad.keys()

    ad.clear()
    assert list(ad.keys()) == []
[ 9, 864, 553 ]
def METHOD_NAME(self):
    testrequest = mock.MagicMock()
    testrequest.build_absolute_uri = lambda path: 'http://testserver{}'.format(path)
    inviteurl = InviteUrlWithTokenIteratorMock(
        request=testrequest, private=True, content_object=self.invite_target)
    self.assertEqual(inviteurl.get_share_url(), 'http://testserver/invite/accept/token1')
    self.assertEqual(inviteurl.get_share_url(), 'http://testserver/invite/accept/token2')
    self.assertEqual(inviteurl.get_share_url(), 'http://testserver/invite/accept/token3')
[ 9, 19, 834, 274, 547 ]
def METHOD_NAME(pf_info, driver):
    """
    Get id of PFs

    :param pf_info: Dict, pfs' info
    :param driver: str, pfs' driver
    :return: List, pfs' id, eg. ['0000:05:00.0', '0000:05:00.1']
    """
    return [pf.get("pci_id") for pf in pf_info.values() if pf.get("driver") == driver]
[ 19, 5290, 147, 245 ]
def METHOD_NAME(text):
    text = re.sub(r"[éèëēêęěė]", "e", text)
    text = re.sub(r"[ãâāáäăâàąåạả]", "a", text)
    text = re.sub(r"[úūüùưûů]", "u", text)
    text = re.sub(r"[ôōóöõòő]", "o", text)
    text = re.sub(r"[ćçč]", "c", text)
    text = re.sub(r"[ïīíîıì]", "i", text)
    text = re.sub(r"[ñńňņ]", "n", text)
    text = re.sub(r"[țť]", "t", text)
    text = re.sub(r"[łľ]", "l", text)
    text = re.sub(r"[żžź]", "z", text)
    text = re.sub(r"[ğ]", "g", text)
    text = re.sub(r"[ř]", "r", text)
    text = re.sub(r"[ý]", "y", text)
    text = re.sub(r"[æ]", "ae", text)
    text = re.sub(r"[œ]", "oe", text)
    text = re.sub(r"[șşšś]", "s", text)
    return text
[ 369, 15717 ]
def METHOD_NAME(self):
[ 386 ]
def METHOD_NAME(self, power):
    self._check_interlock()
    if power < 0 or power > 1:
        raise RuntimeError('Error setting laser power: Power must be between 0 and 1')

    self.power = power

    p = 0xFFF*power
    ps = '%03X' % p

    ret, = self._query('SLP', ps)
    if not ret == '>':
        raise RuntimeError('Error setting laser power')

    #if self.isOn:
    #    self.TurnOn() #turning on actually sets power
[ 0, 1928 ]
def METHOD_NAME(self, tb: str, column_property: ColumnProperty) -> None:
    column = column_property.columns[0]
    if column.foreign_keys:
        self.add_association_proxy_xsd(tb, column_property)
        return
    super().METHOD_NAME(tb, column_property)
[ 238, 105, 1042, 8503 ]
def METHOD_NAME(self): """ Return a list of possible templates. If an overriding class sets a template name, we use that. Otherwise, we try 2 options before defaulting to :file:`catalogue/detail.html`: 1. :file:`detail-for-upc-{upc}.html` 2. :file:`detail-for-class-{classname}.html` This allows alternative templates to be provided for a per-product and a per-item-class basis. """ if self.template_name: return [self.template_name] return [ "oscar/%s/detail-for-upc-%s.html" % (self.template_folder, self.object.upc), "oscar/%s/detail-for-class-%s.html" % (self.template_folder, self.object.get_product_class().slug), "oscar/%s/detail.html" % self.template_folder, ]
[ 19, 671, 83 ]
def METHOD_NAME(self):
    G = nx.Graph()
    assert _pagerank_scipy(G) == {}
[ 9, 35, 5809 ]
def METHOD_NAME():
    for m in [UnitIntervalMesh(5), UnitSquareMesh(2, 2), UnitCubeMesh(2, 2, 2)]:
        c = Constant(1, domain=m)
        assert abs(assemble(c*dx(domain=m)) - 1.0) < 1e-10
        c.assign(4)
        assert abs(assemble(c*dx(domain=m)) - 4.0) < 1e-10
[ 9, 1997, 928, 1283 ]
def METHOD_NAME(self):
    return self.MASK
[ 361 ]
async def METHOD_NAME(pipeline_response):
    deserialized = self._deserialize("QuotaList", pipeline_response)
    list_of_elem = deserialized.value
    if cls:
        list_of_elem = cls(list_of_elem)  # type: ignore
    return deserialized.next_link or None, AsyncList(list_of_elem)
[ 297, 365 ]
def METHOD_NAME(next_link=None):
    request = prepare_request(next_link)

    pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    return pipeline_response
[ 19, 243 ]
def METHOD_NAME(location):
    """Get APK type."""
    with ZipFile(location, 'r') as zf:
        for fil in zf.namelist():
            if fil.endswith('.apk'):
                return 'apks'
    return 'apk'
[ 19, 793, 44 ]