text: string (lengths 15 – 7.82k)
ids: sequence (lengths 1 – 7)
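The two columns pair a masked Python function (`text`, with the target name replaced by `METHOD_NAME`) with a short integer sequence (`ids`). Below is a minimal sketch of reading rows with this schema, assuming the split has been exported as line-delimited JSON to a hypothetical `rows.jsonl`; the filename and export format are assumptions, not part of this dump.

```python
# Minimal sketch: iterate a two-column dump with this schema.
# Assumption: rows were exported to "rows.jsonl" (hypothetical path),
# one JSON object per line with keys "text" and "ids".
import json
from typing import Iterator, List, Tuple


def iter_rows(path: str = "rows.jsonl") -> Iterator[Tuple[str, List[int]]]:
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            # "text": masked Python source containing METHOD_NAME
            # "ids": list of integer ids (lengths 1-7 per the header above)
            yield row["text"], row["ids"]


if __name__ == "__main__":
    for text, ids in iter_rows():
        print(len(text), ids)
```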
async def METHOD_NAME(self, key_value_pairs: List[tuple], sort_by: Optional[List[ElasticFiledSort]] = None, limit: int = 100) -> StorageRecords: service = self._get_storage_service() return await service.METHOD_NAME(key_value_pairs, sort_by, limit=limit)
[ 557, 604, 199 ]
def METHOD_NAME(self) -> List[Adapter]: paths = [] for obj_proxy in self._object_manager.get_objects(): proxy = obj_proxy.get_interface('org.bluez.Adapter1') if proxy: assert isinstance(proxy, Gio.DBusProxy) paths.append(proxy.get_object_path()) return [Adapter(obj_path=path) for path in paths]
[ 19, 5645 ]
def METHOD_NAME(self, request, queryset): User.objects.bulk_recover_credit(User.objects.all(), 1, '用户:全体恢复') return self.message_user(request, '操作成功!')
[ 2986 ]
def METHOD_NAME(): assert cirq.NamedQubit('c')._json_dict_() == {'name': 'c'} assert cirq.NamedQid('c', dimension=3)._json_dict_() == {'name': 'c', 'dimension': 3}
[ 9, 24, 763 ]
def METHOD_NAME( tmp_eeprom_file_paths: Generator[Tuple[str, str], None, None], monkeypatch: Any ) -> Generator[None, None, None]: """Set environment variables.""" left, right = tmp_eeprom_file_paths monkeypatch.setenv( "LEFT_OT3_PIPETTE_DEFINITION", json.dumps( { "pipette_name": "p1000_multi", "pipette_model": 34, "pipette_serial_code": "20230609", "eeprom_file_path": left, } ), ) monkeypatch.setenv( "RIGHT_OT3_PIPETTE_DEFINITION", json.dumps( { "pipette_name": "EMPTY", "pipette_model": -1, "pipette_serial_code": "", "eeprom_file_path": right, } ), ) yield monkeypatch.delenv("LEFT_OT3_PIPETTE_DEFINITION") monkeypatch.delenv("RIGHT_OT3_PIPETTE_DEFINITION")
[ 0, 654, 2786, 5302, 485, 1659 ]
def METHOD_NAME(self) -> Optional[Iterator[TableMetadata]]: """ Return an iterator generating TableMetadata for all of the schemas. """ for schema_version in self._get_raw_extract_iter(): subject = schema_version.subject schema = schema_version.schema.raw_schema LOGGER.info((f'Subject: {subject}, ' f'Schema: {schema}')) try: yield KafkaSchemaRegistryExtractor._create_table( schema=schema, subject_name=subject, cluster_name=schema.get( 'namespace', 'kafka-schema-registry' ), schema_name=schema.get('name', ''), schema_description=schema.get('doc', None), ) except Exception as e: logger.warning(f'Failed to generate table for {subject}: {e}') continue
[ 19, 297, 84 ]
def METHOD_NAME(): sap = lssap.Lssap(context_wrap(Lssap_extra_lines)) assert sap is not None
[ 9, 1205, 1967, 513 ]
def METHOD_NAME(self): if not self.apikey or not self.secretkey: with open(self.CMDLINE, 'r') as fd: cmdline = fd.read() for p in cmdline.split(): if 'baremetalnotificationsecuritykey' in p: self.secretkey = p.split("=")[1] if 'baremetalnotificationapikey' in p: self.apikey = p.split("=")[1] if not self.apikey: raise Exception('cannot find baremetalnotificationapikey in %s' % Server.CMDLINE) if not self.secretkey: raise Exception('cannot find baremetalnotificationsecuritykey in %s' % Server.CMDLINE) return self.apikey, self.secretkey
[ 19, 3568 ]
def METHOD_NAME(x,y,t): return (IOC_INOUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
[ 18032 ]
def METHOD_NAME( self: AttnProcessor2_0, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, ): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask( attention_mask, sequence_length, batch_size ) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view( batch_size, attn.heads, -1, attention_mask.shape[-1] ) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) context_k, context_v = apply_hypernetworks(hidden_states, encoder_hidden_states) key = attn.to_k(context_k) value = attn.to_v(context_v) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape( batch_size, -1, attn.heads * head_dim ) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states
[ 820, 1506, 76 ]
def METHOD_NAME(self): """unauthorized access is forbidden""" # get object osarch_api_1 = Osarch.objects.get(osarch_name='osarch_1') # get response response = self.client.get('/api/osarch/' + str(osarch_api_1.osarch_id) + '/') # compare self.assertEqual(response.status_code, 401)
[ 9, 3260, 1801, 58, 3166 ]
def METHOD_NAME(self, iface): """ Bring an iface up. :param iface: Network interface, e.g. eth0 :type iface: string """ subprocess.call([f'/etc/init.d/net.{iface}', 'restart']) subprocess.call(['rc-update', 'add', f'net.{iface}', 'default'])
[ 1 ]
def METHOD_NAME( experiment: config.OfflineExperimentConfig[builders.Networks, Any, Any], *, make_snapshot_models: Optional[config.SnapshotModelFactory[ builders.Networks]] = None, name: str = 'agent', program: Optional[lp.Program] = None) -> lp.Program: """Builds a Launchpad program for running the experiment. Args: experiment: configuration for the experiment. make_snapshot_models: a factory that defines what is saved in snapshots. name: name of the constructed program. Ignored if an existing program is passed. program: a program where agent nodes are added to. If None, a new program is created. Returns: The Launchpad program with all the nodes needed for running the experiment. """ def build_model_saver(variable_source: core.VariableSource): assert experiment.checkpointing environment = experiment.environment_factory(0) spec = specs.make_environment_spec(environment) networks = experiment.network_factory(spec) models = make_snapshot_models(networks, spec) # TODO(raveman): Decouple checkpointing and snahpshotting configs. return snapshotter.JAXSnapshotter( variable_source=variable_source, models=models, path=experiment.checkpointing.directory, add_uid=experiment.checkpointing.add_uid) def build_counter(): counter = counting.Counter() if experiment.checkpointing: counter = savers.CheckpointingRunner( counter, key='counter', subdirectory='counter', time_delta_minutes=experiment.checkpointing.time_delta_minutes, directory=experiment.checkpointing.directory, add_uid=experiment.checkpointing.add_uid, max_to_keep=experiment.checkpointing.max_to_keep, checkpoint_ttl_seconds=experiment.checkpointing.checkpoint_ttl_seconds, ) return counter def build_learner( random_key: networks_lib.PRNGKey, counter: Optional[counting.Counter] = None, ): """The Learning part of the agent.""" dummy_seed = 1 spec = ( experiment.environment_spec or specs.make_environment_spec(experiment.environment_factory(dummy_seed))) # Creates the networks to optimize (online) and target networks. networks = experiment.network_factory(spec) dataset_key, random_key = jax.random.split(random_key) iterator = experiment.demonstration_dataset_factory(dataset_key) # make_demonstrations is responsible for putting data onto appropriate # training devices, so here we apply prefetch, so that data is copied over # in the background. 
iterator = utils.prefetch(iterable=iterator, buffer_size=1) counter = counting.Counter(counter, 'learner') learner = experiment.builder.make_learner( random_key=random_key, networks=networks, dataset=iterator, logger_fn=experiment.logger_factory, environment_spec=spec, counter=counter) if experiment.checkpointing: learner = savers.CheckpointingRunner( learner, key='learner', subdirectory='learner', time_delta_minutes=5, directory=experiment.checkpointing.directory, add_uid=experiment.checkpointing.add_uid, max_to_keep=experiment.checkpointing.max_to_keep, checkpoint_ttl_seconds=experiment.checkpointing.checkpoint_ttl_seconds, ) return learner if not program: program = lp.Program(name=name) key = jax.random.PRNGKey(experiment.seed) counter = program.add_node(lp.CourierNode(build_counter), label='counter') if experiment.max_num_learner_steps is not None: program.add_node( lp.CourierNode( lp_utils.StepsLimiter, counter, experiment.max_num_learner_steps, steps_key='learner_steps'), label='counter') learner_key, key = jax.random.split(key) learner_node = lp.CourierNode(build_learner, learner_key, counter) learner = learner_node.create_handle() program.add_node(learner_node, label='learner') for evaluator in experiment.get_evaluator_factories(): evaluator_key, key = jax.random.split(key) program.add_node( lp.CourierNode(evaluator, evaluator_key, learner, counter, experiment.builder.make_actor), label='evaluator') if make_snapshot_models and experiment.checkpointing: program.add_node(lp.CourierNode(build_model_saver, learner), label='model_saver') return program
[ 93, 5206, 8024, 2355 ]
def METHOD_NAME(self) -> str: """ (Required only by `compute.ImageIamPolicy`) The policy data generated by a `organizations_get_iam_policy` data source. """ return pulumi.get(self, "policy_data")
[ 54, 365 ]
def METHOD_NAME(self): super().METHOD_NAME() self.att_l.METHOD_NAME() self.att_r.METHOD_NAME() self._cached_edge_index = None self._cached_adj_t = None
[ 656, 386 ]
def METHOD_NAME(O):
[ 697, 59, 11516, 40 ]
def METHOD_NAME(self): """ Test reading IMS10 bulletin format from bytes io object. """ with io.open(self.path_to_ims, "rb") as fh: with io.BytesIO(fh.read()) as buf: buf.seek(0, 0) cat = _read_ims10_bulletin(buf, _no_uuid_hashes=True) assert len(cat) == 1 self._assert_catalog(cat)
[ 9, 62, 2499, 321, 249 ]
def METHOD_NAME(self, split, combine=False, **kwargs): """Load a given dataset split (e.g., train, valid, test).""" def get_path(type, split): return os.path.join(self.args.data, type, split) def make_dataset(type, dictionary): split_path = get_path(type, split) dataset = data_utils.load_indexed_dataset( split_path, self.source_dictionary, self.args.dataset_impl, combine=combine, ) return dataset input0 = make_dataset('input0', self.source_dictionary) assert input0 is not None, 'could not find dataset: {}'.format(get_path(type, split)) input1 = make_dataset('input1', self.source_dictionary) if self.args.init_token is not None: input0 = PrependTokenDataset(input0, self.args.init_token) if input1 is None: src_tokens = input0 else: if self.args.separator_token is not None: input1 = PrependTokenDataset(input1, self.args.separator_token) src_tokens = ConcatSentencesDataset(input0, input1) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(src_tokens)) if self.args.truncate_sequence: src_tokens = TruncateDataset(src_tokens, self.args.max_positions) dataset = { 'id': IdDataset(), 'net_input': { 'src_tokens': RightPadDataset( src_tokens, pad_idx=self.source_dictionary.pad(), ), 'src_lengths': NumelDataset(src_tokens, reduce=False), }, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens, reduce=True), } if self.args.add_prev_output_tokens: prev_tokens_dataset = RightPadDataset( RollDataset(src_tokens, 1), pad_idx=self.dictionary.pad(), ) dataset['net_input'].update( prev_output_tokens=prev_tokens_dataset, ) if not self.args.regression_target: label_dataset = make_dataset('label', self.target_dictionary) if label_dataset is not None: dataset.update( target=OffsetTokensDataset( StripTokenDataset( label_dataset, id_to_strip=self.target_dictionary.eos(), ), offset=-self.target_dictionary.nspecial, ) ) else: label_path = "{0}.label".format(get_path('label', split)) if os.path.exists(label_path): dataset.update( target=RawLabelDataset([ float(x.strip()) for x in open(label_path).readlines() ]) ) nested_dataset = NestedDictionaryDataset( dataset, sizes=[src_tokens.sizes], ) if self.args.no_shuffle: dataset = nested_dataset else: dataset = SortDataset( nested_dataset, # shuffle sort_order=[shuffle], ) print("| Loaded {0} with #samples: {1}".format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split]
[ 557, 126 ]
def METHOD_NAME(self): self.assertEqual( format_grammar._parse_segments("/* */{ code; }", False), ( ["/* */", None, ""], {0: [format_grammar._CppCode(1, "code;", 5, 0, False)]}, [], ), )
[ 9, 7728, 1887, 573, 1591 ]
def METHOD_NAME(self): password_salt = generate_salt() user = User.create( email='[email protected]', password_hash=create_bcrypt_hash('edit', password_salt), password_salt=password_salt, roles_new=[Role.create( name='pipeline_editor_1', permissions=[ Permission.create( entity=Entity.PIPELINE, entity_id='test', access=4, ) ] )], username='editor', ) access = user.get_access(Entity.ANY) self.assertEqual(4, access) access = user.get_access(Entity.GLOBAL, None) self.assertEqual(0, access) access = user.get_access(Entity.PIPELINE, 'test') self.assertEqual(4, access) access = user.get_access(Entity.PIPELINE, 'not_test') self.assertEqual(0, access) self.assertTrue(has_at_least_viewer_role(user, Entity.PIPELINE, 'test')) self.assertTrue(has_at_least_editor_role(user, Entity.PIPELINE, 'test'))
[ 9, 19, 21, 1089, 43, 1148, 2977 ]
def METHOD_NAME(app): app.connect("autodoc-process-docstring", remove_lines_before_parameters)
[ 102 ]
def METHOD_NAME(self, opt_dict): r""" Plot protocol for Gf objects with a MeshImTime. Parameters ---------- opt_dict: dictionary Can contain: - mode: string, default None Mode to plot the Green's function in: -- 'R': real part only -- 'I': imaginary part only - x_window: tuple, default None (xmin,xmax) - name: str name of the gf for the label and legend """ opt_dict['linestyle'] = 'None' if not 'marker' in opt_dict: opt_dict['marker'] = 'x' return plot_base.plot_base( self, opt_dict, r'$\tau$', lambda x : r'%s$(\tau)$'%x, list(self.mesh.values()))
[ 7389, 7391 ]
def METHOD_NAME(series_data): """Ensure warning is raised by unequal sampling frequencies""" firstarr, secondarr, seglen = series_data f_s = 0.001 first = TimeSeries(firstarr, sample_rate=f_s) second = TimeSeries(secondarr, sample_rate=f_s * 2.32) with pytest.warns(UserWarning, match="Sampling frequencies are unequal"): spectral.coherence(first, second, segmentlength=seglen) with pytest.raises(ValueError): spectral.coherence(first, second, segmentlength=seglen, downsample=False) coh1 = spectral.coherence(first, second, segmentlength=seglen) coh2 = spectral.coherence(first, second, segmentlength=seglen, downsample=True) assert all(np.array(coh1.data) == np.array(coh2.data))
[ 9, 16158, 4182, 718 ]
def METHOD_NAME(self, layername, grassmapname, force=False): """ Put an existing map from GRASS to the layer collection @param string name of the layer @param string name of an existing GRASS map layer @param boolean optional, whether to overwrite values if key exists """ # fill the new grass array with the contents from the map (must exist) if grassmapname in grass.list_strings("rast"): layer = garray.array(grassmapname) self.grassmapnames[layername] = grassmapname self.setlayer(layername, layer, force) else: raise error.DataError( Grassland.ME, "Grass Map was missing: " + grassmapname )
[ -1 ]
def METHOD_NAME(obj) -> None: # Set magic token for check_api_annotations linter. if hasattr(obj, "__name__"): obj._annotated = obj.__name__
[ 1743, 9109 ]
def METHOD_NAME(name, rawtext, text, lineno, inliner, options={}, content=[]): section = int(name[-1]) page = None for man in man_pages: if man[1] == text and man[4] == section: page = man[0] break if page == None: page = "man7/flux-undocumented" section = 7 node = docutils.nodes.reference( rawsource=rawtext, text=f"{text}({section})", refuri=f"../{page}.html", **options, ) return [node], []
[ 4837, 1018 ]
def METHOD_NAME(): assert pixel(ImageMath.eval("A%A", A=A)) == "I 0" assert pixel(ImageMath.eval("B%B", B=B)) == "I 0" assert pixel(ImageMath.eval("A%B", A=A, B=B)) == "I 1" assert pixel(ImageMath.eval("B%A", A=A, B=B)) == "I 0" assert pixel(ImageMath.eval("Z%A", A=A, Z=Z)) == "I 0" assert pixel(ImageMath.eval("Z%B", B=B, Z=Z)) == "I 0"
[ 9, 808, 692 ]
def METHOD_NAME( balance_data: Iterable[Tuple[GiftCard, float]], order: "Order", user: Optional[User], app: Optional[App], ): events = [ GiftCardEvent( gift_card=gift_card, user=user, app=app, order=order, type=GiftCardEvents.USED_IN_ORDER, parameters={ "balance": { "currency": gift_card.currency, "current_balance": gift_card.current_balance.amount, "old_current_balance": previous_balance, }, }, ) for gift_card, previous_balance in balance_data ] return GiftCardEvent.objects.bulk_create(events)
[ 4755, 4756, 1304, 623, 852, 417 ]
def METHOD_NAME(resource_group_name: Optional[pulumi.Input[str]] = None, test_base_account_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTestBaseAccountResult]: """ Gets a Test Base Account. Azure REST API version: 2022-04-01-preview. :param str resource_group_name: The name of the resource group that contains the resource. :param str test_base_account_name: The resource name of the Test Base Account. """ ...
[ 19, 9, 414, 598, 146 ]
def METHOD_NAME(self): if self._obj.References != self._selectionWidget.references: self._obj.References = self._selectionWidget.references self._applyWidgetChanges() self._obj.Document.recompute() FreeCADGui.ActiveDocument.resetEdit() self._restoreVisibility() return True
[ 1437 ]
def METHOD_NAME(cls, crawler, *args, **kwargs): spider = super().METHOD_NAME(crawler, *args, **kwargs) crawler.signals.connect(spider.spider_idle, signal=scrapy.signals.spider_idle) return spider
[ 280, 8777 ]
def METHOD_NAME(self, value): """One function to control them all. Enables or disables all groups.""" self.grpSettings.setEnabled(value) self.grpPreview.setEnabled(value)
[ 0, 1551, 1111 ]
def METHOD_NAME(ds, wg, pool): original_io = ds.index.io ds.index.io = IOHandlerRemote(ds, wg, pool) yield ds.index.io.terminate() ds.index.io = original_io
[ 2437, 249 ]
def METHOD_NAME(self, participation): return format_html( '<td class="user-points"><a href="{url}">{points}<div class="solving-time">{cumtime}</div></a></td>', url=reverse('contest_all_user_submissions', args=[self.contest.key, participation.user.user.username]), points=floatformat(participation.score, -self.contest.points_precision), cumtime=nice_repr(timedelta(seconds=participation.cumtime), 'noday') if self.config['cumtime'] else '', )
[ 52, 2053, 1571 ]
def METHOD_NAME(self): def rolling_trend(datetime, numeric): x = pd.Series(numeric.values, index=datetime.values) return apply_rolling_agg_to_series( x, calculate_trend, self.window_length, self.gap, self.min_periods, ) return rolling_trend
[ 19, 559 ]
def METHOD_NAME(self, rtype=None, name=None, content=None): payload = self._get(f"/zones/{self.domain_id}/records") records = [] for record in payload: if "id" in record: processed_record = { "id": record["id"], "type": record["type"], "name": self._full_name(record["name"]), "ttl": record["ttl"], "content": record["content"], } records.append(processed_record) if rtype: records = [record for record in records if record["type"] == rtype] if name: records = [ record for record in records if record["name"] == self._full_name(name) ] if content: records = [record for record in records if record["content"] == content] LOGGER.debug("list_records: %s", records) return records
[ 245, 2530 ]
async def METHOD_NAME(model, object_id): if not hasattr(model, "metadata"): raise Exception("Model does not support metadata") obj = await utils.database.get_object(model, object_id, raise_exception=False) if obj is None: raise Exception("Object not found") return obj
[ 19, 61, 250, 1094 ]
def METHOD_NAME(uvector): match = None for test_func in list(uvector_mappings.keys()): if test_func(uvector): match = uvector_mappings[test_func] return numpy.array(match[0](uvector), dtype=match[1]) else: raise ValueError( "unsupported uvector data type for conversion to numpy array %s" % (uvector))
[ 17878, 24, 2028 ]
def METHOD_NAME(self, test): doc_first_line = test.shortDescription() if self.descriptions and doc_first_line: return '\n'.join((str(test), doc_first_line)) else: return str(test)
[ 19, 1067 ]
def METHOD_NAME(schedules, fmt, dorm="Unknown"): # TODO something something globals file = fmt.format(re.sub("\s", "-", dorm.lower().strip())) print("Rendering schedule for dorm {} to file {}".format(dorm, file)) with open(file, "w") as file: yaml.dump(schedules_to_document(schedules), file, default_flow_style=None)
[ 338, 507, 24, 171 ]
def METHOD_NAME(self, class_type, name, value): return _swig_setattr_nondynamic(self, class_type, name, value, 0)
[ 5438, 5439 ]
def METHOD_NAME(self): return { "test_statistic_merging": { "split_concat": {"model": self._get_split_concat_backend_model}, "shared_conv": {"model": self._get_shared_conv_model}, } }
[ 9, 434 ]
def METHOD_NAME(self): self._last = len(self._warnings)
[ 656 ]
def METHOD_NAME(): # Simple test to try and quickly overfit the span_finder component - ensuring the ML models work correctly fix_random_seed(0) nlp = English() span_finder = nlp.add_pipe("span_finder", config={"spans_key": SPANS_KEY}) train_examples = make_examples(nlp) optimizer = nlp.initialize(get_examples=lambda: train_examples) assert span_finder.model.get_dim("nO") == 2 for i in range(50): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) assert losses["span_finder"] < 0.001 # test the trained model test_text = "I like London and Berlin" doc = nlp(test_text) spans = doc.spans[SPANS_KEY] assert len(spans) == 3 assert set([span.text for span in spans]) == { "London", "Berlin", "London and Berlin", } # Also test the results are still the same after IO with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) nlp2 = util.load_model_from_path(tmp_dir) doc2 = nlp2(test_text) spans2 = doc2.spans[SPANS_KEY] assert len(spans2) == 3 assert set([span.text for span in spans2]) == { "London", "Berlin", "London and Berlin", } # Test scoring scores = nlp.evaluate(train_examples) assert f"spans_{SPANS_KEY}_f" in scores # It's not perfect 1.0 F1 because it's designed to overgenerate for now. assert scores[f"spans_{SPANS_KEY}_p"] == 0.75 assert scores[f"spans_{SPANS_KEY}_r"] == 1.0 # also test that the spancat works for just a single entity in a sentence doc = nlp("London") assert len(doc.spans[SPANS_KEY]) == 1
[ 9, -1, 249 ]
def METHOD_NAME(self,(exitcode, stdout, stderr)): if exitcode == PULSE2_SUCCESS_ERROR: # success self.logger.info("Circuit #%s: pull %s done (exitcode == 0)" % (self.coh.id, self.name)) self.update_history_done(exitcode, stdout, stderr) if self.coh.isStateStopped(): return DIRECTIVE.KILLED if self.phase.switch_to_done(): return self.next() return self.give_up() elif self.name in self.config.non_fatal_steps: self.logger.info("Circuit #%s: pull %s failed (exitcode != 0), but non fatal according to scheduler config file" % (self.coh.id, self.name)) self.update_history_failed(exitcode, stdout, stderr) self.phase.set_done() return self.next() else: # failure: immediately give up self.logger.info("Circuit #%s: pull %s failed (exitcode != 0)" % (self.coh.id, self.name)) self.update_history_failed(exitcode, stdout, stderr) return self.switch_phase_failed()
[ 214, 545, 3200, 1571 ]
def METHOD_NAME(self, api: _Api) -> list[_HubSpotResult]: results = api.get_all() return [_HubSpotResult(**elem.to_dict()) for elem in results]
[ 1047, 75 ]
def METHOD_NAME(self): s = """ try: x except E as N: y""" self.validate(s)
[ 9, 80 ]
def METHOD_NAME(): pd = PackageDependency("pytest", None) pd.fill_missing_version() assert pd.version == pytest.__version__
[ 9, 360, 2913, 1917, 1038, 281 ]
def METHOD_NAME(): # test normal def func1(input0, input1, param0=5, param1=7): pass inputs, params = get_inputs_and_params(func1) for index, _input in enumerate(inputs): assert isinstance(_input, Parameter) assert _input.name == f"input{index}" assert _input.kind == _input.POSITIONAL_OR_KEYWORD assert _input.default == Parameter.empty default = [5, 7] for index, param in enumerate(params): assert isinstance(param, Parameter) assert param.name == f"param{index}" assert param.kind == param.POSITIONAL_OR_KEYWORD assert param.default == default[index] # Error MESSAGE = re.escape("Signature must not have *args or **kwargs") def func2(input0, input1, *args, param0=5, param1=7): pass def func3(input0, input1, param0=5, param1=7, **kwargs): pass with pytest.raises(ValueError, match=MESSAGE): get_inputs_and_params(func2) with pytest.raises(ValueError, match=MESSAGE): get_inputs_and_params(func3)
[ 9, 19, 1461, 61, 434 ]
def METHOD_NAME(self): parameters = { **self.serialize_url_param( "resourceGroupName", self.ctx.args.resource_group, required=True, ), **self.serialize_url_param( "resourceName", self.ctx.args.vault_name, required=True, ), **self.serialize_url_param( "subscriptionId", self.ctx.subscription_id, required=True, ), } return parameters
[ 274, 386 ]
def METHOD_NAME(): '''Test that the setter raises the expected exceptions.''' shape_values = ["gh_evaluator", "gh_quadrature_face"] shapes_metadata = ShapesMetadata(shape_values) with pytest.raises(TypeError) as info: shapes_metadata.shapes = "invalid" assert ("ShapesMetadata values should be provided as a list but found " "'str'." in str(info.value)) with pytest.raises(TypeError) as info: shapes_metadata.shapes = [] assert ("The ShapesMetadata list should contain at least one entry, but " "it is empty." in str(info.value)) with pytest.raises(TypeError) as info: shapes_metadata.shapes = [None] assert ("The ShapesMetadata list should be a list containing objects of " "type str but found 'None', which is of type 'NoneType'." in str(info.value)) with pytest.raises(ValueError) as info: shapes_metadata.shapes = ["invalid"] assert ("The 'shape' metadata should be a recognised value (one of " "['gh_quadrature_xyoz', 'gh_quadrature_face', " "'gh_quadrature_edge', 'gh_evaluator']) but found 'invalid'." in str(info.value))
[ 9, 800, 1096 ]
def METHOD_NAME(self, method_name): """ Check whether a feature / endpoint is supported.""" # Field: endpoints > ... pass
[ 220, 2247 ]
def METHOD_NAME(self, request, view): # Workaround to ensure DjangoModelPermissions are not applied # to the root view when using DefaultRouter. if getattr(view, '_ignore_rbac_permissions', False): return True if not request.user: return False if request.user.is_anonymous and self.authenticated_users_only: return False raw_action = getattr(view, 'raw_action', request.method) if raw_action in ['metadata', 'OPTIONS']: return True perms = self.get_require_perms(request, view) if isinstance(perms, str): perms = [perms] has = request.user.has_perms(perms) logger.debug('View require perms: {}, result: {}'.format(perms, has)) return has
[ 220, 204 ]
def METHOD_NAME(pretty_name, cmd): testDivider() print(pretty_name) testDivider() cmdLine = ' '.join(cmd) print(cmdLine) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) o, e = proc.communicate() print(o.decode('ascii')) print(e.decode('ascii')) errorCode = proc.returncode if errorCode != 0: raise Exception("Operation failed with the return code: " + str(errorCode))
[ 1005, 1660 ]
def METHOD_NAME(cls):
[ 0, 1, 2 ]
def METHOD_NAME(self, epoch): if self.eval_dataloader is not None: self.model.eval() dist, on, cnt = 0, 0, 0 with torch.no_grad(): for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader: chosen_ids = chosen_ids.squeeze(1).to(torch.cuda.current_device()) c_mask = c_mask.squeeze(1).to(torch.cuda.current_device()) reject_ids = reject_ids.squeeze(1).to(torch.cuda.current_device()) r_mask = r_mask.squeeze(1).to(torch.cuda.current_device()) chosen_reward = self.model(chosen_ids, attention_mask=c_mask) reject_reward = self.model(reject_ids, attention_mask=r_mask) for i in range(len(chosen_reward)): cnt += 1 if chosen_reward[i] > reject_reward[i]: on += 1 dist += (chosen_reward - reject_reward).mean().item() self.dist = dist / len(self.eval_dataloader) self.acc = on / cnt if is_rank_0(): log = pd.DataFrame( [[(epoch + 1) * len(self.train_dataloader), self.loss.item(), self.dist, self.acc]], columns=['step', 'loss', 'dist', 'acc'] ) log.to_csv('log.csv', mode='a', header=False, index=False)
[ 1171 ]
def METHOD_NAME(server=None): """Returns the default config of the operator. This config can then be changed to the user needs and be used to instantiate the operator. The Configuration allows to customize how the operation will be processed by the operator. Parameters ---------- server : server.DPFServer, optional Server with channel connected to the remote or local instance. When ``None``, attempts to use the global server. """ return Operator.METHOD_NAME(name="accumulate", server=server)
[ 235, 200 ]
def METHOD_NAME(self, event): button = event["button"] if button == self.button_open: self.py3.command_run(f"xdg-open {self.project}") if button != self.button_refresh: self.py3.prevent_refresh()
[ 69, 212 ]
def METHOD_NAME(self): self.devnull.close() sys.argv = self.SYSARGV[:] sys.stderr = self.STDERR pyftpdlib.servers.FTPServer = self.original_ftpserver_class super().METHOD_NAME()
[ 531, 481 ]
def METHOD_NAME(text): text = re.sub(r'\s+', ' ', text) text = text.strip() return text
[ 1859, 1356 ]
def METHOD_NAME(self): self.assertIsInstance(Article.objects, ArticleManager)
[ 9, 578, 722, 2 ]
def METHOD_NAME(string): # this expression matches to 'as few symbols as possible (0 upwards) between any asterisks' OR # 'as few symbols as possible (0 upwards) between an asterisk and the end of the string' return re.sub('\*[^\*]*?(\*|$)', '', string)
[ 188, -1, 2107 ]
def METHOD_NAME(os_path, name): return get_meta_map(os_path)[name]
[ 19, 1094 ]
def METHOD_NAME(self, batch, *args, **kwargs): batch = SampleList(batch) loader = self.get_loader(batch.dataset_type) return loader.METHOD_NAME(batch)
[ 123, 2277 ]
def METHOD_NAME(client): mindsdb_data = { 'database': { 'engine': 'postgres', 'parameters': {} } } response = client.post('/api/databases', json=mindsdb_data, follow_redirects=True) assert '400' in response.status
[ 9, 129, 463, 654, 156, 2039 ]
async def METHOD_NAME(self) -> None: await self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME(gds: GraphDataScience) -> None: nx_G = nx.Graph() nx_G.add_node(1, labels="N", time=1) nx_G.add_node(42, labels=["N", "M"], time=2) nx_G.add_node(1337, labels=["O"], time=3) nx_G.add_node(2, labels=["O"], time=10) nx_G.add_edge(1, 42, weight=0.4) nx_G.add_edge(1, 1337, weight=1.4) nx_G.add_edge(42, 1337, weight=0.1) nodes, rels = gds.graph.networkx._parse(nx_G) assert len(nodes) == 3 assert_frame_equal(nodes[0], DataFrame({"labels": [["N"]], "nodeId": [1], "time": [1]})) assert_frame_equal(nodes[1], DataFrame({"labels": [["M", "N"]], "nodeId": [42], "time": [2]})) assert_frame_equal(nodes[2], DataFrame({"labels": [["O"], ["O"]], "nodeId": [1337, 2], "time": [3, 10]})) assert len(rels) == 1 assert_frame_equal( rels[0], DataFrame( { "relationshipType": ["R"] * 3, "sourceNodeId": [1, 1, 42], "targetNodeId": [42, 1337, 1337], "weight": [0.4, 1.4, 0.1], } ), )
[ 9, 214, 529, 119 ]
def METHOD_NAME(get_connection, get_conn, operation, expected): get_connection.return_value = CONN dag_id = "sftp_dag" task_id = "sftp_task" task = SFTPOperator( task_id=task_id, ssh_conn_id=CONN_ID, dag=DAG(dag_id), start_date=timezone.utcnow(), local_filepath=LOCAL_FILEPATH, remote_filepath=REMOTE_FILEPATH, operation=operation, ) task_metadata = SFTPExtractor(task).extract() assert task_metadata.name == f"{dag_id}.{task_id}" assert task_metadata.inputs == expected[0] assert task_metadata.outputs == expected[1]
[ 9, 297, 1264, 4579, 147 ]
def METHOD_NAME(self, card, comment): self.pmass.add(card, comment)
[ 238, 18176 ]
def METHOD_NAME(self): return self.cpu_utilization
[ 19, 2265, 5898 ]
def METHOD_NAME(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.METHOD_NAME() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].METHOD_NAME()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result
[ 24, 553 ]
def METHOD_NAME( spalloc_server, spalloc_port=22244, spalloc_user="unknown user"): """ Parse a URL to the old-style service. This may take the form: spalloc://[email protected]:22244 The leading ``spalloc://`` is the mandatory part (as is the actual host name). If the port and user are omitted, the defaults given in the other arguments are used (or default defaults). A bare hostname can be used instead. If that's the case (i.e., there's no ``spalloc://`` prefix) then the port and user are definitely used. :param str spalloc_server: Hostname or URL :param int spalloc_port: Default port :param str spalloc_user: Default user :return: hostname, port, username :rtype: tuple(str,int,str) """ if spalloc_port is None or spalloc_port == "": spalloc_port = 22244 if spalloc_user is None or spalloc_user == "": spalloc_user = "unknown user" parsed = urlparse(spalloc_server, "spalloc") if parsed.netloc == "": return spalloc_server, spalloc_port, spalloc_user return parsed.hostname, (parsed.port or spalloc_port), \ (parsed.username or spalloc_user)
[ 214, 2228, -1 ]
def METHOD_NAME(interface, custom_params): """ Enable FCoE on specified interface by coping default configuration Change parameters specified in custom_params argument """ # Map parameters to configuration mane # Check man fcoe-utils to add more parameters filename = _get_config_name(interface) config = _parse_config(_custom_parameter_to_config(custom_params)) with open(filename, 'w') as outfile: outfile.write(CONFFILE_HEADER + "\n") for name, value in six.iteritems(config): outfile.write('%s="%s"\n' % (name, value))
[ 111 ]
async def METHOD_NAME(context): """ When the connection breaks while the server is calling protocol.send from somewhere other than the main read - response loop. Make sure that this still triggers the proper connection cleanup. """ srv, ctx = context _, writer = await asyncio.open_connection(*srv.sockets[0].getsockname()) writer.close() # Need this sleep for test to work, otherwise closed protocol isn't detected await asyncio.sleep(0) proto = next(iter(ctx.connections.values())) proto.writer.transport.set_write_buffer_limits(high=0) # Might raise DisconnectedError depending on OS with contextlib.suppress(DisconnectedError): await proto.send_message({"command": "Some long message" * 4096}) await asyncio.sleep(0.1) assert len(ctx.connections) == 0
[ 9, 550, 31, 751 ]
def METHOD_NAME( self, traceback_mock, echo_mock ): dummy_ebcli_app = MagicMock() dummy_ebcli_app.setup = MagicMock(side_effect=TestEbRun.MyDummyEBCLIException('My Exception Message')) with patch.object(sys, 'argv', ['--debug']): ebrun.run_app(dummy_ebcli_app) echo_mock.side_effect = [ traceback_mock, 'INFO: My Exception Message' ] dummy_ebcli_app.close.assert_called_once_with(code=4)
[ 9, 7021, -1, 442, 41, 290, 584 ]
def METHOD_NAME(self): self.assertEqual( "Sheet Pan Strawberry Shortcake Recipe", self.harvester_class.title() )
[ 9, 2893 ]
def METHOD_NAME(self): assert convert.bytes_to_unit("kb", None) is None assert convert.bytes_to_unit("kB", 100) == pytest.approx(0.098, rel=0.1) assert convert.bytes_to_unit("kB", 4500) == pytest.approx(4.4, rel=0.1) assert convert.bytes_to_unit("kB", 8004200) == pytest.approx(7800, rel=0.1) assert convert.bytes_to_unit("kB", 12348004200) == pytest.approx(12000000, rel=0.1)
[ 9, 24, 986 ]
async def METHOD_NAME(groupNumber: int, platformName: str, target: str): try: await config.del_subscribe(TargetQQGroup(group_id=groupNumber), target, platformName) except (NoSuchUserException, NoSuchSubscribeException): raise HTTPException(status.HTTP_400_BAD_REQUEST, "no such user or subscribe") return StatusResp(ok=True, msg="")
[ 1269, 846, 1066 ]
def METHOD_NAME(self): outputs = [1.0, 0.4, 0.5625, 0.0, 0.6666666666667, 0.6666666666667, 0.0, 0.0, 0.0, 1.0, 0.5] for (y_true, y_pred), expected in zip(_KAPPA_INPUTS, outputs): yield self.check_kappa, y_true, y_pred, None, False, expected # Swap y_true and y_pred and test again for (y_pred, y_true), expected in zip(_KAPPA_INPUTS, outputs): yield self.check_kappa, y_true, y_pred, None, False, expected
[ 9, 7771, 6317 ]
def METHOD_NAME(en_tokenizer): doc = en_tokenizer(" This is a cat.") assert doc[0].idx == 0 assert len(doc[0]) == 3 assert doc[1].idx == 3
[ 9, -1 ]
def METHOD_NAME( self, past_target: torch.Tensor, past_observed_values: torch.Tensor, ) -> Tuple[Tuple[torch.Tensor, ...], torch.Tensor, torch.Tensor]: # scale the input past_target_scaled, loc, scale = self.scaler( past_target, past_observed_values ) lags = lagged_sequence_values( self.lags_seq, past_target_scaled[:, : -self.context_length, ...], past_target_scaled[:, -self.context_length :, ...], dim=-1, ) # add loc and scale to past_target_patches as additional features log_abs_loc = loc.abs().log1p() log_scale = scale.log() expanded_static_feat = unsqueeze_expand( torch.cat([log_abs_loc, log_scale], dim=-1), dim=1, size=lags.shape[1], ) inputs = torch.cat((lags, expanded_static_feat), dim=-1) # project patches enc_in = self.patch_proj(inputs) embed_pos = self.positional_encoding(enc_in.size()) # transformer encoder with positional encoding enc_out = self.encoder(enc_in + embed_pos) # flatten and project to prediction length * d_model flatten_out = self.flatten(enc_out.flatten(start_dim=1)) # project to distribution arguments distr_args = self.args_proj( flatten_out.reshape(-1, self.prediction_length, self.d_model) ) return distr_args, loc, scale
[ 76 ]
def METHOD_NAME(self, speed): """ Sets the fan speed Args: speed: An integer, the percentage of full fan speed to set fan to, in the range 0 (off) to 100 (full speed) Returns: A boolean, True if speed is set successfully, False if not """ print("Setting Fan speed is not allowed") return False
[ 0, 1942 ]
def METHOD_NAME(Spotfinder,key): print() image = Spotfinder.images[key] canonical_info = [ ("%s","File",pretty_filename(Spotfinder,key)), ] canonical_info.extend(key_safe_items(image)) optionally_add_saturation(canonical_info,image) for item in canonical_info: if item[2]==None: print("%25s : None"%item[1]) else: print("%25s : %s"%(item[1],item[0]%item[2]))
[ 885, 660, 577 ]
def METHOD_NAME(): """Redefine this fixture to register tasks with the test Celery app.""" return []
[ 5595, 2, 620 ]
def METHOD_NAME(): """Program entry point""" parser = make_option_parser() options = parser.parse_args() init_generic_logging( logfile=LOG_FILE, stderr=options.stderr, stdout=True, read_config=True, ) if options.l2 or options.vlan: # protect against multiple invocations of long-running jobs verify_singleton() if options.l2: do_layer2_detection() if options.vlan: if options.include_vlans: vlans = [int(v) for v in options.include_vlans] else: vlans = [] do_vlan_detection(vlans) delete_unused_prefixes() delete_unused_vlans()
[ 57 ]
def METHOD_NAME(error: grpc.aio.AioRpcError) -> Optional[str]: """Return formatted string of the trailing metadata if exists otherwise return None :param error: AioRpcError :return: string of Metadata or None """ if type(error) == grpc.aio.AioRpcError: trailing_metadata = error.trailing_metadata() if trailing_metadata and len(trailing_metadata): return f'trailing_metadata={trailing_metadata}' return None
[ 297, 4812, 773 ]
def METHOD_NAME(self): p = self._getProcessor() assert p.getJob()
[ 9, 2422, 19, 202 ]
def METHOD_NAME(self):
[ 9, 553, 146 ]
def METHOD_NAME(self): assert ( not self.require_copyright_holder or self.copyright_holder != "" ), "Assertion Failed: {} License requires a copyright holder".format( self.license_id ) assert isinstance( self.copyright_holder, str ), "Assertion Failed: Copyright holder must be a string"
[ 187 ]
def METHOD_NAME(): shape = (200, 300) wcs = make_wcs(shape) gwcs_obj = make_gwcs(shape) sc1 = wcs.pixel_to_world((50, 75), (50, 100)) sc2 = gwcs_obj.pixel_to_world((50, 75), (50, 100)) assert_allclose(sc1.ra, sc2.ra) assert_allclose(sc1.dec, sc2.dec)
[ 9, 93, 7807, 979 ]
def METHOD_NAME(self, pulses): pulses_df = pd.DataFrame([self.otx.get_pulse_details(pulse.get('id')) for pulse in pulses.get('pulse_info').get('pulses')]) return pulses_df
[ 662, 2051, 604, 1287, 24, 3344, 2057 ]
def METHOD_NAME(caplog): record = [] class BrokenInstrument: def task_scheduled(self, task): record.append("scheduled") raise ValueError("oops") def close(self): # Shouldn't be called -- tests that the instrument disabling logic # works right. record.append("closed") # pragma: no cover async def main(): record.append("main ran") return _core.current_task() r = TaskRecorder() main_task = _core.run(main, instruments=[r, BrokenInstrument()]) assert record == ["scheduled", "main ran"] # the TaskRecorder kept going throughout, even though the BrokenInstrument # was disabled assert ("after", main_task) in r.record assert ("after_run",) in r.record # And we got a log message exc_type, exc_value, exc_traceback = caplog.records[0].exc_info assert exc_type is ValueError assert str(exc_value) == "oops" assert "Instrument has been disabled" in caplog.records[0].message
[ 9, 7180, 2653 ]
def METHOD_NAME() -> Dict[str, str]: with open(CURRENT_DIR / "un_sdg.sources.json", "r") as f: sources = json.load(f) return cast(Dict[str, str], sources)
[ 557, 1356, 1458, 445 ]
def METHOD_NAME(self): """Cleanup""" self._s1ap_wrapper.cleanup()
[ 531, 481 ]
def METHOD_NAME() -> None: assert util.match_host('192.168.0.1:80', '192.168.0.1:80') is True assert util.match_host('192.168.0.1:80', '192.168.0.1') is True assert util.match_host('192.168.0.1:80', '192.168.0.1:8080') is False assert util.match_host('192.168.0.1', '192.168.0.2') is False assert util.match_host('192.168.0.1', '192.168.*.*') is True assert util.match_host('alice', 'alice') is True assert util.match_host('alice:80', 'alice') is True assert util.match_host('alice', 'bob') is False assert util.match_host('foo.example.com', 'foo.example.com.net') is False assert util.match_host('alice', '*') is True assert util.match_host('alice', '*:*') is True assert util.match_host('alice:80', '*') is True assert util.match_host('alice:80', '*:80') is True assert util.match_host('alice:8080', '*:80') is False
[ 9, 590, 1806 ]
def METHOD_NAME(): test_support.requires('network') test_support.run_unittest(CreationTestCase, TimeoutTestCase)
[ 9, 57 ]
def METHOD_NAME(self): # http://bob.nem.ninja:8765/#/aggregate/1fbdae5ba753e68af270930413ae90f671eb8ab58988116684bac0abd5726584 m = _create_msg(NEM_NETWORK_TESTNET, 6542254, 40000000, 6545854, 4, 2) t = serialize_aggregate_modification( m.transaction, m.aggregate_modification, unhexlify( "6bf7849c1eec6a2002995cc457dc00c4e29bad5c88de63f51e42dfdcd7b2131d" ), ) write_cosignatory_modification( t, 1, unhexlify( "5f53d076c8c3ec3110b98364bc423092c3ec2be2b1b3c40fd8ab68d54fa39295" ), ) write_cosignatory_modification( t, 1, unhexlify( "9eb199c2b4d406f64cb7aa5b2b0815264b56ba8fe44d558a6cb423a31a33c4c2" ), ) write_cosignatory_modification( t, 1, unhexlify( "94b2323dab23a3faba24fa6ddda0ece4fbb06acfedd74e76ad9fae38d006882b" ), ) write_cosignatory_modification( t, 1, unhexlify( "d88c6ee2a2cd3929d0d76b6b14ecb549d21296ab196a2b3a4cb2536bcce32e87" ), ) write_minimum_cosignatories(t, 2) self.assertEqual( hashlib.sha3_256(t, keccak=True).digest(), unhexlify( "1fbdae5ba753e68af270930413ae90f671eb8ab58988116684bac0abd5726584" ), )
[ 9, 13369, 1853, 3428, 1628, 1821, 194 ]
def METHOD_NAME(op, iOutput):
[ 837, 838, 144, 101 ]
def METHOD_NAME(): return generate_token()
[ 567, 1419 ]
def METHOD_NAME(self, name_id): """ :param name_id: The subject identifier, a NameID instance """ del self._db[code(name_id)] if self._sync: try: self._db.sync() except AttributeError: pass
[ 34 ]