text: string (lengths 15 to 7.82k)
ids: sequence (lengths 1 to 7)
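For orientation, a minimal sketch of how rows with this schema might be consumed follows. The dataset name, the split, and the use of the Hugging Face `datasets` loader are assumptions for illustration only; only the column names ("text", "ids") and their length ranges come from the preview above.

# Sketch only: assumes the rows below come from a Hugging Face-style dataset
# with a "text" column (source code with the method name masked as METHOD_NAME)
# and an "ids" column (short integer sequences, e.g. sub-token ids for the masked name).
from datasets import load_dataset

# Placeholder dataset path and split -- substitute the real identifiers.
ds = load_dataset("your-namespace/method-name-dataset", split="train")

for row in ds:
    code_text = row["text"]   # string, roughly 15 to 7.82k characters per the preview
    label_ids = row["ids"]    # list of ints, 1 to 7 elements per the preview
    # Every example shown in this preview contains the METHOD_NAME placeholder.
    assert "METHOD_NAME" in code_text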
def METHOD_NAME(tensor_parallel_degree, tensor_parallel_rank, hidden_size, intermediate_size): def fn(x): if x is None: return None x = merge_o_tensor_parallel_weight( x, tensor_parallel_degree=tensor_parallel_degree, hidden_size=hidden_size, intermediate_size=intermediate_size, ) return x return fn
[ 1857, 5786, 411, 667 ]
def METHOD_NAME(self): parent_path = os.path.dirname(os.path.realpath(__file__)) project_location = os.path.join(parent_path, 'res_test_red_library_autodiscover', 'python_libs') python_paths, class_paths = _collect_source_paths(project_location, False) self.assertEqual(python_paths, [project_location, os.path.join(project_location, 'a'), os.path.join(project_location, 'other')]) self.assertEqual(class_paths, [])
[ 9, 217, 612, 3336, 472, 130, 12265 ]
def METHOD_NAME(nlp_doc): negation_tokens = list(filter(lambda tok: tok.dep_ == "neg", nlp_doc)) negated_entities = list(filter(lambda tok: tok._.negex, nlp_doc.ents)) return { "negation_words": len(negation_tokens), "negated_entities": len(negated_entities)}
[ 2991, 5245, 1473, 61, 5399 ]
def METHOD_NAME(self, lines): source_unit = None comment = "" if not isinstance(lines, list): lines = lines.split(b"\n") for lineoffset, line in enumerate(lines): if line.endswith(b"\r"): self.eol = "\r\n" line = line.decode(self.encoding).rstrip("\n").rstrip("\r") if lineoffset == 0 and line == "## active ##": self.is_active = True continue header_meta_data = ( line.startswith("## ") and not line.startswith("## TAG") and not line.startswith("## MAX_LENGTH") ) if header_meta_data: self._headers.append(line) continue if len(line) == 0 and not source_unit: if len(self.units) == 0: self._headers.append(line) # Append blank lines to header # else skip blank lines continue if source_unit: # If we have a source_unit get the target source_unit.rawtarget = line if line != source_unit.source: source_unit.target = strip_ok(line) else: source_unit.target = "" source_unit = None continue is_comment = line.startswith("#") and ( not line.startswith("##") or line.startswith("## TAG") or line.startswith("## MAX_LENGTH") ) if is_comment: # Read comments, *including* meta tags (e.g. '## TAG') comment += line[1:].strip() + "\n" if line.startswith(";"): source_unit = self.addsourceunit(line[1:]) source_unit.eol = self.eol source_unit.addlocation( "%s:%d" % (self.filename[len(self.location_root) :], lineoffset + 1) ) if comment is not None: source_unit.addnote(comment[:-1], "developer") comment = ""
[ 214 ]
def METHOD_NAME(minimal_swagger_spec): response_spec = { 'description': 'Address', 'schema': { 'type': 'object', 'properties': { 'first_name': { 'type': 'string', }, 'last_name': { 'type': 'string', }, }, }, } op = Operation( minimal_swagger_spec, '/foo', 'get', op_spec={'produces': ['application/json']}, ) response = Mock( spec=OutgoingResponse, content_type='application/json', json=Mock( spec=dict, return_value={ 'first_name': 'darwin', 'last_name': 'niwrad', }, ), ) validate_response_body(op, response_spec, response)
[ 9, 1434, 763, 17 ]
def METHOD_NAME(self): self.assertEqual( app_has_been_submitted_to_in_last_30_days(self.domain, self.app_id), True)
[ 9, 991, 220, 8615, 4912, 24, 623 ]
def METHOD_NAME(): im = RatioImage() im.prepare_image(data, 1) im.image() data_out = get_arr_from_imager(im, data.y) im.xpaset("quit") # All values but the first will be 1, because the first # model pixel will be zero, and therefore the ratio function # reassigns the ratio there to be one. expval = np.ones(data.y.shape) expval[0, 0] = 0 assert data_out == pytest.approx(expval)
[ 9, 4517, 660 ]
def METHOD_NAME(self): """ Check that nothing is updated if orc8r is unreachable """ rules_by_sid = {} rules_by_basename = { "bn1": ChargingRuleNameSet( RuleNames=["p4", "p5"], ), } reauth_handler = ReAuthHandler( rules_by_sid, MockSessionProxyResponderStub(), ) servicer = PolicyRpcServicer( reauth_handler, rules_by_basename, MockPolicyAssignmentControllerStub2(), ) # Bind the rpc server to a free port thread_pool = futures.ThreadPoolExecutor(max_workers=10) rpc_server = grpc.server(thread_pool) port = rpc_server.add_insecure_port('0.0.0.0:0') # Create a mock "mconfig" for the servicer to use mconfig = unittest.mock.Mock() mconfig.ip_block = None # Add the servicer servicer.add_to_server(rpc_server) rpc_server.start() # Create a rpc stub channel = grpc.insecure_channel('0.0.0.0:{}'.format(port)) stub = PolicyDBStub(channel) req = EnableStaticRuleRequest( imsi="s1", rule_ids=["p1", "p2", "p3"], base_names=["bn1"], ) with self.assertRaises(grpc.RpcError): stub.EnableStaticRules(req) self.assertFalse( "s1" in rules_by_sid, "There should be no installed policies for s1", )
[ 9, 180, 17867 ]
def METHOD_NAME(self): return self.PORT_START
[ 237, 447 ]
def METHOD_NAME( cpu_only, py3_only, ecs_container_instance, mxnet_training, training_cmd, ecs_cluster_name ): """ CPU Gluon NLP for MXNet Training Instance Type - c5.9xlarge DGL is only supported in py3, hence we have used the "py3_only" fixture to ensure py2 images don't run on this function. Given above parameters, registers a task with family named after this test, runs the task, and waits for the task to be stopped before doing teardown operations of instance and cluster. """ instance_id, cluster_arn = ecs_container_instance ecs_utils.ecs_training_test_executor( ecs_cluster_name, cluster_arn, training_cmd, mxnet_training, instance_id )
[ 9, 2906, 3591, 2685, 3592, 2265 ]
def METHOD_NAME(self): self.model = pin.buildSampleModelHumanoidRandom() self.parent_idx = self.model.getJointId("rarm2_joint") if self.model.existJointName("rarm2_joint") else (self.model.njoints-1) self.frame_name = self.model.names[self.parent_idx] + "_frame" self.frame_placement = pin.SE3.Random() self.frame_type = pin.FrameType.OP_FRAME self.model.addFrame(pin.Frame(self.frame_name, self.parent_idx, 0, self.frame_placement, self.frame_type)) self.frame_idx = self.model.getFrameId(self.frame_name)
[ 0, 1 ]
async def METHOD_NAME(request: web.Request): req_ctx = _RequestContext.parse_obj(request) path_params = parse_request_path_parameters_as(_WalletsGroupsPathParams, request) body_params = await parse_request_body_as(_WalletsGroupsBodyParams, request) wallet_groups: WalletGroupGet = await _groups_api.METHOD_NAME( request.app, user_id=req_ctx.user_id, wallet_id=path_params.wallet_id, group_id=path_params.group_id, read=body_params.read, write=body_params.write, delete=body_params.delete, ) return envelope_json_response(wallet_groups, web.HTTPCreated)
[ 129, 2945, 846 ]
def METHOD_NAME(): print """This script mainly serves as the basis of your customizations.
[ 38, 40 ]
def METHOD_NAME(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var]
[ 0, 485, 486 ]
def METHOD_NAME(arr): a = numpy.array(arr) dpnp_a = dpnp.array(arr) expected = numpy.repeat(a, 2) result = dpnp.repeat(dpnp_a, 2) assert_array_equal(expected, result)
[ 9, 5293 ]
def METHOD_NAME(*args, sender: Consumer = None, **kwargs): logger.info(f"worker {sender.hostname} ready") queue = sender.hostname.split("_", maxsplit=1)[1] logger.info(f"Updating repositories inside {queue}") if settings.REPO_DOWNLOADER_ENABLED and queue == get_queue_name(DEFAULT_QUEUE): for task in PeriodicTask.objects.filter( enabled=True, queue=queue, task="intel_owl.tasks.update" ): config_pk = task.kwargs["config_pk"] logger.info(f"Updating {config_pk}") update(config_pk)
[ 1794, 1338, 707 ]
def METHOD_NAME(self): tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
[ 1458 ]
def METHOD_NAME(test_file: str, check_gas_left: bool) -> None: run_control_flow_ops_vm_test(test_file, check_gas_left=check_gas_left)
[ 9, 15595 ]
def METHOD_NAME(modernize_project_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, subscription_id: Optional[pulumi.Input[Optional[str]]] = None, workload_instance_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkloadInstanceResult]: """ Gets the details of the workload instance. :param str modernize_project_name: ModernizeProject name. :param str resource_group_name: Name of the Azure Resource Group that project is part of. :param str subscription_id: Azure Subscription Id in which project was created. :param str workload_instance_name: Workload instance name. """ ...
[ 19, 2454, 89, 146 ]
def METHOD_NAME(self) -> None:
[ 2656 ]
def METHOD_NAME(self) -> list[Filter]: """ Load and return the release filtering plugin objects Returns ------- list of bandersnatch.filter.Filter: List of objects derived from the bandersnatch.filter.Filter class """ if RELEASE_PLUGIN_RESOURCE not in self.loaded_filter_plugins: self._load_filters([RELEASE_PLUGIN_RESOURCE]) return self.loaded_filter_plugins[RELEASE_PLUGIN_RESOURCE]
[ 527, 586, 1294 ]
def METHOD_NAME(part_id, stored_subs, language): stored_sub = stored_subs.get_any(part_id, language) if stored_sub and stored_sub.storage_type == "filesystem": return True
[ 220, 751, 3332 ]
def METHOD_NAME(): # Avidin protein sequence seq1 = seq.ProteinSequence("MVHATSPLLLLLLLSLALVAPGLSARKCSLTGKWTNDLGSNMTIGAVNSRGEFTGTYITAVTATSNEIKESPLHGTQNTINKRTQP" "TFGFTVNWKFSESTTVFTGQCFIDRNGKEVLKTMWLLRSSVNDIGDDWKATRVGINIFTRLRTQKE") # Streptavidin protein sequence seq2 = seq.ProteinSequence("MRKIVVAAIAVSLTTVSITASASADPSKDSKAQVSAAEAGITGTWYNQLGSTFIVTAGADGALTGTYESAVGNAESRYVLTGRYDSA" "PATDGSGTALGWTVAWKNNYRNAHSATTWSGQYVGGAEARINTQWLLTSGTTEANAWKSTLVGHDTFTKVKPSAASIDAAKKAGVNN" "GNPLDAVQQ") matrix = align.SubstitutionMatrix.std_protein_matrix() alignment = align.align_optimal(seq1, seq2, matrix)[0] profile = seq.SequenceProfile.from_alignment(alignment) assert seq.ProteinSequence("MRHIATAAIALSLLLLSITALASADPGKDSKAQLSAAEAGITGKWTNDLGSNFIIGAVGADGAFTGTYESAVGNAESNEIKEGPLD" "GAPATDGKGTALGWTFAFKNNWKFAESATTFSGQCFGGADARINGKELLTKGTMEANAWKSTLLGHDSFSKVKDIAADIDAAKKAG" "INIFNPLDAQKE") == profile.to_consensus()
[ 9, 24, 1810, 12514 ]
def METHOD_NAME(self, elements, size): groups = [] offsets = list(range(0, len(elements), size)) + [None] for i in range(0, len(offsets) - 1): groups.append(elements[offsets[i]:offsets[i+1]]) return groups
[ 93, 861 ]
def METHOD_NAME(self) -> int: """ How many steps in the forward and backwards direction :return: Number of steps in float, indicates half steps """ return self.distance / self.torso_step_length
[ 1783, 367, 29 ]
def METHOD_NAME(mock_alembic, mock_database): mock_database() alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path("")) alembic_util.init_alembic() assert mock_alembic.upgrade_calls == ["head"]
[ 9, 463, 954, 654, 71 ]
def METHOD_NAME(switch_driver) -> None: """ to check the protection mode (34934A module only) """ assert switch_driver.module[1].protection_mode() == 'AUTO100'
[ 9, 9089, 854 ]
def METHOD_NAME( databin_dir, direction, spm_vocab=SPM_VOCAB, prefix='', splits=['train', 'test', 'valid'], pairs_per_shard=None, ): def move_databin_files(from_folder, to_folder): for bin_file in glob.glob(f"{from_folder}/*.bin") \ + glob.glob(f"{from_folder}/*.idx") \ + glob.glob(f"{from_folder}/dict*"): try: shutil.move(bin_file, to_folder) except OSError as error: print(error) bpe_databin_dir = f"{BPE_DIR}/{direction}{prefix}_databin" bpe_dir = f"{BPE_DIR}/{direction}{prefix}" if pairs_per_shard is None: binarize_(bpe_dir, bpe_databin_dir, direction, spm_vocab=spm_vocab, splits=splits) move_databin_files(bpe_databin_dir, databin_dir) else: # binarize valid and test which will not be sharded binarize_( bpe_dir, bpe_databin_dir, direction, spm_vocab=spm_vocab, splits=[s for s in splits if s != "train"]) for shard_bpe_dir in glob.glob(f"{bpe_dir}/shard*"): path_strs = os.path.split(shard_bpe_dir) shard_str = path_strs[-1] shard_folder = f"{bpe_databin_dir}/{shard_str}" databin_shard_folder = f"{databin_dir}/{shard_str}" print(f'working from {shard_folder} to {databin_shard_folder}') os.makedirs(databin_shard_folder, exist_ok=True) binarize_( shard_bpe_dir, shard_folder, direction, spm_vocab=spm_vocab, splits=["train"]) for test_data in glob.glob(f"{bpe_databin_dir}/valid.*") + glob.glob(f"{bpe_databin_dir}/test.*"): filename = os.path.split(test_data)[-1] try: os.symlink(test_data, f"{databin_shard_folder}/{filename}") except OSError as error: print(error) move_databin_files(shard_folder, databin_shard_folder)
[ 8431 ]
def METHOD_NAME(path, base_port, long_timeout_commit=False): config = "configs/default.jsonnet" if long_timeout_commit is True: config = "configs/long_timeout_commit.jsonnet" cfg = Path(__file__).parent / config yield from setup_custom_evmos(path, base_port, cfg)
[ 102, 4723 ]
def METHOD_NAME(node_index, node_label, gnn_model, graph, criterion): gnn_model.METHOD_NAME() pred = gnn_model(graph, graph.node_feat["words"]) pred = paddle.gather(pred, node_index) loss = criterion(pred, node_label) acc = paddle.metric.accuracy(input=pred, label=node_label, k=1) return loss, acc
[ 1171 ]
def METHOD_NAME(field, value): return get_default_field_value(field, value)
[ 89, 2897 ]
def METHOD_NAME(self): self.fernet_mock.decrypt.return_value = 'a' * 32 self.db.router.get_uaid.return_value = dict( uaid=dummy_uaid, chid=dummy_chid, ) resp = yield self.client.post( self.url(api_ver='v1', token='ignored'), headers={'authorization': 'webpush dummy.key'} ) assert resp.get_status() == 410
[ 9, 377, 654, 2501, 44 ]
def METHOD_NAME(self) -> bool: """Return True if the alert has a Log Analytics WorkspaceID.""" return "WorkspaceId" in self._ids and "AgentId" in self._ids
[ 137, 623, 1976 ]
def METHOD_NAME(self) -> Headers: METHOD_NAME = self.__class__() METHOD_NAME._dict = self._dict.METHOD_NAME() METHOD_NAME._list = self._list.METHOD_NAME() return METHOD_NAME
[ 215 ]
def METHOD_NAME(op, data, index, **kwargs): return data.apply( lambda array, index=index: ( array[index] if -len(array) <= index < len(array) else None ) )
[ 750, 877, 724 ]
def METHOD_NAME(): param_names = ['cls', 'tilemap', 'start_rect', 'dx', 'dy', 'expect_dxdy'] scenaries = [ d for d in aux.case_generator(aux.first_expansion(maps_cache, aux.common_base_cases))] sufixes_parametrized_tests = [] cases = [] for cls in [RectMapCollider, RectMapWithPropsCollider]: for d in scenaries: d['cls'] = cls test_sufix, params = scenario_to_test_sufix_and_tuple_params(d) sufixes_parametrized_tests.append(test_sufix) cases.append(params) return (param_names, cases, False, sufixes_parametrized_tests)
[ 434, 43, 9, 654, 7457 ]
def METHOD_NAME(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE: """Plot a single or multiple values from the metric. Args: val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results. If no value is provided, will automatically call `metric.compute` and plot that result. ax: An matplotlib axis object. If provided will add plot to that axis Returns: Figure and Axes object Raises: ModuleNotFoundError: If `matplotlib` is not installed .. plot:: :scale: 75 >>> # Example plotting a single value >>> import torch >>> from torchmetrics.nominal import PearsonsContingencyCoefficient >>> metric = PearsonsContingencyCoefficient(num_classes=5) >>> metric.update(torch.randint(0, 4, (100,)), torch.randint(0, 4, (100,))) >>> fig_, ax_ = metric.plot() .. plot:: :scale: 75 >>> # Example plotting multiple values >>> import torch >>> from torchmetrics.nominal import PearsonsContingencyCoefficient >>> metric = PearsonsContingencyCoefficient(num_classes=5) >>> values = [ ] >>> for _ in range(10): ... values.append(metric(torch.randint(0, 4, (100,)), torch.randint(0, 4, (100,)))) >>> fig_, ax_ = metric.plot(values) """ return self._plot(val, ax)
[ 1288 ]
def METHOD_NAME(): paddle.set_device(args.device) model = ErnieForGeneration.from_pretrained(args.model_name_or_path) if "ernie-tiny" in args.model_name_or_path: tokenizer = ErnieTinyTokenizer.from_pretrained(args.model_name_or_path) elif "ernie" in args.model_name_or_path: tokenizer = ErnieTokenizer.from_pretrained(args.model_name_or_path) elif "roberta" in args.model_name_or_path or "rbt" in args.model_name_or_path: tokenizer = RobertaTokenizer.from_pretrained(args.model_name_or_path) elif "electra" in args.model_name_or_path: tokenizer = ElectraTokenizer.from_pretrained(args.model_name_or_path) else: tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path) dev_dataset = load_dataset("poetry", splits=("dev"), lazy=False) attn_id = tokenizer.vocab["[ATTN]"] if "[ATTN]" in tokenizer.vocab else tokenizer.vocab["[MASK]"] tgt_type_id = model.sent_emb.weight.shape[0] - 1 trans_func = convert_example( tokenizer=tokenizer, attn_id=attn_id, tgt_type_id=tgt_type_id, max_encode_len=args.max_encode_len, max_decode_len=args.max_decode_len, ) batchify_fn = lambda samples, fn=Tuple( Pad(axis=0, pad_val=tokenizer.pad_token_id), # src_ids Pad(axis=0, pad_val=tokenizer.pad_token_id), # src_pids Pad(axis=0, pad_val=tokenizer.pad_token_id), # src_sids Pad(axis=0, pad_val=tokenizer.pad_token_id), # tgt_ids Pad(axis=0, pad_val=tokenizer.pad_token_id), # tgt_pids Pad(axis=0, pad_val=tokenizer.pad_token_id), # tgt_sids Pad(axis=0, pad_val=tokenizer.pad_token_id), # attn_ids Pad(axis=0, pad_val=tokenizer.pad_token_id), # tgt_labels ): after_padding(fn(samples)) dev_dataset = dev_dataset.map(trans_func) dev_batch_sampler = paddle.io.BatchSampler(dev_dataset, batch_size=args.batch_size, shuffle=False) data_loader = DataLoader( dataset=dev_dataset, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn, num_workers=0, return_list=True ) rouge1 = Rouge1() rouge2 = Rouge2() if args.init_checkpoint: model_state = paddle.load(args.init_checkpoint) model.set_state_dict(model_state) model.eval() vocab = tokenizer.vocab eos_id = vocab[tokenizer.sep_token] sos_id = vocab[tokenizer.cls_token] pad_id = vocab[tokenizer.pad_token] unk_id = vocab[tokenizer.unk_token] vocab_size = len(vocab) evaluated_sentences_ids = [] reference_sentences_ids = [] logger.info("Evaluating...") for data in tqdm(data_loader): (src_ids, src_sids, src_pids, _, _, _, _, _, _, _, _, raw_tgt_labels) = data # never use target when infer # Use greedy_search_infilling or beam_search_infilling to get predictions output_ids = beam_search_infilling( model, src_ids, src_sids, eos_id=eos_id, sos_id=sos_id, attn_id=attn_id, pad_id=pad_id, unk_id=unk_id, vocab_size=vocab_size, max_decode_len=args.max_decode_len, max_encode_len=args.max_encode_len, beam_width=args.beam_width, length_penalty=args.length_penalty, tgt_type_id=tgt_type_id, ) for ids in output_ids.tolist(): if eos_id in ids: ids = ids[: ids.index(eos_id)] evaluated_sentences_ids.append(ids) for ids in raw_tgt_labels.numpy().tolist(): ids = ids[: ids.index(eos_id)] reference_sentences_ids.append(ids) score1 = rouge1.score(evaluated_sentences_ids, reference_sentences_ids) score2 = rouge2.score(evaluated_sentences_ids, reference_sentences_ids) logger.info("Rouge-1: %.5f ,Rouge-2: %.5f" % (score1 * 100, score2 * 100))
[ 1195 ]
def METHOD_NAME(identifier, case_id): return case_search.reverse_index_case_query(case_id, identifier)
[ 724, 527 ]
def METHOD_NAME(self): raise NotImplementedError
[ 86, 907 ]
def METHOD_NAME(self): self.acknowledge_lock.acquire() acknowledge = self.acknowledge self.acknowledge_lock.release() return acknowledge
[ 19, 7197 ]
def METHOD_NAME(self) -> str: """ Primary connection string of the alias if GEO DR is enabled """ return pulumi.get(self, "alias_primary_connection_string")
[ 533, 1379, 550, 144 ]
def METHOD_NAME(self): self._stack.append((self._current, self._key))
[ 1013 ]
def METHOD_NAME(self) -> str: """ The name of the ARM resource. """ return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(dash_duo): """Raising PreventUpdate OR returning no_update prevents update and triggering dependencies.""" initial_input = "initial input" initial_output = "initial output" app = Dash(__name__) app.layout = html.Div( [ dcc.Input(id="input", value=initial_input), html.Div(initial_output, id="output1"), html.Div(initial_output, id="output2"), ] ) callback1_count = Value("i", 0) callback2_count = Value("i", 0) @app.callback(Output("output1", "children"), [Input("input", "value")]) def callback1(value): callback1_count.value += 1 if callback1_count.value > 2: return no_update raise PreventUpdate("testing callback does not update") return value @app.callback(Output("output2", "children"), [Input("output1", "children")]) def callback2(value): callback2_count.value += 1 return value dash_duo.start_server(app) input_ = dash_duo.find_element("#input") for i, key in enumerate("xyz"): input_.send_keys(key) until( lambda: callback1_count.value == i + 2, timeout=3, msg="callback1 runs 4x (initial page load and 3x through send_keys)", ) dash_duo.wait_for_text_to_equal("#input", "initial inputxyz") assert ( callback2_count.value == 0 ), "callback2 is never triggered, even on initial load" # double check that output1 and output2 children were not updated assert dash_duo.find_element("#output1").text == initial_output assert dash_duo.find_element("#output2").text == initial_output assert not dash_duo.get_logs() dash_duo.percy_snapshot(name="aborted")
[ 9, -1, 10630, 1076 ]
def METHOD_NAME(x, i, v, op): """Applies an inplace op on (x, i, v). op is one of gen_array_ops.alias_inplace_update, gen_array_ops.alias_inplace_add, or gen_array_ops.alias_inplace_sub. If i is None, x and v must be the same shape. Computes x op v; If i is a scalar, x has a rank 1 higher than v's. Computes x[i, :] op v; Otherwise, x and v must have the same rank. Computes x[i, :] op v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. op: alias_inplace_update, alias_inplace_add, or alias_inplace_sub. Returns: Returns x. """ x = ops.convert_to_tensor(x) v = ops.convert_to_tensor(v, x.dtype) if i is None: # Full tensor. return array_ops.reshape( op(array_ops.reshape(x, [1, -1]), [0], array_ops.reshape(v, [1, -1])), array_ops.shape(x)) i = math_ops.cast(i, dtypes.int32) if i.get_shape().ndims == 0: # Single 0-dim update. return op(x, array_ops.reshape(i, [1]), array_ops.expand_dims(v, 0)) return op(x, i, v)
[ 5920, 1087 ]
def METHOD_NAME(self, flag): """ Tells what trust applies for the given flag -- True if trusted, False if explicitly not trusted, or None if trust for that flag has not been evaluated. """ if (self._affirmed & flag) == flag: return True if (self._denied & flag) == flag: return False return None
[ 9144, 43 ]
def METHOD_NAME(*args, **kwargs): hit_limit = limit and Scope.profile_count > limit if hit_limit or random.random() > probability: return f(*args, **kwargs) else: Scope.profile_count += 1 # Add a timestamp to the profile output when the callable # is actually called. final_log_file = '{}-{}{}'.format(base, datetime.now().isoformat(), ext) prof = cProfile.Profile() try: ret = prof.runcall(f, *args, **kwargs) finally: prof.dump_stats(final_log_file) return ret
[ 921 ]
def METHOD_NAME( self, item: __item_type__, column_desc: ColumnDesc ) -> Optional[QtGui.QColor]: if column_desc == self.NAME: return self._get_subscription_color(item, column_desc) else: return None
[ 19, 7466, 36 ]
def METHOD_NAME(self): self.num_nodes = 2 self.extra_args = [['-txindex'], ['-paytxfee=0.003']] self.setup_clean_chain = True
[ 0, 9, 434 ]
def METHOD_NAME(self): """ Try to create/destroy an utf-8 database name from an sls file #8947 """ expected_result = { "mysql_database_|-A_|-foo \xe6\xba\x96`bar_|-present": { "__run_num__": 0, "comment": "The database foo \xe6\xba\x96`bar has been created", "result": True, }, "mysql_database_|-B_|-foo \xe6\xba\x96`bar_|-absent": { "__run_num__": 1, "comment": "Database foo \xe6\xba\x96`bar has been removed", "result": True, }, } result = {} ret = self.run_function("state.sls", mods="mysql_utf8") if not isinstance(ret, dict): raise AssertionError( ("Unexpected result while testing external mysql utf8 sls: {}").format( repr(ret) ) ) for item, descr in ret.items(): result[item] = { "__run_num__": descr["__run_num__"], "comment": descr["comment"], "result": descr["result"], } self.assertEqual(expected_result, result)
[ 9, 3690, 280, 4687, 171 ]
def METHOD_NAME(StringList): return map(lambda s: "'" + s.replace("'", "''") + "'", StringList)
[ 197, 24, 1621, 144 ]
def METHOD_NAME(self, component_config: Union[Dict, GoodWeBatSetup, GoodWeCounterSetup, GoodWeInverterSetup]) -> None: if isinstance(component_config, Dict): component_type = component_config["type"] else: component_type = component_config.type component_config = dataclass_from_dict(COMPONENT_TYPE_TO_MODULE[ component_type].component_descriptor.configuration_factory, component_config) if component_type in self.COMPONENT_TYPE_TO_CLASS: self.components["component" + str(component_config.id)] = (self.COMPONENT_TYPE_TO_CLASS[component_type]( self.device_config.configuration.modbus_id, component_config, self.client)) else: raise Exception( "illegal component type " + component_type + ". Allowed values: " + ','.join(self.COMPONENT_TYPE_TO_CLASS.keys()) )
[ 238, 1007 ]
def METHOD_NAME(self): url = self.base_url + '?namespace=mynamespace' resp = self.client.get(url).json() assert len(resp['collection']['results']) == 10 assert resp['collection']['count'] == self.num_collections assert resp['collection']['results'][0]['name'] == \ self.collections[0].name assert resp['collection']['results'][9]['name'] == \ self.collections[9].name assert len(resp['repository']['results']) == 0 assert resp['repository']['count'] == self.num_repositories
[ 9, 19, 1174, 1170 ]
def METHOD_NAME(self): """Add all selected items from the current scene""" with preserve_expanded_rows(self.view): with preserve_selection(self.view): self.clear() nodes = commands.get_selected_nodes() items = commands.create_items_from_nodes(nodes) self.add_items(items)
[ 19, 449, 3407 ]
def METHOD_NAME(): """Test method""" testFailCount = 0 testMessages = [] unitTaskName = "unitTask" unitProcessName = "TestProcess" unitTestSim = SimulationBaseClass.SimBaseClass() testProcessRate = macros.sec2nano(0.5) testProc = unitTestSim.CreateNewProcess(unitProcessName) testProc.addTask(unitTestSim.CreateNewTask(unitTaskName, testProcessRate)) # setup module to be tested module = mappingInstrument.MappingInstrument() module.ModelTag = "mappingInstrumentTag" unitTestSim.AddModelToTask(unitTaskName, module) module.nodeBaudRate = 1. # Configure blank module input messages accessInMsgData1 = messaging.AccessMsgPayload() accessInMsgData1.hasAccess = 1 accessInMsg1 = messaging.AccessMsg().write(accessInMsgData1) accessInMsgData2 = messaging.AccessMsgPayload() accessInMsgData2.hasAccess = 0 accessInMsg2 = messaging.AccessMsg().write(accessInMsgData2) # subscribe input messages to module module.addMappingPoint(accessInMsg1, '1') module.addMappingPoint(accessInMsg2, 'data2') # setup output message recorder objects dataLogs = [] for idx in range(0, 2): dataLogs.append(module.dataNodeOutMsgs[idx].recorder()) unitTestSim.AddModelToTask(unitTaskName, dataLogs[idx]) unitTestSim.InitializeSimulation() unitTestSim.ConfigureStopTime(macros.sec2nano(1.0)) unitTestSim.ExecuteSimulation() # pull module data and make sure it is correct dataAmt = [] dataNames = [] for idx in range(0, 2): dataNames.append(dataLogs[idx].dataName) dataAmt.append(dataLogs[idx].baudRate) dataAmt = np.array(dataAmt) dataNames = np.array(dataNames) if not np.array_equal(dataAmt[0,:], np.array([1., 1., 1.])): testFailCount += 1 if not np.array_equal(dataAmt[1,:], np.array([0., 0., 0.])): testFailCount += 1 if not np.array_equal(dataNames[0,:], np.array(['1', '1', '1'])): testFailCount += 1 if not np.array_equal(dataNames[1,:], np.array(['data2', 'data2', 'data2'])): testFailCount += 1 if testFailCount == 0: print("PASSED: " + module.ModelTag) else: print(testMessages) return [testFailCount, "".join(testMessages)]
[ 445, 2933, 9, 559 ]
def METHOD_NAME( exploiter_config, callback, scan_completed, stop, hosts_to_exploit, ): callback_barrier_count = 2 def _callback(*_): # Block all threads here until 2 threads reach this barrier, then set stop # and test that neither thread continues to scan. _callback.barrier.wait() stop.set() _callback.barrier = Barrier(callback_barrier_count) stoppable_callback = MagicMock(side_effect=_callback) # Intentionally NOT setting scan_completed.set(); _callback() will set stop e = Exploiter(MockPuppet(), callback_barrier_count + 2) e.exploit_hosts( exploiter_config, hosts_to_exploit, 1, SERVERS, stoppable_callback, scan_completed, stop ) assert stoppable_callback.call_count == 2
[ 9, 631, 1887, 1076 ]
def METHOD_NAME(repo): """ Given: A modeling rule yml with schema key missing When: running are_keys_empty_in_yml Then: Validate that the modeling rule is invalid """ yml_dict = { "id": "modeling-rule", "name": "Modeling Rule", "fromversion": 3.3, "tags": "tag", "rules": "", } pack = repo.create_pack("TestPack") dummy_modeling_rule = pack.create_modeling_rule("MyRule") structure_validator = StructureValidator(dummy_modeling_rule.yml.path) dummy_modeling_rule.yml.write_dict(yml_dict) with ChangeCWD(repo.path): modeling_rule_validator = ModelingRuleValidator(structure_validator) assert not modeling_rule_validator.are_keys_empty_in_yml()
[ 9, 137, 1038, 59, 280, 5024 ]
def METHOD_NAME(code): command = ['secretcli', 'query', 'compute', 'list-contract-by-code', code] return json.loads(run_command(command))
[ 245, 1522, 604, 544 ]
def METHOD_NAME(self): asset = presences.ActivityAssets( application_id=None, large_image=None, large_text=None, small_image="meow:nyaa", small_text=None ) with pytest.raises(RuntimeError, match="Unknown asset type"): asset.make_small_image_url()
[ 9, 93, 565, 660, 274, 1646, 46 ]
def METHOD_NAME(self)-> KeyBindingsBase: """All key binding for the Dialog with Navigation bar Returns: KeyBindings: The method according to the binding key """ kb = KeyBindings() @kb.add('pageup', eager=True) ### eager neglect any other keybinding def _go_pageup(event) -> None: if self.navbar : app = get_app() self.navbar.go_up() app.layout.focus(self.navbar) @kb.add('pagedown', eager=True) def _go_pagedown(event) -> None: if self.navbar: app = get_app() self.navbar.go_down() app.layout.focus(self.navbar) @kb.add('f2', eager=True) def _go_up(event) -> None: if self.button_functions: for k in range(len(self.button_functions)): if str(self.button_functions[k][1]).lower() == 'save': self.button_functions[k][0]() return kb
[ 19, 9105, 681, 59, 5992 ]
def METHOD_NAME(*args: Any, **kwargs: Any) -> RelativeLinksHelpExtension: return RelativeLinksHelpExtension(*args, **kwargs)
[ 93, 2916 ]
def METHOD_NAME(self): class SelfCheckExceptionHandling(Exception): pass caller_thread_id = threading.current_thread().ident @run_in_thread_with_timeout def my_errant_function(*_args, **_kwargs): if threading.current_thread().ident != caller_thread_id: raise SelfCheckExceptionHandling() # Suppress error output by redirecting to stringio_stderr stringio_stderr = pika.compat.StringIO() try: with mock.patch.object(_ThreadedTestWrapper, '_stderr', stringio_stderr): with self.assertRaises(AssertionError) as exc_ctx: my_errant_function() self.assertIn('raise SelfCheckExceptionHandling()', exc_ctx.exception.args[0]) expected_tail = 'SelfCheckExceptionHandling\n' self.assertEqual(exc_ctx.exception.args[0][-len(expected_tail):], expected_tail) self.assertIn('raise SelfCheckExceptionHandling()', stringio_stderr.getvalue()) self.assertEqual(stringio_stderr.getvalue()[-len(expected_tail):], expected_tail) except Exception: try: print('This stderr was captured from our thread wrapper:\n', stringio_stderr.getvalue(), file=sys.stderr) except Exception: # pylint: disable=W0703 pass raise
[ 9, 2409, 47, 374, 280, 9, 2046 ]
def METHOD_NAME(): """ "original": "None<br/><br/>Must be enrolled in Exercise Physiology (3871) <br/><br/>", "processed": "Must be Exercise Physiology (3871)" """ return "3871"
[ 6286, 790, 12545 ]
def METHOD_NAME(self):
[ 9, 236, 2059 ]
def METHOD_NAME(python_app_type): if python_app_type == "wsgi": return """
[ 440, 991, 2433 ]
def METHOD_NAME(): """ api: paddle.dist op version: 12 """ op = Net(p=float('-inf')) op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 obj = APIOnnx(op, 'dist', [12]) obj.set_input_data( "input_data", paddle.to_tensor( randtool("float", -1, 1, [3, 3, 3]).astype('float32')), paddle.to_tensor( randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) obj.run()
[ 9, 1260, 1962 ]
def METHOD_NAME(self): pkgver = self.fields['pkgver'][0] if 'pkgver' in self.fields else None if pkgver is not None: m = re.search(br'-([^-])*$', pkgver) if m: return m.group(1) return None
[ 586 ]
def METHOD_NAME(self): from traitsui.api import View, Item, Group, InstanceEditor, EnumEditor from PYME.ui.custom_traits_editors import HistLimitsEditor, CBEditor return View([#Group([ Item('dsname', label='Data', editor=EnumEditor(name='_datasource_choices')), Item('method'), Item('depth'),Item('min_points'), #Item('vertexColour', editor=EnumEditor(name='_datasource_keys'), label='Colour'), #Group([Item('clim', editor=HistLimitsEditor(data=self._get_cdata), show_label=False), ]), Group([Item('cmap', label='LUT'), Item('alpha')])], ) # buttons=['OK', 'Cancel'])
[ 235, 1179 ]
def METHOD_NAME(self): if hasattr(self.optimizer, "supports_step_with_scale"): return self.optimizer.METHOD_NAME return False
[ 1466, 367, 41, 930 ]
def METHOD_NAME(self, text): """ Set image type depending of user choice """ if "l2" in text: self.uiTypeComboBox.setCurrentIndex(0) # L2 image elif "l3" in text: self.uiTypeComboBox.setCurrentIndex(1) # L3 image
[ 660, 534, 2004, 526, 1180, 3572 ]
def METHOD_NAME(self, route_point, cleverage_resp): logging.getLogger(__name__).debug('cleverage response: {}'.format(cleverage_resp)) line_code = route_point.fetch_line_id(self.object_id_tag) schedules = next( (line['schedules'] for line in cleverage_resp if line['code'].lower() == line_code.lower()), None ) if schedules: next_passages = [] for next_expected_st in schedules: # for the moment we handle only the NextStop and the direction dt = self._get_dt(next_expected_st['departure']) direction = next_expected_st.get('destination_name') is_real_time = next_expected_st.get('realtime') == '1' next_passage = RealTimePassage(dt, direction, is_real_time) next_passages.append(next_passage) return next_passages else: return None
[ 19, 14101 ]
def METHOD_NAME( self, N, G, D, H, W, epsilon, order, gc, dc): op = core.CreateOperator( "GroupNorm", ["X", "gamma", "beta"], ["Y", "mean", "inv_std"], group=G, epsilon=epsilon, order=order, ) C = G * D if order == "NCHW": X = np.random.randn(N, C, H, W).astype(np.float32) + 1.0 else: X = np.random.randn(N, H, W, C).astype(np.float32) + 1.0 gamma = np.random.randn(C).astype(np.float32) beta = np.random.randn(C).astype(np.float32) inputs = [X, gamma, beta] def ref_op(X, gamma, beta): if order == "NCHW": return self.group_norm_nchw_ref(X, gamma, beta, G, epsilon) else: return self.group_norm_nhwc_ref(X, gamma, beta, G, epsilon) self.assertReferenceChecks( device_option=gc, op=op, inputs=inputs, reference=ref_op, threshold=5e-3, ) self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
[ 9, 846, 387, 1085 ]
def METHOD_NAME(self):
[ 9, 1024, 846 ]
def METHOD_NAME(self): @dsl.container_component def hello_world_io( text: str, text_output_path: dsl.OutputPath(str)) -> dsl.ContainerSpec: """Hello world component with input and output.""" return dsl.ContainerSpec( image='python:3.7', command=['echo'], args=['--text', text, '--output_path', text_output_path]) self.assertIsInstance(hello_world_io, container_component_class.ContainerComponent)
[ 9, 717, 41, 53, 249 ]
def METHOD_NAME() -> None: lex = FilterSyntaxLexer().lex data = "string-2-2 string" assert [(tok.type, tok.value) for tok in lex(data)] == [ ("STRING", "string-2-2"), ("STRING", "string"), ]
[ 9, 5886, 13616, 623, 144 ]
def METHOD_NAME(self, get_proxy: MagicMock, RequestParser: MagicMock) -> None: input_data = [ json.dumps({ 'anykey1': 'anyval1' }), json.dumps({ 'anykey2': 'anyval2' }) ] RequestParser().parse_args.return_value = dict(data=input_data, index='fake_index') with self.assertRaises(ValidationError): DocumentTablesAPI().put()
[ 9, 1276, 107, 2253, 216 ]
def METHOD_NAME(self) -> None: data = bytearray(ICON_BANNER_SIZE) write_u16(data, self.version, 0x0) data[0x20 : 0x20 + 0x200] = self.icon.bitmap data[0x220 : 0x220 + 0x20] = self.icon.palette data[0x240 : 0x240 + 0x100] = _utf16_encode_fixed(self.title_japanese) data[0x340 : 0x340 + 0x100] = _utf16_encode_fixed(self.title_english) data[0x440 : 0x440 + 0x100] = _utf16_encode_fixed(self.title_french) data[0x540 : 0x540 + 0x100] = _utf16_encode_fixed(self.title_german) data[0x640 : 0x640 + 0x100] = _utf16_encode_fixed(self.title_italian) data[0x740 : 0x740 + 0x100] = _utf16_encode_fixed(self.title_spanish) calculated_checksum = nds_crc16(data, 0x20, 0x820) write_u16(data, calculated_checksum, 0x2) self.rom.iconBanner = data
[ 73, 24, 11121 ]
def METHOD_NAME(self, args: Dict[str, Any]) -> None: """ Run the notebook. """ # Exclude arguments that are not needed to be passed to the notebook ignore_keys = {constants.LOG_LEVEL_ARG, constants.OUTPUT_NOTEBOOK_ARG} nb_parameters = {key:val for key,val in args.items() if key not in ignore_keys} # Get environment variables env_vars = HiveToBigQueryScript.get_env_vars() nb_parameters.update(env_vars) # Run the notebook output_path = args[constants.OUTPUT_NOTEBOOK_ARG] pm.execute_notebook( 'hive2bq/HiveToBigquery_notebook.ipynb', output_path, parameters=nb_parameters, log_output=True )
[ 22 ]
def METHOD_NAME(sym=None, with_private=False, with_color=True, color=None, color2=None, truncate=True, with_methods=True, _larch=None): """show group members: Options ------- with_private: show 'private' members ('__private__') if True with_color: show alternating lines in color if True and color is available. truncate: truncate representation of lengthy lists and tuples if True with_methods: suppress display of methods if False """ if sym is None: sym = _larch.symtable group = None symtable = _larch.symtable display = symtable._sys.display with_color = with_color and display.use_color title = sym if symtable.isgroup(sym): group = sym title = repr(sym)[1:-1] elif isinstance(sym, types.ModuleType): group = sym title = sym.__name__ if group is None: _larch.writer.write("%s\n" % repr(sym)) return if title.startswith(symtable.top_group): title = title[6:] if group == symtable: title = 'Group _main' ## set colors for output colopts1 = display.colors['text'] colopts2 = display.colors['text2'] if with_color: if color is not None: colopts1['color'] = color if color2 is not None: colopts2['color'] = color2 _copts = {1: colopts1, 0: colopts2} members = dir(group) dmembers = [] nmethods = 0 for item in members: if (item.startswith('__') and item.endswith('__') and not with_private): continue obj = getattr(group, item) if callable(obj): nmethods +=1 if not with_methods: continue dmembers.append((item, obj)) write = _larch.writer.write color_output = hasattr(_larch.writer, 'set_textstyle') title_fmt = '== %s: %i methods, %i attributes ==\n' write(title_fmt % (title, nmethods, len(dmembers)-nmethods)) count = 0 for item, obj in dmembers: if (isinstance(obj, numpy.ndarray) and (len(obj) > 10 or len(obj.shape)>1)): dval = "array<shape=%s, type=%s>" % (repr(obj.shape), repr(obj.dtype)) elif isinstance(obj, (list, tuple)) and truncate and len(obj) > 5: dval = "[%s, %s, ... %s, %s]" % (repr(obj[0]), repr(obj[1]), repr(obj[-2]), repr(obj[-1])) else: try: dval = repr(obj) except: dval = obj if color_output: _larch.writer.set_textstyle({True:'text', False:'text2'}[(count%2)==1]) count += 1 write(' %s: %s\n' % (item, dval)) if color_output: _larch.writer.set_textstyle('text') _larch.writer.flush()
[ 697 ]
def METHOD_NAME(): """ test install function with restart=True """ mock_retcode = MagicMock(return_value=0) path = "C:\\KB123456.msu" with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}): assert win_wusa.install(path, restart=True) is True mock_retcode.assert_called_once_with( ["wusa.exe", path, "/quiet", "/forcerestart"], ignore_retcode=True )
[ 9, 428, 1141 ]
def METHOD_NAME( config_settings: Optional[Mapping[str, Any]] = None ) -> List[str]: return []
[ 19, 139, 43, 56, 792 ]
def METHOD_NAME(actual, desired): """Assert all-close for `astropy.coordinates.SkyCoord` objects. - Frames can be different, aren't checked at the moment. """ assert isinstance(actual, SkyCoord) assert isinstance(desired, SkyCoord) assert_allclose(actual.data.lon.deg, desired.data.lon.deg) assert_allclose(actual.data.lat.deg, desired.data.lat.deg)
[ 638, 16055, 5362 ]
def METHOD_NAME(def_config, temp_db_conn, tiger_table, tokenizer_mock, tmp_path): freeze.drop_update_tables(temp_db_conn) with pytest.raises(UsageError) as excinfo: tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock()) assert "database frozen" in str(excinfo.value) assert tiger_table.count() == 0
[ 9, 238, 10700, 365, 463, 2639 ]
def METHOD_NAME(request_body, mocker): handler = RedirectHandlerFactory('all', False) inst = handler() req = urllib_request.Request( 'https://ansible.com/', 'POST' ) req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod') inst.redirect_request(req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/') req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={}, method='GET', origin_req_host='ansible.com', unverifiable=True)
[ 9, 75, 13958, 72 ]
def METHOD_NAME(user_rincewind: models.User) -> models.AccountPhone: """Phone number for user fixture.""" return user_rincewind.add_phone(TEST_NEW_PHONE)
[ -1, 17841 ]
def METHOD_NAME(client_id: int): return f"site-{client_id}"
[ 19, 340, 156 ]
def METHOD_NAME(lst: Iterator[Any]) -> List[Any]: return [item for sublist in lst for item in sublist]
[ 247 ]
def METHOD_NAME(settings): global c if c is None: c = APIDiscourseClient(settings) return c
[ 19, 6994, 340 ]
def METHOD_NAME(value, key): """Lookup key in a dictionary""" return value.get(key, value)
[ 1906 ]
def METHOD_NAME(): self.is_connected = False
[ 1790 ]
def METHOD_NAME(self, msg): self.messages.append(msg)
[ 697, 277 ]
def METHOD_NAME(self): module = snippets.asm_to_gtirb( """ .ref: adr r0, .data0 .load: vld1.8 {d0}, [r0] b .end .data0: .byte 0 .align 2 .end: """, arch=gtirb.Module.ISA.ARM, ) accesses = snippets.parse_souffle_output( module, "composite_data_access" ) self.assertIn( ( next(module.symbols_named(".ref")).referent.address, next(module.symbols_named(".load")).referent.address, next(module.symbols_named(".data0")).referent.address, 8, ), accesses, )
[ 9, 3208, 3209, 3210 ]
def METHOD_NAME(app, url, destination, appVersion=""): # First download the file targzFile = "/tmp/xcarchive.tar.gz" # Clean first if os.path.exists("/tmp/%s.xcarchive" % app): os.system("rm -rf /tmp/%s.xcarchive" % app) responseCode = downloadFile(url, credentials.jenkins['login'], credentials.jenkins['password'], targzFile) if 200 == responseCode: # Not very nice: Let Mac OSX untargzipped everything os.system("open %s" % targzFile) print "Sleeping 5s to let time for the system to unzip xcarchive.tar.gz..." time.sleep(5) # And copy the symbol file at requested destination if not os.path.exists(os.path.dirname(destination)): os.makedirs(os.path.dirname(destination)) print "cp /tmp/%s.xcarchive/dSYMs/%s.app.dSYM/Contents/Resources/DWARF/%s %s" % (app, app, app, destination) os.system("cp /tmp/%s.xcarchive/dSYMs/%s.app.dSYM/Contents/Resources/DWARF/%s %s" % (app, app, app, destination)) # Check the expected symbols file is here if not os.path.exists(destination): print "Error: Can't extract the symbols file" responseCode = 404 return responseCode
[ 136, -1, 754, 755 ]
def METHOD_NAME(settings, max_blocks_per_call=10000): rpc = BitcoinRPC(settings['host'], settings['port'], settings['rpcuser'], settings['rpcpassword']) height = settings['min_height'] while height < settings['max_height']+1: num_blocks = min(settings['max_height']+1-height, max_blocks_per_call) batch = [] for x in range(num_blocks): batch.append(rpc.build_request(x, 'getblockhash', [height + x])) reply = rpc.execute(batch) if reply is None: print('Cannot continue. Program will halt.') return None for x,resp_obj in enumerate(reply): if rpc.response_is_error(resp_obj): print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr) sys.exit(1) assert(resp_obj['id'] == x) # assume replies are in-sequence if settings['rev_hash_bytes'] == 'true': resp_obj['result'] = hex_switchEndian(resp_obj['result']) print(resp_obj['result']) height += num_blocks
[ 19, 573, 2012 ]
def METHOD_NAME(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict())
[ 24, 3 ]
def METHOD_NAME(self): self.assertIn(self.profile, self.organizations['open']) self.assertIn(self.profile.id, self.organizations['open']) self.assertNotIn(self.users['superuser'].profile, self.organizations['open']) self.assertNotIn(self.users['superuser'].profile.id, self.organizations['open']) with self.assertRaisesRegex(TypeError, 'Organization membership test'): 'aaaa' in self.organizations['open']
[ 9, 1992 ]
def METHOD_NAME(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME( column_type_mapping: Dict, columns: List[str], full_table_name: str, key_properties: List[str], schema: Dict, unique_constraints: List[str] = None, use_lowercase: bool = True, ) -> str: columns_and_types = [] column_properties = schema['properties'] for col in columns: column_name = clean_column_name(col, lower_case=use_lowercase) column_type = column_type_mapping[col]['type_converted'] if COLUMN_TYPE_INTEGER == column_type_mapping[col]['type']: column_type = 'INTEGER' column_props = [] if COLUMN_TYPE_NULL not in column_properties.get(col).get('type', []): column_props.append('NOT NULL') col_statement = f'{column_name} {column_type}' if len(column_props) >= 1: col_statement = f"{col_statement} {' '.join(column_props)}" columns_and_types.append(col_statement) if unique_constraints: unique_constraints = [clean_column_name(col, lower_case=use_lowercase) for col in unique_constraints] index_name = '_'.join([ clean_column_name(full_table_name, lower_case=use_lowercase), ] + unique_constraints) index_name = f'unique{index_name}'[:64] columns_and_types.append(f"CONSTRAINT {index_name} Unique({', '.join(unique_constraints)})") if key_properties and len(key_properties) >= 1: col = clean_column_name(key_properties[0], lower_case=use_lowercase) columns_and_types.append(f'PRIMARY KEY ({col})') return f"CREATE TABLE {full_table_name} ({', '.join(columns_and_types)})"
[ 56, 129, 410, 462 ]
def METHOD_NAME() -> LitestarGroup: import litestar.cli.main return cast("LitestarGroup", importlib.reload(litestar.cli.main).litestar_group)
[ 1563, 462 ]