Columns: text (string, lengths 15 to 7.82k) and ids (sequence, lengths 1 to 7)
def METHOD_NAME(resource_id, resource_type, region, **kwargs):
    """Adds tags to an AliCloud resource created by PerfKitBenchmarker.

    Args:
      resource_id: An extant AliCloud resource to operate on.
      resource_type: The type of the resource.
      region: The AliCloud region 'resource_id' was created in.
      **kwargs: dict. Key-value pairs to set on the instance.
    """
    if not kwargs:
        return
    tag_cmd = ALI_PREFIX + [
        'ecs', 'AddTags',
        '--RegionId', region,
        '--ResourceId', resource_id,
        '--ResourceType', resource_type
    ]
    tag_cmd.extend(_BuildTagsList(**kwargs))
    vm_util.IssueRetryableCommand(tag_cmd)
[ 238, 114 ]
def METHOD_NAME(coord, board_size):
    # the pass move is represented either by [] ( = empty coord )
    # OR by [tt] (for boards <= 19 only)
    return len(coord) == 0 or (board_size <= 19 and coord == 'tt')
[ 137, 403, 132 ]
def METHOD_NAME(response: PipelineResponse[HTTPRequestType, AllHttpResponseType]) -> Optional[float]:
    """Get the value of Retry-After in seconds.

    :param response: The PipelineResponse object
    :type response: ~azure.core.pipeline.PipelineResponse
    :return: Value of Retry-After in seconds.
    :rtype: float or None
    """
    headers = case_insensitive_dict(response.http_response.headers)
    retry_after = headers.get("retry-after")
    if retry_after:
        return parse_retry_after(retry_after)
    for ms_header in ["retry-after-ms", "x-ms-retry-after-ms"]:
        retry_after = headers.get(ms_header)
        if retry_after:
            parsed_retry_after = parse_retry_after(retry_after)
            return parsed_retry_after / 1000.0
    return None
[ 19, 2052, 1887 ]
def METHOD_NAME(ddev, repository, helpers):
    import yaml

    check = 'apache'
    spec_yaml = repository.path / check / 'assets' / 'configuration' / 'spec.yaml'
    with spec_yaml.open(encoding='utf-8') as file:
        spec_info = yaml.safe_load(file)
    spec_info['files'][0]['options'][1]['options'] = spec_info['files'][0]['options'][1]['options'][0]
    output = yaml.safe_dump(spec_info, default_flow_style=False, sort_keys=False)
    with spec_yaml.open(mode='w', encoding='utf-8') as file:
        file.write(output)
    result = ddev('validate', 'http', check)
    assert result.exit_code == 1, result.output
    assert helpers.remove_trailing_spaces(result.output) == helpers.dedent(
        """
        HTTP wrapper validation
        └── Apache
            Detected apache is missing `instances/http` or `instances/openmetrics_legacy` template in spec.yaml

        Errors: 1
        """
    )
[ 9, 1457, 1038, 89 ]
def METHOD_NAME(self):
    if self.is_periodic and self.crontab:
        return _('Regularly perform') + " ( {} )".format(self.crontab)
    if self.is_periodic and self.interval:
        return _('Cycle perform') + " ( {} h )".format(self.interval)
    return '-'
[ 2728, 52 ]
def METHOD_NAME(cls, parser):
    """Add optimizer-specific arguments to the parser."""
    dc = getattr(cls, "__dataclass", None)
    if dc is not None:
        gen_parser_from_dataclass(parser, dc())
[ 238, 335 ]
def METHOD_NAME(self, queue, message, **kwargs):
    """Put message onto queue."""
    q = self._new_queue(queue)
    q.push(dumps(message))
[ 1276 ]
def METHOD_NAME(self):
    """
    Whether this symbol is a function
    """
    return self.type is SymbolType.TYPE_FUNCTION
[ 137, 559 ]
def METHOD_NAME(self):
    # handle_stream may be a native coroutine.
    class TestServer(TCPServer):
        async def handle_stream(self, stream, address):
            stream.write(b"data")
            stream.close()

    sock, port = bind_unused_port()
    server = TestServer()
    server.add_socket(sock)
    client = IOStream(socket.socket())
    yield client.connect(("localhost", port))
    result = yield client.read_until_close()
    self.assertEqual(result, b"data")
    server.stop()
    client.close()
[ 9, 276, 919, 1577, 7767 ]
def METHOD_NAME(self, container):
    # type: (dict) -> None
    """Remove loaded container."""
    # Delete container and its contents
    if cmds.objExists(container['objectName']):
        members = cmds.sets(container['objectName'], query=True) or []
        cmds.delete([container['objectName']] + members)

    # Remove the namespace, if empty
    namespace = container['namespace']
    if cmds.namespace(exists=namespace):
        members = cmds.namespaceInfo(namespace, listNamespace=True)
        if not members:
            cmds.namespace(removeNamespace=namespace)
        else:
            self.log.warning("Namespace not deleted because it "
                             "still has members: %s", namespace)
[ 188 ]
def METHOD_NAME(art_warning, image_batch, length, channels_first):
    try:
        cutout = CutoutPyTorch(length=length, channels_first=channels_first)
        count = np.not_equal(cutout(image_batch)[0], image_batch).sum()

        n = image_batch.shape[0]
        if channels_first:
            channels = image_batch.shape[1]
        else:
            channels = image_batch.shape[-1]

        assert count <= n * channels * length * length
    except ARTTestException as e:
        art_warning(e)
[ 9, 1979, 660, 365 ]
def METHOD_NAME(job_name):
    envs = []
    if "ps" == job_name:
        envs.append("OMP_NUM_THREADS=1")
        envs.append("KMP_BLOCKTIME=0")
        envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
    elif "worker" == job_name:
        envs.append("OMP_NUM_THREADS=6")
        envs.append("KMP_BLOCKTIME=0")
        envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
    elif "evaluator" == job_name or "chief" == job_name:
        envs.append("OMP_NUM_THREADS=1")
        envs.append("KMP_BLOCKTIME=0")
        envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
    else:
        envs.append("OMP_NUM_THREADS=1")
        envs.append("KMP_BLOCKTIME=0")
        envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
    return envs
[ 0, 5, 6 ]
def METHOD_NAME(
    self,
    metrics_df: pd.DataFrame,
    formula_images_rdd: pyspark.RDD,
    alpha_channel: np.ndarray,
    db: DB,
):
    """Store ion metrics and iso images.

    Args:
        metrics_df: formula, adduct, msm, fdr, individual metrics
        formula_images_rdd: collection of 2d intensity arrays (in coo_matrix format)
        alpha_channel: Image alpha channel (2D, 0..1)
        db: database connection
    """
    logger.info('Storing search results to the DB')
    ion_image_ids = self._post_images_to_image_store(
        formula_images_rdd, alpha_channel, self.n_peaks
    )
    self.store_ion_metrics(metrics_df, ion_image_ids, db)
[ 1308 ]
def METHOD_NAME(x):
    return x
[ 2989 ]
def METHOD_NAME(self, n=1, left=False):
    """
    The trace of a ribbon diagram.

    Parameters:
        n : The number of wires to trace.
    """
    if not n:
        return self
    if left:
        return self.caps(self.dom[:n].r, self.dom[:n]) @ self.dom[n:]\
            >> self.dom[:n].r @ self\
            >> self.cups(self.cod[:n].r, self.cod[:n]) @ self.cod[n:]
    return self.dom[:-n] @ self.caps(self.dom[-n:], self.dom[-n:].r)\
        >> self @ self.dom[-n:].r\
        >> self.cod[:-n] @ self.cups(self.cod[-n:], self.cod[-n:].r)
[ 2576 ]
def METHOD_NAME(
    info: protocols.CircuitDiagramInfo,
) -> protocols.CircuitDiagramInfo:
    labels = [escape_text_for_latex(e) for e in info.wire_symbols]
    if info.exponent != 1:
        labels[0] += '^{' + str(info.exponent) + '}'
    symbols = tuple(r'\gate{' + l + '}' for l in labels)
    return protocols.CircuitDiagramInfo(symbols)
[ 197, 526, 2056, 100, 24, -1, 2056 ]
def METHOD_NAME(self, ImageID):
    if ImageID == self.ImageID:
        self.Referenced = True
[ 0, 660, 147, 2167 ]
def METHOD_NAME(obj: pd.DataFrame, store=None) -> np.ndarray:
    if not isinstance(obj, pd.DataFrame):
        raise TypeError("input must be a pd.DataFrame")
    if isinstance(store, dict):
        store["columns"] = obj.columns
        store["index"] = obj.index
    return obj.to_numpy(dtype="float")
[ 197, 2681, 1305, 24, 2212, 947, 4045 ]
def METHOD_NAME(billing_account: Optional[pulumi.Input[Optional[str]]] = None,
                display_name: Optional[pulumi.Input[Optional[str]]] = None,
                lookup_projects: Optional[pulumi.Input[Optional[bool]]] = None,
                open: Optional[pulumi.Input[Optional[bool]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBillingAccountResult]:
    """
    Use this data source to get information about a Google Billing Account.

    ```python
    import pulumi
    import pulumi_gcp as gcp

    acct = gcp.organizations.get_billing_account(display_name="My Billing Account",
        open=True)
    my_project = gcp.organizations.Project("myProject",
        project_id="your-project-id",
        org_id="1234567",
        billing_account=acct.id)
    ```

    :param str billing_account: The name of the billing account in the form `{billing_account_id}` or `billingAccounts/{billing_account_id}`.
    :param str display_name: The display name of the billing account.
    :param bool lookup_projects: `true` if projects associated with the billing account should be read, `false` if this
           step should be skipped. Setting `false` may be useful if the user permissions do not allow listing projects.
           Defaults to `true`.

           > **NOTE:** One of `billing_account` or `display_name` must be specified.
    :param bool open: `true` if the billing account is open, `false` if the billing account is closed.
    """
    ...
[ 19, 4094, 598, 146 ]
def METHOD_NAME(self):
    """Test that (K-1) folds can be merged into train dataset."""
    n_samples = 100
    n_features = 10
    n_tasks = 10
    X = np.random.rand(n_samples, n_features)
    p = .05  # proportion actives
    y = np.random.binomial(1, p, size=(n_samples, n_tasks))
    w = np.ones((n_samples, n_tasks))

    dataset = dc.data.NumpyDataset(X, y, w)

    K = 5
    task_splitter = dc.splits.TaskSplitter()
    fold_datasets = task_splitter.k_fold_split(dataset, K)

    # Number tasks per fold
    n_per_fold = 2
    for fold in range(K):
        train_inds = list(set(range(K)) - set([fold]))
        train_fold_datasets = [fold_datasets[ind] for ind in train_inds]
        train_dataset = dc.splits.merge_fold_datasets(train_fold_datasets)

        # Find the tasks that correspond to this test fold
        train_tasks = list(
            set(range(10)) -
            set(range(fold * n_per_fold, (fold + 1) * n_per_fold)))

        # Assert that all arrays look like they should
        np.testing.assert_array_equal(train_dataset.X, X)
        np.testing.assert_array_equal(train_dataset.y, y[:, train_tasks])
        np.testing.assert_array_equal(train_dataset.w, w[:, train_tasks])
        np.testing.assert_array_equal(train_dataset.X, X)
[ 9, 411, 3848, 4146 ]
def METHOD_NAME(self, optim_cls, *args, **kwargs):
    module_optim = MyModule()
    module_functional = MyModule()
    optim_params = module_optim.parameters()
    functional_params = module_functional.parameters()
    optim = optim_cls(optim_params, *args, **kwargs)
    functional_optim_cls = functional_optim_map.get(optim_cls, None)
    if not functional_optim_cls:
        raise ValueError(f"Functional optimizer not implemented for {optim_cls}")
    optim_functional = functional_optim_cls(
        [], *args, **kwargs, _allow_empty_param_list=True
    )
    if not hasattr(optim_functional, "step_param"):
        raise ValueError(
            f"Functional optimizer class {optim_functional} must implement step_param method."
        )

    # Initial weights should match
    self._validate_parameters(
        module_optim.parameters(), module_functional.parameters()
    )
    # Save old parameters to verify optimizer modifies them.
    old_module_optim_params = [
        param.clone().detach() for param in module_optim.parameters()
    ]
    old_module_functional_params = [
        param.clone().detach() for param in module_functional.parameters()
    ]

    t1 = torch.randn(3, 3)
    for _ in range(10):
        module_optim.zero_grad()
        module_functional.zero_grad()
        # Forward + Backward
        optim_out = module_optim(t1).sum()
        functional_out = module_functional(t1).sum()
        optim_out.backward()
        functional_out.backward()
        # Optimizer step
        optim.step()
        # Functional optimizer step_param
        for param in module_functional.parameters():
            grad = param.grad
            optim_functional.step_param(param, grad)

        # Validate parameters are equal
        for optim_param, functional_param in zip(
            module_optim.parameters(), module_functional.parameters()
        ):
            self.assertEqual(optim_param, functional_param)
        # Validate parameters are modified.
        for i, (optim_param, functional_param) in enumerate(
            zip(module_optim.parameters(), module_functional.parameters())
        ):
            self.assertNotEqual(old_module_optim_params[i], optim_param)
            self.assertNotEqual(old_module_functional_params[i], functional_param)
[ 9, 4167, 13051, 12270 ]
def METHOD_NAME(self) -> str:
    """
    The path ID that uniquely identifies the object.
    """
    return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(self):
    # newAlignList, same format
    # TODO: add relationship type to alignment.render
    newAs = set()
    sgroups, tgroups = self.groupingByMatch()
    print("finished grouping")
    for idx, (tId, slist) in enumerate(sgroups.items()):
        newAList = self.findMeronymIngroup(slist, [(tId,)])
        for multiA in newAList:  # translate multiMatch to group of single matches
            sList, tList = multiA
            for idxS in sList:
                newAs.add((idxS, tId, 1))  # TODO: what to do with rematching value score
    for idx, (sId, tlist) in enumerate(tgroups.items()):
        newAList = self.findMeronymIngroup(tlist, [(sId,)])
        for multiA in newAList:  # translate multiMatch to group of single matches
            sList, tList = multiA
            for idxT in tList:
                newAs.add((sId, idxT, 1))  # TODO: what to do with rematching value score
    newAlignList = Alignment(list(newAs))
    return newAlignList
[ 15558 ]
def METHOD_NAME(self):
    return self.client.format_url(
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/orchestrators/{resourceName}",
        **self.url_parameters
    )
[ 274 ]
def METHOD_NAME():
    """
    Creates the argparse parser with all the arguments.
    """
    parser = argparse.ArgumentParser(
        description='CLI for testing packet movement through pipelined,\
        using RYU REST API & Scapy',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Add subcommands
    subparsers = parser.add_subparsers(title='subcommands', dest='cmd')

    parser_dump = subparsers.add_parser('dump', help='Dump packet stats')
    parser_dump.add_argument('table_id', help='table id to print', type=int)

    parser_send = subparsers.add_parser('send', help='Send packets')
    parser_send.add_argument('iface', help='iface to send to')
    parser_send.add_argument('-ipd', '--ipv4_dst', help='ipv4 dst for pkt')
    parser_send.add_argument('-ips', '--ipv4_src', help='ipv4 src for pkt')
    parser_send.add_argument(
        '-n', '--num', help='number of packets to send', default=5, type=int,
    )

    parser_skip = subparsers.add_parser('skip', help='Add flowentry')
    parser_skip.add_argument(
        'table_start', type=int, help='table to insert flowentry',
    )
    parser_skip.add_argument(
        'table_end', type=int, help='table to forward to',
    )
    parser_skip.add_argument(
        '-c', '--cookie', default=0, type=int, help='flowentry cookie value',
    )
    parser_skip.add_argument('-r1', '--reg1', help='flowentry reg1 value')
    parser_skip.add_argument(
        '-p', '--priority', help='flowentry priority', type=int, default=65535,
    )

    parser_rem = subparsers.add_parser('rem', help='Remove flowentry')
    parser_rem.add_argument(
        '-tid', '--table_id', type=int, help='table to remove flowentry from',
    )
    parser_rem.add_argument(
        '-p', '--priority', default=65535, type=int,
        help='rm flowentry matching priority value',
    )
    parser_rem.add_argument(
        '-c', '--cookie', help='rm flowentry matching cookie value',
    )

    # Add function callbacks
    parser_dump.set_defaults(func=_simple_get)
    parser_send.set_defaults(func=_simple_send)
    parser_skip.set_defaults(func=_simple_add)
    parser_rem.set_defaults(func=_simple_remove)
    return parser
[ 129, 1319 ]
def METHOD_NAME(f, n_iters=100, seed=None):
    """
    Transforms a function that computes a sample statistic to a function that
    estimates the corresponding population statistic and uncertainty via
    bootstrap.

    In this version, the positional arguments to the function are assumed to
    be equal-length arrays, with the ith index of the jth array representing
    quantity j measured at observation i; the data are assumed correlated
    across quantities at a fixed observation, and uncorrelated across
    observations of a fixed quantity.

    Parameters
    ----------
    f : callable
        Function of one or more arrays returning a scalar. The positional
        arguments are assumed to be 1-d arrays of consistent length to be
        resampled at each iteration. Keyword arguments are passed through
        but are not resampled.
    n_iters : int
        Number of bootstrap iterations
    seed : int, optional
        Random seed

    Returns
    -------
    callable
        Function with the same input signature as `f`, returning a pair of
        scalars: (estimate, error)
    """
    return _bootstrap(samples_correlated, f, n_iters, seed)
[ 904, 17674 ]
def METHOD_NAME(self):
    # type: () -> None
    self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME():
    METHOD_NAME = unittest.TestSuite()
    METHOD_NAME.addTest(unittest.makeSuite(RequestContextTestCase))
    return METHOD_NAME
[ 482 ]
def METHOD_NAME(namespace, txs):
    data = [
        "// This file was GENERATED by command:\n",
        "// generate_bitcoin_fixtures.py\n",
        "// DO NOT EDIT BY HAND!!!\n"
    ]
    newLines = []
    newLines.append("#include \"" + namespace + '_fixtures.h' + "\"\n")
    newLines.append("\n")
    newLines.append("namespace ledger {\n")
    newLines.append("\tnamespace testing {\n")
    newLines.append("\t\tnamespace " + namespace + " {\n")

    apiCalls = []
    apiCalls.append("core::api::ExtendedKeyAccountCreationInfo XPUB_INFO(\n")
    apiCalls.append(' 0, {"main"}, {"44\'/0\'/0\'"}, {"' + xpub + '"}\n')
    apiCalls.append(');\n')
    apiCalls.append("std::shared_ptr<core::BitcoinLikeAccount> inflate(const std::shared_ptr<core::WalletPool>& pool, const std::shared_ptr<core::AbstractWallet>& wallet) {\n")
    apiCalls.append("\tauto account = std::dynamic_pointer_cast<core::BitcoinLikeAccount>(wait(wallet->newAccountWithExtendedKeyInfo(XPUB_INFO)));\n")
    apiCalls.append("\tsoci::session sql(pool->getDatabaseSessionPool()->getPool());\n")
    apiCalls.append("\tsql.begin();")
    for i, tx in enumerate(txs):
        apiCalls.append("\taccount->putTransaction(sql, *core::JSONUtils::parse<core::TransactionParser>(TX_" + str(i + 1) + "));\n")
    apiCalls.append("\tsql.commit();\n")
    apiCalls.append("\treturn account;\n")
    apiCalls.append("}\n")

    txLines = []
    for i, tx in enumerate(txs):
        txLines.append('const std::string TX_' + str(i + 1) + ' = "' + json.dumps(tx).replace('"', '\\"') + '";\n')

    namespacedLines = apiCalls + txLines
    for idx, line in enumerate(namespacedLines):
        namespacedLines[idx] = "\t\t\t" + line

    newLines += namespacedLines + ["\t\t}\n", "\t}\n", "}\n"]
    result = data + newLines
    with open(path + namespace + '_fixtures.cpp', 'w+') as file:
        file.writelines(result)
[ 93, 7728 ]
def METHOD_NAME(self):
    GuiTestAssistant.METHOD_NAME(self)
    BaseTestMixin.METHOD_NAME(self)
[ 531, 481 ]
def METHOD_NAME(self, data):
    r"""Convert hex string list to binary string list."""
    data = np.reshape(np.array(data), [-1])
    return [self.hex2bin_str(d) for d in data]
[ -1 ]
def METHOD_NAME(self, query):
    def escape(v):
        # TODO: improve this
        return v.replace('"', r'\"').replace("(", "\\(").replace(")", "\\)")

    def escape_value(v):
        if isinstance(v, tuple):  # hack for supporting range
            return f"[{escape(v[0])} TO {escape(v[1])}]"
        elif isinstance(v, list):  # one of
            return "(%s)" % " OR ".join(escape_value(x) for x in v)
        else:
            return '"%s"' % escape(v)

    if isinstance(query, dict):
        op = query.pop("_op", "AND")
        if op.upper() != "OR":
            op = "AND"
        op = " " + op + " "
        q = op.join(f'{k}:{escape_value(v)}' for k, v in query.items())
    else:
        q = query
    return q
[ 123, 1472 ]
def METHOD_NAME(self, stream: BytesIO, pdu: GCCConferenceCreateResponsePDU):
    """
    Write a GCCConferenceCreateResponsePDU to a stream.
    :param stream: byte stream to put the ConferenceCreateResponse data in.
    :param pdu: the PDU to write.
    """
    stream.write(Uint16BE.pack(GCCParser.NODE_ID - 1001))
    stream.write(per.writeInteger(1))
    stream.write(per.writeEnumeration(0))
    stream.write(per.writeNumberOfSet(1))
    stream.write(per.writeChoice(0xc0))
    stream.write(per.writeOctetStream(GCCParser.H221_SERVER_KEY, 4))
    stream.write(per.writeOctetStream(pdu.payload))
[ 77, 12502, 129, 17 ]
def METHOD_NAME(u, v, w):
    return v * U(w.x[0]) * u
[ 414, 5311 ]
def METHOD_NAME(code):
    global session
    global cache
    if code in cache:
        return cache[code]
    r = session.get(f'{apihost}/api/v1/search/actor?identifier={code}&namespace=ISO-3166-2')
    if r.status_code != 200:
        raise Exception("Bad response")
    resp = r.json()
    logging.debug(resp)
    if not resp['success']:
        raise Exception("Bad response")
    cache[code] = True if len(resp['data']) > 0 else False
    logging.debug(f'{code} -> {cache[code]}')
    return cache[code]
[ 137, 7675, 147 ]
def METHOD_NAME(self):
    print("Initializing test directory " + self.options.tmpdir)
    initialize_chain_clean(self.options.tmpdir, NUMB_OF_NODES)
[ 102, 357 ]
def METHOD_NAME(A='Li', B='Cl'):
    assert A in ['Li', 'Mg']  # Add Na, K
    assert B in ['H', 'F', 'Cl', 'O']  # Add Br, I
    from ase.lattice import bulk
    if A == 'Li':
        if B == 'H':
            ase_atom = bulk('LiH', 'rocksalt', a=4.0834*A2B)
        elif B == 'F':
            ase_atom = bulk('LiF', 'rocksalt', a=4.0351*A2B)
        elif B == 'Cl':
            ase_atom = bulk('LiCl', 'rocksalt', a=5.13*A2B)
    elif A == 'Mg' and B == 'O':
        ase_atom = bulk('MgO', 'rocksalt', a=4.213*A2B)
    else:
        raise NotImplementedError('No formula found for system %s %s. '
                                  'Choose a different system? Or add it to the list!' % (A, B))
    return ase_atom
[ 19, 14261, 15019 ]
def METHOD_NAME(self) -> Optional[bool]:
    return pulumi.get(self, "lookup_projects")
[ 1906, 2847 ]
def METHOD_NAME(self, instance_id):
    key_required = conf.get('access_key', '')
    if key_required != '':
        if key_required != self.get_cookie('access_key', None):
            self.close()
    log.debug('%s', 'Websocket opened')
    self._instance_id = instance_id
    ClientConnection.number_of_connections += 1
    self.set_nodelay(True)
[ 1452 ]
def METHOD_NAME(tree, goal):
    """DFS in tree for goal."""
    for node in tree.keys():
        if node == goal:
            return tree[node]
        else:
            result = METHOD_NAME(tree[node], goal)
            if result is not None:
                return result
[ 1070, 151 ]
async def METHOD_NAME(request):
    num_queries = get_num_queries(request)
    row_ids = random.sample(range(1, 10000), num_queries)
    worlds = []

    async with db_pool.acquire() as db_conn:
        statement = await db_conn.prepare(READ_ROW_SQL)
        for row_id in row_ids:
            number = await statement.fetchval(row_id)
            worlds.append({"id": row_id, "randomNumber": number})

    return bs.json(worlds)
[ 107, 1267, 815, 9 ]
def METHOD_NAME(self):
    inputs = (None, None)
    expected = (None, None)
    self.check_cube_names(inputs, expected)
[ 9, 2356, 156, 98, 524, 156, 98 ]
def METHOD_NAME(self, send_email):
    invitation, created = Invitation.objects.invite(
        from_user=self.user,
        to_user=self.another_user,
        obj=self.project,
    )
    self.assertTrue(created)
    self.assertFalse(invitation.expired)
    self.assertIsNotNone(invitation.backend)
    send_email.assert_called_once()

    send_email.reset_mock()
    invitation, created = Invitation.objects.invite(
        from_user=self.user,
        to_user=self.another_user,
        obj=self.project,
    )
    self.assertFalse(created)
    self.assertFalse(invitation.expired)
    send_email.assert_not_called()
    self.assertEqual(Invitation.objects.all().count(), 1)
[ 9, 1048, 2430, 21 ]
def METHOD_NAME(data: str, dataset_path: str) -> str:
    """
    Given a dataset name, fetch the yaml config for the dataset
    from the Ultralytics dataset repo, overwrite its 'path'
    attribute (dataset root dir) to point to the `dataset_path`
    and finally save it to the current working directory.
    This makes it possible to create data yaml config files that
    point to arbitrary directories on the disk.

    :param data: name of the dataset (e.g. "coco.yaml")
    :param dataset_path: path to the dataset directory
    :return: a path to the new yaml config file
        (saved in the current working directory)
    """
    ultralytics_dataset_path = glob.glob(os.path.join(ROOT, "**", data), recursive=True)
    if len(ultralytics_dataset_path) != 1:
        raise ValueError(
            "Expected to find a single path to the "
            f"dataset yaml file: {data}, but found {ultralytics_dataset_path}"
        )
    ultralytics_dataset_path = ultralytics_dataset_path[0]

    with open(ultralytics_dataset_path, "r") as f:
        yaml_config = yaml.safe_load(f)
    yaml_config["path"] = dataset_path

    yaml_save_path = os.path.join(os.getcwd(), data)

    # save the new dataset yaml file
    with open(yaml_save_path, "w") as outfile:
        yaml.dump(yaml_config, outfile, default_flow_style=False)

    return yaml_save_path
[ 365, 280, 126, 157 ]
def METHOD_NAME(tmp_path_factory, cert_dir):
    """Starts a new Postgres db that is shared for tests in this process"""
    METHOD_NAME = Postgres(tmp_path_factory.getbasetemp() / "pgdata")
    METHOD_NAME.initdb()
    os.truncate(METHOD_NAME.hba_path, 0)

    if TLS_SUPPORT:
        with METHOD_NAME.conf_path.open("a") as f:
            cert = cert_dir / "TestCA1" / "sites" / "01-localhost.crt"
            key = cert_dir / "TestCA1" / "sites" / "01-localhost.key"
            f.write(f"ssl_cert_file='{cert}'\n")
            f.write(f"ssl_key_file='{key}'\n")

    METHOD_NAME.nossl_access("all", "trust")
    METHOD_NAME.nossl_access("p4", "password")
    METHOD_NAME.nossl_access("p5", "md5")
    if PG_SUPPORTS_SCRAM:
        METHOD_NAME.nossl_access("p6", "scram-sha-256")
    METHOD_NAME.commit_hba()

    METHOD_NAME.start()
    for i in range(8):
        METHOD_NAME.sql(f"create database p{i}")
    METHOD_NAME.sql("create database unconfigured_auth_database")
    METHOD_NAME.sql("create user bouncer")
    METHOD_NAME.sql("create user pswcheck with superuser createdb password 'pgbouncer-check';")
    METHOD_NAME.sql("create user someuser with password 'anypasswd';")
    METHOD_NAME.sql("create user maxedout;")
    METHOD_NAME.sql("create user maxedout2;")
    METHOD_NAME.sql(f"create user longpass with password '{LONG_PASSWORD}';")
    METHOD_NAME.sql("create user stats password 'stats';")
    if PG_SUPPORTS_SCRAM:
        METHOD_NAME.sql("set password_encryption = 'md5'; create user muser1 password 'foo';")
        METHOD_NAME.sql("set password_encryption = 'md5'; create user muser2 password 'wrong';")
        METHOD_NAME.sql("set password_encryption = 'md5'; create user puser1 password 'foo';")
        METHOD_NAME.sql("set password_encryption = 'md5'; create user puser2 password 'wrong';")
        METHOD_NAME.sql(
            "set password_encryption = 'scram-sha-256'; create user scramuser1 password '"
            "SCRAM-SHA-256$4096:D76gvGUVj9Z4DNiGoabOBg==$RukL0Xo3Ql/2F9FsD7mcQ3GATG2fD3PA71qY1JagGDs=:BhKUwyyivFm7Tq2jDJVXSVRbRDgTWyBilZKgg6DDuYU="
            "'"
        )
        METHOD_NAME.sql(
            "set password_encryption = 'scram-sha-256'; create user scramuser3 password 'baz';"
        )
    else:
        METHOD_NAME.sql("set password_encryption = 'on'; create user muser1 password 'foo';")
        METHOD_NAME.sql("set password_encryption = 'on'; create user muser2 password 'wrong';")
        METHOD_NAME.sql("set password_encryption = 'on'; create user puser1 password 'foo';")
        METHOD_NAME.sql("set password_encryption = 'on'; create user puser2 password 'wrong';")

    yield METHOD_NAME

    METHOD_NAME.cleanup()
[ 11 ]
def METHOD_NAME(self):
    """
    A non-batch user can be assigned a batch draft.
    The user cannot change the public bodies, but is able to
    send the request.
    """
    user = User.objects.get(email="[email protected]")
    old_project = factories.FoiProjectFactory(user=user)
    ok = self.client.login(email=user.email, password="froide")
    self.assertTrue(ok)

    draft = factories.RequestDraftFactory.create(user=user)
    draft.publicbodies.add(self.pb1, self.pb2)

    evil_pb3 = PublicBody.objects.filter(jurisdiction__slug="nrw")[1]
    pb_ids = [self.pb1.pk, self.pb2.pk]

    response = self.client.get(draft.get_absolute_url())
    self.assertEqual(response.status_code, 200)

    mail.outbox = []
    draft.project = old_project
    draft.save()

    data = {
        "subject": "Test-Subject",
        "body": "This is another test body with Ümläut€n",
        "public": "on",
        "publicbody": pb_ids + [evil_pb3],
        "draft": draft.pk,
    }
    request_url = reverse("foirequest-make_request")
    response = self.client.post(request_url, data)
    self.assertContains(response, "Draft cannot be used again", status_code=400)

    draft.project = None
    draft.save()

    with self.captureOnCommitCallbacks(execute=True):
        response = self.client.post(request_url, data)
    self.assertEqual(response.status_code, 302)

    project = FoiProject.objects.get(title=data["subject"])
    self.assertEqual(set(pb_ids), set(x.id for x in project.publicbodies.all()))
    self.assertEqual(len(mail.outbox), 3)  # two pbs, one user to user
[ 9, 255, 155 ]
def METHOD_NAME(self):
    self.install_license("COPYING")

    # fool packages looking to link to non-wide-character ncurses libraries
    for lib in ["curses", "ncurses", "form", "panel", "menu"]:
        libp = self.destdir / "usr/lib" / f"lib{lib}.so"
        libp.unlink(missing_ok=True)
        libp.with_suffix(".a").unlink(missing_ok=True)
        with open(libp, "w") as f:
            f.write(f"INPUT(-l{lib}w)\n")
        libp.chmod(0o755)
        self.install_link(f"lib{lib}w.a", f"usr/lib/lib{lib}.a")

    self.rm(self.destdir / "usr/lib/libncurses++.a", force=True)
    self.install_link("libncurses++w.a", "usr/lib/libncurses++.a")

    # some packages look for -lcurses during build
    self.rm(self.destdir / "usr/lib/libcursesw.so", force=True)
    with open(self.destdir / "usr/lib/libcursesw.so", "w") as f:
        f.write("INPUT(-lncursesw)\n")
    (self.destdir / "usr/lib/libcursesw.so").chmod(0o755)

    self.rm(self.destdir / "usr/lib/libcurses.so", force=True)
    self.rm(self.destdir / "usr/lib/libcursesw.a", force=True)
    self.rm(self.destdir / "usr/lib/libcurses.a", force=True)
    self.install_link("libncurses.so", "usr/lib/libcurses.so")
    self.install_link("libncursesw.a", "usr/lib/libcursesw.a")
    self.install_link("libncurses.a", "usr/lib/libcurses.a")

    # create libtinfo symlinks
    self.install_link("libncursesw.so", "usr/lib/libtinfo.so")
    self.install_link(
        f"libncursesw.so.{pkgver}", f"usr/lib/libtinfo.so.{pkgver}"
    )
    self.install_link(
        f"libtinfo.so.{pkgver}",
        f"usr/lib/libtinfo.so.{pkgver[0:pkgver.find('.')]}",
    )
    self.install_link("ncursesw.pc", "usr/lib/pkgconfig/tinfo.pc")

    # remove broken symlink
    self.rm(self.destdir / "usr/lib/terminfo", force=True)
[ 72, 428 ]
def METHOD_NAME(self, out, session):
    out.writerow([
        'Game', 'Studio', 'Studio URL', 'Primary Contact Names', 'Primary Contact Emails',
        'Game Website', 'Twitter', 'Facebook', 'Other Social Media', 'Genres',
        'Brief Description', 'Long Description', 'How to Play', 'Link to Video for Judging',
        'Link to Promo Video', 'Link to Game', 'Game Link Password', 'Game Requires Codes?',
        'Code Instructions', 'Build Status', 'Build Notes', 'Game Submitted', 'Current Status',
        'Registered', 'Accepted', 'Confirmation Deadline', 'Screenshot Links', 'Average Score',
        'Individual Scores'
    ])
    for game in session.indie_games():
        out.writerow([
            game.title,
            game.studio.name,
            '{}/mivs/continue_app?id={}'.format(c.PATH, game.studio.id),
            game.studio.primary_contact_first_names,
            game.studio.email,
            game.link_to_webpage,
            game.twitter,
            game.facebook,
            game.other_social_media,
            ' / '.join(game.genres_labels),
            game.brief_description,
            game.description,
            game.how_to_play,
            game.link_to_video,
            game.link_to_promo_video,
            game.link_to_game,
            game.password_to_game,
            game.code_type_label,
            game.code_instructions,
            game.build_status_label,
            game.build_notes,
            'submitted' if game.submitted else 'not submitted',
            'accepted and confirmed' if game.confirmed else game.status_label,
            game.registered.strftime('%Y-%m-%d'),
            'n/a' if not game.accepted else game.accepted.strftime('%Y-%m-%d'),
            'n/a' if not game.accepted else game.studio.confirm_deadline.strftime('%Y-%m-%d'),
            '\n'.join(c.URL_BASE + screenshot.url.lstrip('.') for screenshot in game.screenshots),
            str(game.average_score)
        ] + [str(score) for score in game.scores])
[ 8211 ]
def METHOD_NAME(self, PanelItem, ItemsNumber, Move, SrcPath, OpMode):
    log.debug(
        "VFS.PutFiles({0}, {1}, {2}, {3}, {4})".format(
            PanelItem,
            ItemsNumber,
            Move,
            SrcPath,
            OpMode,
        )
    )
    return 0
[ 1276, 1537 ]
def METHOD_NAME(self) -> QueryRunner:
    """Create a QueryRunner Instance"""
    return cls_timeout(TEN_MIN)(
        QueryRunner(
            session=self.session,
            table=self.table,
            sample=self.sample,
            partition_details=self.table_partition_config,
            profile_sample_query=self.table_sample_query,
        )
    )
[ 129, 1102 ]
def METHOD_NAME(jpackage, name):
    return getattr(getattr(jpackage, name + '$'), 'MODULE$')
[ 10713, 279 ]
def METHOD_NAME(self) -> None:
    self._rowbatch_q.put_nowait(
        _Rowbatch(vpb.RowBatchData(), close_table=True))
[ 1462 ]
def METHOD_NAME(*args):
    print("mocking _generate_breakdown_df()")
    return pd.DataFrame({
        "state_fips": ["01", "02", "03"],
        "state_name": ["SomeState01", "SomeState02", "SomeState03"],
        "race_category_id": ["BLACK", "BLACK", "BLACK"],
        "race_and_ethnicity": ["Black", "Black", "Black"],
        "fake_col1": [0, 1, 2],
        "fake_col2": ["a", "b", "c"]
    })
[ 567, 11316, 2057 ]
def METHOD_NAME():
    call_command("createtestuser", silent=True)
[ 102, 21 ]
def METHOD_NAME(intersect):
    center = coordinates.SkyCoord(ra=10.8, dec=32.2, unit="deg")
    radius = coordinates.Angle(1.5, unit="deg")
    cone_region = CircleSkyRegion(center, radius)
    request_payload = MOCServer.query_region(
        region=cone_region, intersect=intersect, get_query_payload=True
    )
    if intersect == "encloses":
        assert request_payload["intersect"] == "enclosed"
    else:
        assert request_payload["intersect"] == intersect
[ 9, 3801, 49 ]
def METHOD_NAME(self):
    yz_plane = BluemiraPlane([0, 0, 0], [1, 0, 0])
    yz_plane_2 = BluemiraPlane.from_3_points([0, 0, 0], [0, 1, 0], [0, 0, 1])
    np.testing.assert_equal(yz_plane.axis, yz_plane_2.axis)
    np.testing.assert_equal(yz_plane.base, yz_plane_2.base)
[ 9, 5237, 9430 ]
def METHOD_NAME(args, project, package, nsource):
    if nsource is None:
        nsource = ''
    file_path = "{0}/{1}/{1}.spec".format(project, package)
    for line in obs_get_file(args, file_path).split('\n'):
        source = match(r'^Source{}:\s*(.+)$'.format(nsource), line)
        if source:
            return source.group(1)
[ 1834, 19, 360, 1458 ]
def METHOD_NAME(self):
    self.requires("libiconv/1.17")
    self.requires("libsndfile/1.2.2")
    self.requires("libcap/2.68")
    self.requires("libtool/2.4.7")
    if self.options.with_alsa:
        self.requires("libalsa/1.2.7.2")
    if self.options.with_glib:
        self.requires("glib/2.77.2")
    if self.options.get_safe("with_fftw"):
        self.requires("fftw/3.3.10")
    if self.options.with_x11:
        self.requires("xorg/system")
    if self.options.with_openssl:
        self.requires("openssl/[>=1.1 <4]")
    if self.options.with_dbus:
        self.requires("dbus/1.15.8")
[ 5186 ]
def METHOD_NAME(self, x):
    if self.num_col is None:
        self.num_col = len(x)
        self.count_numerical = np.zeros(self.num_col)
        self.count_categorical = np.zeros(self.num_col)
        for _ in range(len(x)):
            self.count_unique_numerical.append({})
    for i in range(self.num_col):
        x[i] = x[i].decode("utf-8")
        try:
            tmp_num = float(x[i])
            self.count_numerical[i] += 1
            if tmp_num not in self.count_unique_numerical[i]:
                self.count_unique_numerical[i][tmp_num] = 1
            else:
                self.count_unique_numerical[i][tmp_num] += 1
        except ValueError:
            self.count_categorical[i] += 1
[ 86, 89 ]
def METHOD_NAME(self):
    for i in range(MAX_SET_SIZE):
        socket = self.outputs[i]
        desired_state = (i > (self.num_sockets_per_set - 1))
        if socket.hide != desired_state:
            socket.hide_safe = desired_state
[ 0, -1, 146, 6424, 24, -1, 41 ]
def METHOD_NAME(cls, _root, info, /, **data):
    manifest_url = data.get("manifest_url")
    clean_manifest_url(manifest_url)

    manifest_data = cls.fetch_manifest(manifest_url)
    cls.clean_manifest_data(info, manifest_data)

    instance = cls.construct_instance(instance=None, cleaned_data=manifest_data)
    return cls.success_response(instance)
[ 407, 87 ]
def METHOD_NAME(p, p_recv=None):
    two_pandas = p_recv is not None

    p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
    if two_pandas:
        p_recv.set_safety_mode(Panda.SAFETY_ALLOUTPUT)  # enable output mode
    else:
        p.set_can_loopback(True)

    tests = [
        [500, 1000, 2000],  # speeds
        [93, 87, 78],  # saturation thresholds
    ]
    for i in range(len(tests[0])):
        # set bus 0 data speed to speed
        p.set_can_data_speed_kbps(0, tests[0][i])
        if p_recv is not None:
            p_recv.set_can_data_speed_kbps(0, tests[0][i])
        time.sleep(0.05)

        comp_kbps = time_many_sends(p, 0, p_recv=p_recv, msg_count=400, two_pandas=two_pandas, msg_len=64)

        # bit count from https://en.wikipedia.org/wiki/CAN_bus
        saturation_pct = (comp_kbps / tests[0][i]) * 100.0
        assert saturation_pct > tests[1][i]
        assert saturation_pct < 100
[ 9, 9980, 12801 ]
def METHOD_NAME(self):
    """Return the current text if it's a valid value else None

    Note: The empty placeholder value is valid and returns as ""

    """
    text = self.currentText()
    lookup = set(self.itemText(i) for i in range(self.count()))
    if text not in lookup:
        return None
    return text or None
[ 19, 1205, 99 ]
def METHOD_NAME(message: types.Message):
    keyboard = types.InlineKeyboardMarkup(
        keyboard=[
            [types.InlineKeyboardButton(text='callback data', callback_data='example')],
            [types.InlineKeyboardButton(text='ignore case callback data', callback_data='ExAmPLe')]
        ]
    )
    bot.send_message(message.chat.id, message.text, reply_markup=keyboard)
[ 353, 1076 ]
def METHOD_NAME(self) -> None:
    if self._conn is None:
        return
    self._conn.close()
    self._clear()
[ 1462 ]
def METHOD_NAME(cls, *args, **kwargs):
    if cls._args_schema is not None:
        return cls._args_schema
    cls._args_schema = super().METHOD_NAME(*args, **kwargs)

    # define Arg Group ""
    _args_schema = cls._args_schema
    _args_schema.local_rulestack_name = AAZStrArg(
        options=["-n", "--name", "--local-rulestack-name"],
        help="LocalRulestack resource name",
        required=True,
        id_part="name",
    )
    _args_schema.resource_group = AAZResourceGroupNameArg(
        required=True,
    )
    _args_schema.skip = AAZStrArg(
        options=["--skip"],
        help="LocalRulestack resource skip",
    )
    _args_schema.top = AAZIntArg(
        options=["--top"],
        help="LocalRulestack resource top",
    )
    _args_schema.type = AAZStrArg(
        options=["--type"],
        help="LocalRulestack resource type",
        required=True,
        enum={"antiSpyware": "antiSpyware", "antiVirus": "antiVirus", "dnsSubscription": "dnsSubscription", "fileBlocking": "fileBlocking", "ipsVulnerability": "ipsVulnerability", "urlFiltering": "urlFiltering"},
    )
    return cls._args_schema
[ 56, 134, 135 ]
def METHOD_NAME(self):
    tree = self.svc.node.devtree
    self.remove_dev_holders(self.device, tree)
[ 188, 11965 ]
def METHOD_NAME(self, bytestring, message=None, policy=None):
    if policy is None:
        policy = self.policy
    if message is None:
        message = self.message
    return email.message_from_bytes(bytestring, message, policy=policy)
[ 321, 169 ]
def METHOD_NAME(self):
    with create_modules(MODNAME) as mods:
        with mods[MODNAME].open("a") as fp:
            print(
                """
[ 9, 612, 2355 ]
def METHOD_NAME(self, t1, t2):
    self.assertEqual(t1.size(), t2.size(), "size mismatch")
    self.assertEqual(t1.ne(t2).long().sum(), 0)
[ 638, 768, 926 ]
def METHOD_NAME(entries):
    return {"entries": [{"leaf_input": entry.leaf_input.encode("base64"),
                         "extra_data": entry.extra_data.encode("base64")}
                        for entry in entries]}
[ 109, 24, 763 ]
def METHOD_NAME(self) -> DF:
    """
    :return: the target column as a pandas DataFrame: if you need a Series, just call `y.squeeze()`.
    """
    return self.data.iloc[:, [self.dataset.target.index]]
[ 320 ]
async def METHOD_NAME(session_manager, session):
    session_manager._active.active_id = session.meta.identifier
    assert session_manager.deactivate(session.meta.identifier) is session
    assert session_manager._active.active_id is None
[ 9, 931 ]
def METHOD_NAME(self):
    reline = re.compile(r"^\* \[\[([^]]*)\]\][^[]*\[\[([^]]*)\]\][^[]*(?:\[\[([^]]*)\]\][^[]*)?(?:\[\[([^]]*)\]\][^[]*)?")
    data = urlread(u"https://fr.wikipedia.org/wiki/Liste_des_essences_forestières_européennes?action=raw", 1)
    #data = open(u"Liste_des_essences_forestières_européennes?action=raw").read()
    data = data.split("\n")
    for line in data:
        for res in reline.findall(line):
            for n in res[0].split('|'):
                self.Tree[self.normalize(n)] = {'genus': res[1], 'species': '|'.join(res[2:3]), 'species:fr': res[0]}
[ 10515, 12838, -1, -1 ]
def METHOD_NAME(cls, value):
    if hasattr(value, "fire"):
        cls._trigger = value
    else:
        raise AttributeError, "Handler instance must have 'fire' attribute"
[ 0, 2117 ]
def METHOD_NAME(request):
    standalone = DirSrv(verbose=False)
    standalone.log.debug("Instance allocated")
    args = {SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            # SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: INSTANCE_SERVERID}
    standalone.allocate(args)
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
[ 4567 ]
def METHOD_NAME(self) -> None:
    self.app_context.pop()
[ 531, 481 ]
def METHOD_NAME(
    app: LinOTPApp,
    runner: FlaskCliRunner,
    freezer: FrozenDateTimeFactory,
    export_dir: Path,
    setup_audit_table: None,
):
    freezer.move_to("2020-01-01 09:50:00")
    formated_time = datetime.now().strftime(
        app.config["BACKUP_FILE_TIME_FORMAT"]
    )
    runner.invoke(
        cli_main,
        [
            "-vv",
            "audit",
            "cleanup",
            "--max",
            "10",
            "--min",
            "10",
            "--no-export",
            "--exportdir",
            str(export_dir),
        ],
    )
    deleted = AUDIT_AMOUNT_ENTRIES - 10
    filename = f"SQLAuditExport.{formated_time}.{deleted}.csv"
    export_file_backup_dir = Path(app.config["BACKUP_DIR"]) / filename
    assert not export_file_backup_dir.is_file()
    assert len(list(export_dir.iterdir())) == 0
[ 9, 1422, 950, 1295, 294 ]
def METHOD_NAME(
    self, deviceupdate_endpoint, deviceupdate_instance_id, deviceupdate_device_group
):
    client = self.create_client(endpoint=deviceupdate_endpoint, instance_id=deviceupdate_instance_id)
    response = client.device_management.list_best_updates_for_group(deviceupdate_device_group)
    result = [item for item in response]
    assert len(result) > 0
[ 9, 19, 2192, 682, 43, 846 ]
def METHOD_NAME(self, allow_ips, connect_to, addresses, expected):
    '''
    Start a node with requested rpcallowip and rpcbind parameters,
    then try to connect, and check if the set of bound addresses
    matches the expected set.
    '''
    self.log.info("Bind test for %s" % str(addresses))
    expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
    base_args = ['-disablewallet', '-nolisten']
    if allow_ips:
        base_args += ['-rpcallowip=' + x for x in allow_ips]
    binds = ['-rpcbind=' + addr for addr in addresses]
    self.nodes[0].rpchost = connect_to
    self.start_node(0, base_args + binds)
    pid = self.nodes[0].process.pid
    assert_equal(set(get_bind_addrs(pid)), set(expected))
    self.stop_nodes()
[ 22, 287, 9 ]
def METHOD_NAME(self):
    return EvaluateParam(eval_type="regression", metrics=self.metrics)
[ 19, 1097, 49 ]
def METHOD_NAME(
    self,
    account_id,
    auth_token,
    api_url,
    download_url,
    minimum_part_size,
    application_key,
    realm,
    allowed=None,
    application_key_id=None,
    s3_api_url=None,
):
    if 's3_api_url' in inspect.getfullargspec(self._set_auth_data).args:
        s3_kwargs = dict(s3_api_url=s3_api_url)
    else:
        s3_kwargs = {}
    if allowed is None:
        allowed = self.DEFAULT_ALLOWED
    assert self.allowed_is_valid(allowed)

    self._set_auth_data(
        account_id=account_id,
        auth_token=auth_token,
        api_url=api_url,
        download_url=download_url,
        minimum_part_size=minimum_part_size,
        application_key=application_key,
        realm=realm,
        allowed=allowed,
        application_key_id=application_key_id,
        **s3_kwargs,
    )
[ 0, 2433, 365 ]
def METHOD_NAME(fabric_name: Optional[pulumi.Input[str]] = None,
                network_mapping_name: Optional[pulumi.Input[str]] = None,
                network_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                resource_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetReplicationNetworkMappingResult]:
    """
    Gets the details of an ASR network mapping

    :param str fabric_name: Primary fabric name.
    :param str network_mapping_name: Network mapping name.
    :param str network_name: Primary network name.
    :param str resource_group_name: The name of the resource group where the recovery services vault is present.
    :param str resource_name: The name of the recovery services vault.
    """
    ...
[ 19, 3185, 1228, 445, 146 ]
def METHOD_NAME(cls):
    """Returns all massager subclasses"""
    return set(cls._massagers.values())
[ 19, -1 ]
def METHOD_NAME(self):
    return ShapeNetDataset(
        self, "trainval", self.num_points, self.normal_channel
    )
[ 11967 ]
def METHOD_NAME(self):
    symbols = symtable.symtable("def f(x): return x", "?", "exec")
[ 9, 1005 ]
def METHOD_NAME(executable):
    """
    Search executable in PATH environment. Return path if found, None if not.
    """
    path = os.environ.get('PATH')
    if not path:
        return
    for p in path.split(os.path.pathsep):
        p = os.path.join(p, executable)
        if os.path.exists(p):
            return p
        p += '.exe'
        if os.path.exists(p):
            return p
[ 416, 1005 ]
def METHOD_NAME(actions):
    if actions:
        return random.choice(actions)
    return 0
[ 236, 4220 ]
def METHOD_NAME(self, tree, startTime):
[ 623, 463 ]
def METHOD_NAME(conf):
    conf.xcheck_var('CHOST', cross=True)
    conf.env.CHOST = conf.env.CHOST or [conf.env.DEST_OS]
    conf.env.DEST_OS = conf.env.CHOST[0].replace('-', '_')
    conf.xcheck_host_prog('CC', 'gcc')
    conf.xcheck_host_prog('CXX', 'g++')
    conf.xcheck_host_prog('LINK_CC', 'gcc')
    conf.xcheck_host_prog('LINK_CXX', 'g++')
    conf.xcheck_host_prog('AR', 'ar')
    conf.xcheck_host_prog('AS', 'as')
    conf.xcheck_host_prog('LD', 'ld')
    conf.xcheck_host_envar('CFLAGS')
    conf.xcheck_host_envar('CXXFLAGS')
    conf.xcheck_host_envar('LDFLAGS', 'LINKFLAGS')
    conf.xcheck_host_envar('LIB')
    conf.xcheck_host_envar('PKG_CONFIG_LIBDIR')
    conf.xcheck_host_envar('PKG_CONFIG_PATH')
    if not conf.env.env:
        conf.env.env = {}
        conf.env.env.update(os.environ)
    if conf.env.PKG_CONFIG_LIBDIR:
        conf.env.env['PKG_CONFIG_LIBDIR'] = conf.env.PKG_CONFIG_LIBDIR[0]
    if conf.env.PKG_CONFIG_PATH:
        conf.env.env['PKG_CONFIG_PATH'] = conf.env.PKG_CONFIG_PATH[0]
[ 10306, 1806 ]
def METHOD_NAME(build_service_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                service_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBuildServiceResourceUploadUrlResult:
    """
    Get a resource upload URL for build service, which may be artifacts or source archive.
    Azure REST API version: 2023-05-01-preview.

    :param str build_service_name: The name of the build service resource.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str service_name: The name of the Service resource.
    """
    __args__ = dict()
    __args__['buildServiceName'] = build_service_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:appplatform:getBuildServiceResourceUploadUrl', __args__, opts=opts, typ=GetBuildServiceResourceUploadUrlResult).value

    return AwaitableGetBuildServiceResourceUploadUrlResult(
        relative_path=pulumi.get(__ret__, 'relative_path'),
        upload_url=pulumi.get(__ret__, 'upload_url'))
[ 19, 56, 549, 191, 172, 274 ]
def METHOD_NAME(self, *args, **kwargs):
    if not self._connected:  # pylint: disable=protected-access
        raise RuntimeError("FTP client is not connected")
    return func(self, *args, **kwargs)
[ 291 ]
def METHOD_NAME():
    # Load E1 and A1B scenarios using the callback to update the metadata.
    scenario_files = [
        iris.sample_data_path(fname) for fname in ["E1.2098.pp", "A1B.2098.pp"]
    ]
    scenarios = iris.load(scenario_files, callback=cop_metadata_callback)

    # Load the preindustrial reference data.
    preindustrial = iris.load_cube(iris.sample_data_path("pre-industrial.pp"))

    # Define evenly spaced contour levels: -2.5, -1.5, ... 15.5, 16.5 with the
    # specific colours.
    levels = np.arange(20) - 2.5
    red = np.array([0, 0, 221, 239, 229, 217, 239, 234, 228, 222, 205, 196,
                    161, 137, 116, 89, 77, 60, 51]) / 256.0
    green = np.array([16, 217, 242, 243, 235, 225, 190, 160, 128, 87, 72, 59,
                      33, 21, 29, 30, 30, 29, 26]) / 256.0
    blue = np.array([255, 255, 243, 169, 99, 51, 63, 37, 39, 21, 27, 23, 22,
                     26, 29, 28, 27, 25, 22]) / 256.0

    # Put those colours into an array which can be passed to contourf as the
    # specific colours for each level.
    colors = np.stack([red, green, blue], axis=1)

    # Make a wider than normal figure to house two maps side-by-side.
    fig, ax_array = plt.subplots(1, 2, figsize=(12, 5))

    # Loop over our scenarios to make a plot for each.
    for ax, experiment, label in zip(
        ax_array, ["E1", "A1B"], ["E1", "A1B-Image"]
    ):
        exp_cube = scenarios.extract_cube(
            iris.Constraint(Experiment=experiment)
        )
        time_coord = exp_cube.coord("time")

        # Calculate the difference from the preindustrial control run.
        exp_anom_cube = exp_cube - preindustrial

        # Plot this anomaly.
        plt.sca(ax)
        ax.set_title(f"HadGEM2 {label} Scenario", fontsize=10)
        contour_result = iplt.contourf(
            exp_anom_cube, levels, colors=colors, extend="both"
        )
        plt.gca().coastlines()

    # Now add a colourbar whose leftmost point is the same as the leftmost
    # point of the left hand plot and rightmost point is the rightmost
    # point of the right hand plot.

    # Get the positions of the 2nd plot and the left position of the 1st plot.
    left, bottom, width, height = ax_array[1].get_position().bounds
    first_plot_left = ax_array[0].get_position().bounds[0]

    # The width of the colorbar should now be simple.
    width = left - first_plot_left + width

    # Add axes to the figure, to place the colour bar.
    colorbar_axes = fig.add_axes([first_plot_left, 0.18, width, 0.03])

    # Add the colour bar.
    cbar = plt.colorbar(contour_result, colorbar_axes, orientation="horizontal")

    # Label the colour bar and add ticks.
    cbar.set_label(preindustrial.units)
    cbar.ax.tick_params(length=0)

    # Get the time datetime from the coordinate.
    time = time_coord.units.num2date(time_coord.points[0])

    # Set a title for the entire figure, using the year from the datetime
    # object. Also, set the y value for the title so that it is not tight to
    # the top of the plot.
    fig.suptitle(
        f"Annual Temperature Predictions for {time.year}",
        y=0.9,
        fontsize=18,
    )

    iplt.show()
[ 57 ]
def METHOD_NAME(self, file):
    if self.multi:
        file.write("".join(self.pieces))
    else:
        file.write("".join(self.clean_pieces(self.pieces)))
[ 77, 24, 171 ]
def METHOD_NAME(db, client, admin_jwt, user):
    invoice = get_invoice(db, user)
    response = client.delete(
        f'/v1/event-invoices/{invoice.id}',
        content_type='application/vnd.api+json',
        headers=admin_jwt,
    )
    assert response.status_code == 405
[ 9, 2486, 34, 2870, 168 ]
def METHOD_NAME(self, txid):
    return self.compose_request('rawtx', txid, {'format': 'hex'})
[ 17242 ]
async def METHOD_NAME(pipeline_response):
    deserialized = self._deserialize("AccessReviewContactedReviewerListResult", pipeline_response)
    list_of_elem = deserialized.value
    if cls:
        list_of_elem = cls(list_of_elem)  # type: ignore
    return deserialized.next_link or None, AsyncList(list_of_elem)
[ 297, 365 ]
def METHOD_NAME(self):
    """Main test logic"""
    self.log.info(
        "Test importaddress with label and importprivkey without label."
    )
    self.log.info("Import a watch-only address with a label.")
    address = self.nodes[0].getnewaddress()
    label = "Test Label"
    self.nodes[1].importaddress(address, label)
    test_address(self.nodes[1],
                 address,
                 iswatchonly=True,
                 ismine=False,
                 labels=[label])

    self.log.info(
        "Import the watch-only address's private key without a "
        "label and the address should keep its label."
    )
    priv_key = self.nodes[0].dumpprivkey(address)
    self.nodes[1].importprivkey(priv_key)
    test_address(self.nodes[1], address, labels=[label])

    self.log.info(
        "Test importaddress without label and importprivkey with label."
    )
    self.log.info("Import a watch-only address without a label.")
    address2 = self.nodes[0].getnewaddress()
    self.nodes[1].importaddress(address2)
    test_address(self.nodes[1],
                 address2,
                 iswatchonly=True,
                 ismine=False,
                 labels=[""])

    self.log.info(
        "Import the watch-only address's private key with a "
        "label and the address should have its label updated."
    )
    priv_key2 = self.nodes[0].dumpprivkey(address2)
    label2 = "Test Label 2"
    self.nodes[1].importprivkey(priv_key2, label2)
    test_address(self.nodes[1], address2, labels=[label2])

    self.log.info("Test importaddress with label and importprivkey with label.")
    self.log.info("Import a watch-only address with a label.")
    address3 = self.nodes[0].getnewaddress()
    label3_addr = "Test Label 3 for importaddress"
    self.nodes[1].importaddress(address3, label3_addr)
    test_address(self.nodes[1],
                 address3,
                 iswatchonly=True,
                 ismine=False,
                 labels=[label3_addr])

    self.log.info(
        "Import the watch-only address's private key with a "
        "label and the address should have its label updated."
    )
    priv_key3 = self.nodes[0].dumpprivkey(address3)
    label3_priv = "Test Label 3 for importprivkey"
    self.nodes[1].importprivkey(priv_key3, label3_priv)
    test_address(self.nodes[1], address3, labels=[label3_priv])

    self.log.info(
        "Test importprivkey won't label new dests with the same "
        "label as others labeled dests for the same key."
    )
    self.log.info("Import a watch-only p2sh-segwit address with a label.")
    address4 = self.nodes[0].getnewaddress("", "p2sh-segwit")
    label4_addr = "Test Label 4 for importaddress"
    self.nodes[1].importaddress(address4, label4_addr)
    test_address(self.nodes[1],
                 address4,
                 iswatchonly=True,
                 ismine=False,
                 labels=[label4_addr],
                 embedded=None)

    self.log.info(
        "Import the watch-only address's private key without a "
        "label and new destinations for the key should have an "
        "empty label while the 'old' destination should keep "
        "its label."
    )
    priv_key4 = self.nodes[0].dumpprivkey(address4)
    self.nodes[1].importprivkey(priv_key4)
    embedded_addr = self.nodes[1].getaddressinfo(address4)['embedded']['address']
    test_address(self.nodes[1], embedded_addr, labels=[""])
    test_address(self.nodes[1], address4, labels=[label4_addr])

    self.stop_nodes()
[ 22, 9 ]
def METHOD_NAME(layer: Shapes):
    """."""
    if not layer._is_creating:
        layer.remove_selected()
[ 34, 449, 333 ]