Columns: text (string, lengths 15 to 7.82k), ids (sequence, lengths 1 to 7)
async def METHOD_NAME(pipeline_response): deserialized = self._deserialize("AccessReviewDecisionListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.next_link or None, AsyncList(list_of_elem)
[ 297, 365 ]
def METHOD_NAME(self, x, level, left, right, maxiter, trail=''): n = len(x) if level <= 0 and n: s = '...' else: newlevel = level - 1 repr1 = self.repr1 pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] if n > maxiter: pieces.append('...') s = ', '.join(pieces) if n == 1 and trail: right = trail + right return '%s%s%s' % (left, s, right)
[ 92, 2439 ]
def METHOD_NAME(self, instance): invalid = self.get_invalid(instance) if invalid: self.log.error("Invalid nodes: {0}".format(invalid)) raise PublishValidationError( ("Invalid particle caches in instance. " "See logs for details."))
[ 356 ]
def METHOD_NAME(self): my_analyzer = RegexTokenizer("[a-zA-Z_]+") | LowercaseFilter() | StopFilter() METHOD_NAME = Schema( h=TEXT(stored=True, analyzer=my_analyzer), gnx=ID(stored=True), b=TEXT(analyzer=my_analyzer), parent=ID(stored=True), doc=ID(stored=True), ) return METHOD_NAME
[ 135 ]
def METHOD_NAME(logger: logging.Logger, file: Path | None = None) -> logging.Handler: """ Add a logging handler. If ``file`` is specified, log to file. Otherwise, add a handler to stdout. """ fmt = '[%(asctime)s] %(levelname)s (%(threadName)s:%(name)s) %(message)s' datefmt = '%Y-%m-%d %H:%M:%S' formatter = logging.Formatter(fmt, datefmt) if file is None: # Log to stdout. handler = logging.StreamHandler(sys.stdout) else: handler = logging.FileHandler(file) handler.setLevel(level=logging.DEBUG) # Print all the logs. handler.setFormatter(formatter) logger.addHandler(handler) return handler
[ 238, 1519 ]
def METHOD_NAME(self): self.assertEqual( "https://www.simply-cookit.com/sites/default/files/styles/square/public/assets/image/2021/03/gnocchi-mit-zuckerschoten-und-getrockneten-tomaten-in-parmesansosse_portrait.jpg?h=526df0cf&itok=wxxX6IM7", self.harvester_class.image(), )
[ 9, 660 ]
def METHOD_NAME(data): """Makes sure the dtype of the data is accetpable to vispy. Acceptable types are int8, uint8, int16, uint16, float32. Parameters ---------- data : np.ndarray Data that will need to be of right type. Returns ------- np.ndarray Data that is of right type and will be passed to vispy. """ dtype = np.dtype(data.dtype) if dtype in texture_dtypes: return data try: dtype = { "i": np.float32, "f": np.float32, "u": np.uint16, "b": np.uint8, }[dtype.kind] except KeyError as e: # not an int or float raise TypeError( trans._( 'type {dtype} not allowed for texture; must be one of {textures}', deferred=True, dtype=dtype, textures=set(texture_dtypes), ) ) from e return data.astype(dtype)
[ 1112, 365, 1249 ]
def METHOD_NAME( self, df, args ): request = ml_pb2.PredictCall(context=self.context, df=pickle.dumps(df), args=json.dumps(args)) resp = self.stub.Predict(request) logger.info("%s.learn: returned error - %s, error_message - %s", self.__class__.__name__, resp.error_code, resp.error_message) if resp.error_code and resp.error_message: raise Exception(resp.error_message) return pickle.loads(resp.data_frame)
[ 2103 ]
def METHOD_NAME(dir=os.curdir): """ Find all files under 'dir' and return the list of full filenames. Unless dir is '.', return full filenames with dir prepended. """ files = _find_all_simple(dir) if dir == os.curdir: make_rel = functools.partial(os.path.relpath, start=dir) files = map(make_rel, files) return list(files)
[ 7892 ]
def METHOD_NAME(self): CloudPlatform.objects.all().delete()
[ 0, 1 ]
def METHOD_NAME(): y_none = pd.Series([None]) y_pdna = pd.Series([pd.NA]) y_nan = pd.Series([np.nan]) y_all_null = pd.Series([None, np.nan, np.nan]) with pytest.raises(ValueError, match="Less than 2"): detect_problem_type(y_none) with pytest.raises(ValueError, match="Less than 2"): detect_problem_type(y_pdna) with pytest.raises(ValueError, match="Less than 2"): detect_problem_type(y_nan) with pytest.raises(ValueError, match="Less than 2"): detect_problem_type(y_all_null)
[ 9, 4082, 98, 5182 ]
def METHOD_NAME(self): parameters = { **self.serialize_query_param( "$filter", self.ctx.args.filter, ), **self.serialize_query_param( "$orderby", self.ctx.args.orderby, ), **self.serialize_query_param( "$top", self.ctx.args.top, ), **self.serialize_query_param( "api-version", "2022-11-01", required=True, ), } return parameters
[ 539, 386 ]
def METHOD_NAME(mocker): x = torch.ones((10, 4, 4, 4)) indexes = torch.LongTensor([0, 1, 2]) mock_tensor_meta = mocker.stub traced_x = TracedTensor.from_torch_tensor(torch.ones((10, 4, 4, 4)), mock_tensor_meta) traced_indexes = TracedTensor.from_torch_tensor(torch.LongTensor([0, 1, 2]), mock_tensor_meta) SHAPE_1 = [3, 4, 4, 4] SHAPE_2 = [3, 4, 4] assert list(x[indexes].shape) == SHAPE_1 assert list(x[traced_indexes].shape) == SHAPE_1 assert list(traced_x[indexes].shape) == SHAPE_1 assert list(traced_x[traced_indexes].shape) == SHAPE_1 assert list(x[indexes, indexes].shape) == SHAPE_2 assert list(x[traced_indexes, traced_indexes].shape) == SHAPE_2 assert list(traced_x[indexes, indexes].shape) == SHAPE_2 assert list(traced_x[traced_indexes, traced_indexes].shape) assert list(x[indexes, traced_indexes].shape) == SHAPE_2 assert list(traced_x[indexes, traced_indexes].shape) == SHAPE_2
[ 9, 3296, 768, 5181, 3415 ]
async def METHOD_NAME(sources_paths: SourcesPaths) -> StrippedSourceFileNames: if not sources_paths.files: return StrippedSourceFileNames() source_root = await Get( SourceRoot, SourceRootRequest, SourceRootRequest.for_file(sources_paths.files[0]) ) if source_root.path == ".": return StrippedSourceFileNames(sources_paths.files) return StrippedSourceFileNames(fast_relpath(f, source_root.path) for f in sources_paths.files)
[ 1360, 505, 3336 ]
def METHOD_NAME(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
[ 707, 12623 ]
def METHOD_NAME(*args,**kw): """register(search_function) Register a codec search function. Search functions are expected to take one argument, the encoding name in all lower case letters, and return a tuple of functions (encoder, decoder, stream_reader, stream_writer) (or a CodecInfo object).""" pass
[ 372 ]
def METHOD_NAME(val): return hasattr(val, "__len__") and not isinstance(val, str)
[ 137, 1098, 44 ]
def METHOD_NAME(self, result, error=False, **kwargs): if error: if "message" in result: log.error("Error while getting the list of remote images: {}".format(result["message"])) return self._remote_images = [] for res in result: image = Image(self._emulator, res["path"]) image.location = "remote" image.md5sum = res.get("md5sum") image.filesize = res.get("filesize") self._remote_images.append(image) self.image_list_changed_signal.emit()
[ 19, 2437, 245, 1076 ]
def METHOD_NAME(value): return {TYPE: AMQPTypes.symbol, VALUE: value}
[ 13663, 1608, 99 ]
def METHOD_NAME(self): form = AptNumberWithConfirmationForm(data={"apt_number": "3B", "no_apt_number": True}) form.full_clean() assert form.errors == { "__all__": [ "Please either provide an apartment number or check " 'the "I have no apartment number" checkbox (but not both).' ] }
[ 9, 1807, 45, 3451, 1646, 2171, 472 ]
def METHOD_NAME(get_dataframe): """ Test a non-default value column. """ query = CustomQuery( "SELECT * FROM generate_series(0, 100) AS t(non_default_value_column)", column_names=["non_default_value_column"], ) agg = HistogramAggregation( metric=query, bins=5, value_column="non_default_value_column" ) df = get_dataframe(agg) numpy_histogram, numpy_bins = np.histogram( get_dataframe(query).non_default_value_column, bins=5 ) assert df.value.sum() == len(get_dataframe(query)) assert numpy_histogram.tolist() == df.value.tolist() assert numpy_bins.tolist()[:-1] == pytest.approx(df.lower_edge.tolist()) assert numpy_bins.tolist()[1:] == pytest.approx(df.upper_edge.tolist())
[ 9, 256, 235, 99, 105 ]
def METHOD_NAME(my_resources): def check_outputs(outputs): assert outputs["haha"] == "business" my_resources['dns_ref'].outputs.apply(check_outputs)
[ 9, 1501, 272 ]
def METHOD_NAME(args): A = lambda x: args.__dict__[x] if x in args.__dict__ else None order_name = A('name') db_path = A('db_path') output_file_path = A('output_file') or 'unknown_items_order.txt' if not db_path: raise ConfigError("Probably it will come as a surprise, but you *must* provide an input database path :/") filesnpaths.is_output_file_writable(output_file_path) item_order_names, item_orders_dict = dbops.get_item_orders_from_db(db_path) if not len(item_order_names): raise ConfigError("There are no item orders in this database :/") if not order_name: run.warning("You must choose an order. Here is what you have in here:", header="Available item orders", lc='yellow') for item_order in item_order_names: item_order_name, item_order_distance, item_order_clustering = item_order.split(':') nl_after = 1 if item_order == item_order_names[-1] else 0 if item_order_distance: run.info_single("%s (newick; distance: %s, clustering: %s)." % (item_order_name, item_order_distance, item_order_clustering), nl_after=nl_after) else: run.info_single("%s (list)." % (item_order_name), nl_after=nl_after) items_order_of_interest = None for item_order in item_order_names: item_order_name, item_order_distance, item_order_clustering = item_order.split(':') if order_name == item_order_name: items_order_of_interest = item_orders_dict[item_order] if not items_order_of_interest: raise ConfigError("The item order '%s' is not one of the item orders in the database. This what you " "have in there: '%s'." % (order_name, ', '.join(item_order_names))) order_data_type_newick = items_order_of_interest['type'] == 'newick' run.info("Database", db_path) run.info("Database type", utils.get_db_type(db_path)) run.info("Order name", order_name) run.info("Order data type", 'newick' if order_data_type_newick else 'basic') if order_data_type_newick: open(output_file_path, 'w').write('%s\n' % items_order_of_interest['data']) else: open(output_file_path, 'w').write('%s\n' % '\n'.join(items_order_of_interest['data'])) run.info("Output file", output_file_path, mc='red')
[ 57 ]
def METHOD_NAME(valid_owner, invalid_owner): """Confirm rejection of invalid owners even mixed with good ones.""" assert not sut.valid_owners([valid_owner, invalid_owner])
[ 9, 532, 1384, 11370, 532 ]
def METHOD_NAME(cls) -> Role: return cls._get_instance()._guest
[ 6483 ]
def METHOD_NAME(*arrays, **kwargs): from awkward.highlevel import Array, ArrayBuilder, Record behavior = kwargs.get("behavior") if behavior is not None: # An explicit 'behavior' always wins. return behavior copied = False highs = ( Array, Record, ArrayBuilder, ) for x in arrays[::-1]: if isinstance(x, highs) and x.behavior is not None: if behavior is None: behavior = x.behavior elif behavior is x.behavior: pass elif not copied: behavior = dict(behavior) behavior.update(x.behavior) copied = True else: behavior.update(x.behavior) return behavior
[ 3415, 47 ]
def METHOD_NAME(self): return "test-project"
[ 155 ]
def METHOD_NAME(logits, labels): loss = tf.reduce_sum( tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels)) # Scale loss by global batch size. return loss * (1. / FLAGS.batch_size)
[ 226, 1572 ]
def METHOD_NAME(self): args = [] for var in VARIANTS: args.extend(self.enable_or_disable(var)) args.append("--disable-sphinx") args.extend(self.enable_or_disable("shared")) args.extend(self.with_or_without("pic")) return args
[ 111, 335 ]
def METHOD_NAME( use_zeros_for_missing: bool, eval_only: bool, data_dir: str, text_agg: str, text_feat: str, split_name: str, dataset_name: str, cls_partition: str, root_feat_folder: str, text_dim: int, num_test_captions: int, restrict_train_captions: int, logger: logging.Logger, max_tokens: Dict[str, int], raw_input_dims: HashableOrderedDict, feat_aggregation: HashableDict, ): print(f"refreshing cache for {dataset_name} data loader [{split_name}]") kwargs = dict( data_dir=Path(data_dir), text_dim=text_dim, logger=logger, eval_only=eval_only, text_agg=text_agg, text_feat=text_feat, max_tokens=max_tokens, split_name=split_name, cls_partition=cls_partition, raw_input_dims=raw_input_dims, root_feat_folder=root_feat_folder, feat_aggregation=feat_aggregation, num_test_captions=num_test_captions, use_zeros_for_missing=use_zeros_for_missing, restrict_train_captions=restrict_train_captions, ) if dataset_name == "MSRVTT": dataset = MSRVTT(**kwargs) return dataset
[ 126, 467 ]
def METHOD_NAME(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = False) -> NNCFTensor: return TFNNCFTensor(tf.METHOD_NAME(x.tensor, axis=axis, keepdims=keepdims))
[ 332, 1835 ]
def METHOD_NAME(): def callback(env): lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train) if env.iteration - env.begin_iteration == 5: print('Add a new valid dataset at iteration 5...') env.model.add_valid(lgb_eval_new, 'new_valid') callback.before_iteration = True callback.order = 0 return callback
[ 656, 1097 ]
def METHOD_NAME(model, form, field_name): """ Read tags form field into model field :param model: Model containing tag field :param form: Form data from front-end :param field_name: Name of field shared by model and form :return: True if changes to model tag field were made """ form_tags = read_form_field(form, field_name) if form_tags is not None: if is_json_string(form_tags): # Convert full tag data to comma-delimited slugs string form_tags_json = json.loads(form_tags) form_tag_slugs = list(map(lambda tag: tag['tag_name'], form_tags_json)) form_tags = ','.join(form_tag_slugs) return Tag.merge_tags_field(getattr(model, field_name), form_tags) return False
[ 203, 1029, 101, 114 ]
def METHOD_NAME(resolver: Any, obj: Any, expected: str) -> None: assert resolver.get_str(obj) == expected
[ 9, 19, 3 ]
def METHOD_NAME( df: Union[pd.DataFrame, dd.DataFrame], column: str, output_format: str = "standard", inplace: bool = False, errors: str = "coerce", progress: bool = True, ) -> pd.DataFrame: """ Clean Polish VAT numbers (NIPs) type data in a DataFrame column. Parameters ---------- df A pandas or Dask DataFrame containing the data to be cleaned. col The name of the column containing data of NIP type. output_format The output format of standardized number string. If output_format = 'compact', return string without any separators or whitespace. If output_format = 'standard', return string with proper separators and whitespace. (default: "standard") inplace If True, delete the column containing the data that was cleaned. Otherwise, keep the original column. (default: False) errors How to handle parsing errors. - ‘coerce’: invalid parsing will be set to NaN. - ‘ignore’: invalid parsing will return the input. - ‘raise’: invalid parsing will raise an exception. (default: 'coerce') progress If True, display a progress bar. (default: True) Examples -------- Clean a column of NIP data. >>> df = pd.DataFrame({ "nip": [ "PL 8567346215", "PL 8567346216",] }) >>> clean_pl_nip(df, 'nip') nip nip_clean 0 PL 8567346215 856-734-62-15 1 PL 8567346216 NaN """ if output_format not in {"compact", "standard"}: raise ValueError( f"output_format {output_format} is invalid. " 'It needs to be "compact" or "standard".' ) # convert to dask df = to_dask(df) # To clean, create a new column "clean_code_tup" which contains # the cleaned values and code indicating how the initial value was # changed in a tuple. Then split the column of tuples and count the # amount of different codes to produce the report df["clean_code_tup"] = df[column].map_partitions( lambda srs: [_format(x, output_format, errors) for x in srs], meta=object, ) df = df.assign( _temp_=df["clean_code_tup"].map(itemgetter(0)), ) df = df.rename(columns={"_temp_": f"{column}_clean"}) df = df.drop(columns=["clean_code_tup"]) if inplace: df[column] = df[f"{column}_clean"] df = df.drop(columns=f"{column}_clean") df = df.rename(columns={column: f"{column}_clean"}) with ProgressBar(minimum=1, disable=not progress): df = df.compute() return df
[ 1356, 10245, 14358 ]
def METHOD_NAME(self): """ Returns the geoJSON compliant representation of this location :returns: a ``pyowm.utils.geo.Point`` instance """ if self.lon is None or self.lat is None: return None return geo.Point(self.lon, self.lat)
[ 24, 16977 ]
def METHOD_NAME(self, epoch: int): """ This method is called in every "step" of the search. Args: epoch: epoch number """ # For reference, this is the code inside new_epoch in RandomSearch: # model = torch.nn.Module() # model.arch = self.search_space.clone() # model.arch.sample_random_architecture(dataset_api=self.dataset_api) # model.accuracy = model.arch.query( # self.performance_metric, # self.dataset, # epoch=self.fidelity, # dataset_api=self.dataset_api, # ) # self.sampled_archs.append(model) # self._update_history(model) # All it does ########################################################### ##################### START TODO ########################## # Write your logic here # Also feel free to write new methods in this class ##################### END TODO ########################## ########################################################## self.sampled_archs.append(model) # This line is required. Add your chosen model to sampled_archs here.
[ 80, 1165 ]
def METHOD_NAME(): fig_presenter.METHOD_NAME()
[ 1100 ]
def METHOD_NAME(self, delayed_job, attachment): queue_job = self.env["queue.job"].search( [("uuid", "=", delayed_job.uuid)], limit=1 ) attachment.write({"res_model": "queue.job", "res_id": queue_job.id})
[ 548, 70, 24, 202 ]
def METHOD_NAME(mode, status, ip): # reports wifi connection status print(mode, status, ip) print('Connecting to wifi...') if status is not None: if status: print('Wifi connection successful!') else: print('Wifi connection failed!')
[ 452, 1519 ]
def METHOD_NAME(backup_policy_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, vault_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBackupPolicyResult]: """ Gets a backup policy belonging to a backup vault :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str vault_name: The name of the backup vault. """ ...
[ 19, 1001, 54, 146 ]
def METHOD_NAME(tree, join, leaf=identity): """Apply functions onto recursive containers (tree). join - a dictionary mapping container types to functions e.g. ``{list: minimize, tuple: chain}`` Keys are containers/iterables. Values are functions [a] -> a. Examples ======== >>> tree = [(3, 2), (4, 1)] >>> treeapply(tree, {list: max, tuple: min}) 2 >>> def mul(*args): ... total = 1 ... for arg in args: ... total *= arg ... return total >>> treeapply(tree, {list: mul, tuple: lambda *args: sum(args)}) 25 """ for typ in join: if isinstance(tree, typ): return join[typ](*map(functools.partial(METHOD_NAME, join=join, leaf=leaf), tree)) return leaf(tree)
[ -1 ]
async def METHOD_NAME(pipeline_response): deserialized = self._deserialize("OperationList", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.next_link or None, AsyncList(list_of_elem)
[ 297, 365 ]
def METHOD_NAME( shape: Tuple[int], data: List[torch.Tensor], input_name: str, channel_axis: int ) -> torch.Tensor: blob = torch.zeros(shape, dtype=data[0].dtype) for j, idx in enumerate(np.ndindex(blob.shape[channel_axis])): index = tuple(slice(None) if i != channel_axis else idx for i in range(blob.ndim)) blob[index] = data[j] return blob
[ 129, 362, 365 ]
def METHOD_NAME(self, parameter_s=''): """Make magic functions callable without having to type the initial %. Without arguments toggles on/off (when off, you must call it as %automagic, of course). With arguments it sets the value, and you can use any of (case insensitive): - on, 1, True: to activate - off, 0, False: to deactivate. Note that magic functions have lowest priority, so if there's a variable whose name collides with that of a magic fn, automagic won't work for that function (you get the variable instead). However, if you delete the variable (del var), the previously shadowed magic function becomes visible to automagic again.""" arg = parameter_s.lower() mman = self.shell.magics_manager if arg in ('on', '1', 'true'): val = True elif arg in ('off', '0', 'false'): val = False else: val = not mman.auto_magic mman.auto_magic = val print('\n' + self.shell.magics_manager.auto_status())
[ 15810 ]
def METHOD_NAME(database, table, schema, engine=None): meta_res = engine.execute(''' SELECT SC.name as table_name, ST.name as table_column FROM {0}.sys.sysobjects SO, {1}.sys.syscolumns SC, {2}.sys.systypes ST WHERE SO.id = SC.id AND SO.xtype = 'U' AND SO.status >= 0 AND SC.xtype = ST.xusertype AND SO.name = '{3}' ORDER BY SO.name, SC.colorder '''.format(database, database, database, table)).fetchall() meta = [] i = 0 for colData in meta_res: scores = {"key": colData.table_name, "colIndex": i, "dataType": colData.table_column} meta.append(scores) i += 1 return meta
[ 11606, 14108 ]
def METHOD_NAME(): client = ComputeManagementClient( credential=DefaultAzureCredential(), subscription_id="{subscription-id}", ) response = client.virtual_machine_scale_sets.begin_create_or_update( resource_group_name="myResourceGroup", vm_scale_set_name="{vmss-name}", parameters={ "location": "westus", "properties": { "overprovision": True, "upgradePolicy": {"mode": "Manual"}, "virtualMachineProfile": { "hardwareProfile": {"vmSizeProperties": {"vCPUsAvailable": 1, "vCPUsPerCore": 1}}, "networkProfile": { "networkInterfaceConfigurations": [ { "name": "{vmss-name}", "properties": { "enableIPForwarding": True, "ipConfigurations": [ { "name": "{vmss-name}", "properties": { "subnet": { "id": "/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/{existing-virtual-network-name}/subnets/{existing-subnet-name}" } }, } ], "primary": True, }, } ] }, "osProfile": { "adminPassword": "{your-password}", "adminUsername": "{your-username}", "computerNamePrefix": "{vmss-name}", }, "storageProfile": { "imageReference": { "offer": "WindowsServer", "publisher": "MicrosoftWindowsServer", "sku": "2016-Datacenter", "version": "latest", }, "osDisk": { "caching": "ReadWrite", "createOption": "FromImage", "managedDisk": {"storageAccountType": "Standard_LRS"}, }, }, "userData": "RXhhbXBsZSBVc2VyRGF0YQ==", }, }, "sku": {"capacity": 3, "name": "Standard_D1_v2", "tier": "Standard"}, }, ).result() print(response)
[ 57 ]
def METHOD_NAME(self): return self.variable
[ 19, 1210 ]
def METHOD_NAME(self, n): for _ in range(n): num_v = np.random.randint(self.min_num_v, self.max_num_v) path_len = np.random.randint(2, num_v // 2) g = nx.lollipop_graph(m=num_v - path_len, n=path_len) self.graphs.append(g) self.labels.append(3)
[ 370, -1 ]
def METHOD_NAME(env, file_flag, extra_deps, **kw): usb_update = env.UsbInstall( file_flag, ( env["DIST_DEPENDS"], *extra_deps, ), ) if env["FORCE"]: env.AlwaysBuild(usb_update) return usb_update
[ 238, 8323, 6062, 1030 ]
def METHOD_NAME(self): print("test envelope detection") # absolute value _abs = apply_b_mode(self.test_image, method=Tags.RECONSTRUCTION_BMODE_METHOD_ABS) expected_abs = np.array([[1.2, 0.], [3., 255.]]) assert np.equal(_abs, expected_abs).all(), "computed absolute array and expected don't match" # Hilbert transform hilbert = apply_b_mode(self.test_image, method=Tags.RECONSTRUCTION_BMODE_METHOD_HILBERT_TRANSFORM) expected_hilbert = np.array([[1.2, 0.], [3., 255.]]) assert np.equal(hilbert, expected_hilbert).all(), "computed hilbert transform array and expected don't match"
[ 9, 6175, 381 ]
def METHOD_NAME(self) -> bool: """ true if the group is one of the three system groups (Administrators, Developers, or Guests); otherwise false. """ return pulumi.get(self, "built_in")
[ 4737, 623 ]
def METHOD_NAME(relay_server, wandb_init, assets_path): with relay_server() as relay: run = wandb_init() run.log( { "point_cloud": wandb.Object3D.from_file( str(assets_path("point_cloud.pts.json")) ) } ) run.finish() assert relay.context.summary["point_cloud"][0]["_type"] == "object3D-file" assert relay.context.summary["point_cloud"][0]["path"].endswith(".pts.json")
[ 9, -1, 663 ]
def METHOD_NAME(self, args, val_arg_na, prefix, argname): """_set_args. Since we could not hard code all possible strings in the argparser, we add attributes to args name space according to the user specification from the custom python file they provide the custom model the user wrote should have model.dict_net_module_na2arg_na with something like {"net1":"name1", "net2":"name2"} val_arg_na below will be filled with "name1" for instance python main_out.py –te_d=caltech –task=mini_vlcs –debug –bs=3 –apath=examples/algos/demo_custom_model.py –aname=custom –nname_argna2val net1 –nname_argna2val alexnet :param args: the namespace of command line arguemnts :param val_arg_na: the custom argument name the user specified :param prefix: nname or npath to be consistent with the rest of the package :param argname: nname_argna2val or "npath_argna2val", hard coded """ if getattr(args, argname) is None: setattr(args, prefix+val_arg_na, None) return list_args = getattr(args, argname) ind = list_args.index(val_arg_na) if ind+1 >= len(list_args): # list of args always even length raise RuntimeError("\n nname_argna2val or npath_argna2val should \ \n always be specified in pairs instead of \ odd number:\ \n %s" % ( str(list_args))) val = list_args[ind+1] # add attributes to namespaces args, the attributes are provided by # user in the custom model file setattr(args, prefix+val_arg_na, val)
[ 0, 335 ]
def METHOD_NAME(self): """ Test :func:`colour.models.rgb.transfer_functions.itur_bt_709.\
[ 9, 1674, 661, 930, 3580, 3581, 1484 ]
def METHOD_NAME(self, amount: int = 4) -> None: """Increase the indentation level by ``amount``, default 4.""" self._level += amount
[ 2531 ]
def METHOD_NAME(): assert ( yaml_utils.load( io.StringIO( dedent( """\ base: foo build-base: core22 """ ) ) ) == { "base": "foo", "build-base": "core22", } )
[ 9, 406, 557, 56, 414 ]
def METHOD_NAME(s): if HYSTERROR & debuglevel: syslog.openlog("FANCONTROL-HYST", syslog.LOG_PID) syslog.syslog(syslog.LOG_ERR, s)
[ 11286, 168 ]
def METHOD_NAME( func: Callable[TParams, TReturn] ) -> Callable[TParams, Optional[TReturn]]: """Skip test run if we are in ASAN mode.""" @wraps(func) def wrapper(*args: TParams.args, **kwargs: TParams.kwargs) -> Optional[TReturn]: if is_asan_or_tsan(): print("Skipping test run since we are in ASAN mode.") return return func(*args, **kwargs) return wrapper
[ 2423, 217, 3109 ]
def METHOD_NAME(self, fledge_url, wait_time, request_params, total_count, audit_count, storage_plugin): # wait for Fledge start, first test only, once in the iteration before start if request_params == '': time.sleep(wait_time) conn = http.client.HTTPConnection(fledge_url) conn.request("GET", '/fledge/audit{}'.format(request_params)) r = conn.getresponse() assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) elems = jdoc['audit'] assert len(jdoc), "No data found" assert total_count == jdoc['totalCount'] assert audit_count == len(elems)
[ 9, 235, 19, 1422 ]
def METHOD_NAME(test_options): """Fixture to flag test using a large memory consumption. This can be skipped with `--low-mem`. """ if test_options.TEST_LOW_MEM: pytest.skip(test_options.TEST_LOW_MEM_REASON, allow_module_level=True)
[ 1080, 1953, 1645 ]
def METHOD_NAME(self, completer): self.completer = completer self.initialize_completer()
[ 0, 11583 ]
def METHOD_NAME(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(self): "Get the public identifier of the entity where the exception occurred." return self._locator.METHOD_NAME()
[ 19, 1609, 147 ]
def METHOD_NAME(self, request, *args, **kwargs): result = { 'status': 200, 'message': 'OK' } d = request.data if 'pos_ini' in d and 'pos_fim' in d: if d['pos_ini'] != d['pos_fim']: pk = kwargs['pk'] TipoMateriaLegislativa.objects.reposicione(pk, d['pos_fim']) return Response(result)
[ 194, 195 ]
def METHOD_NAME(self) -> None: self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME( self, resource_group_name, # type: str workspace_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.ListAmlUserFeatureResult"] """Lists all enabled features for a workspace. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: Name of Azure Machine Learning workspace. :type workspace_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ListAmlUserFeatureResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ListAmlUserFeatureResult] :raises: ~azure.core.exceptions.HttpResponseError """ api_version = kwargs.pop('api_version', "2023-04-01-preview") # type: str cls = kwargs.pop('cls', None) # type: ClsType["_models.ListAmlUserFeatureResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, api_version=api_version, template_url=self.METHOD_NAME.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, api_version=api_version, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("ListAmlUserFeatureResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data )
[ 245 ]
def METHOD_NAME(end_time: Optional[pulumi.Input[str]] = None, entity_id: Optional[pulumi.Input[str]] = None, kinds: Optional[pulumi.Input[Optional[Sequence[Union[str, 'EntityTimelineKind']]]]] = None, number_of_bucket: Optional[pulumi.Input[Optional[int]]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, start_time: Optional[pulumi.Input[str]] = None, workspace_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetEntitiesGetTimelineResult]: """ Timeline for an entity. :param str end_time: The end timeline date, so the results returned are before this date. :param str entity_id: entity ID :param Sequence[Union[str, 'EntityTimelineKind']] kinds: Array of timeline Item kinds. :param int number_of_bucket: The number of bucket for timeline queries aggregation. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str start_time: The start timeline date, so the results returned are after this date. :param str workspace_name: The name of the workspace. """ ...
[ 19, 5399, 19, 6938, 146 ]
def METHOD_NAME(self): filter_file( r"(<INSTALL_INTERFACE:include>)", r"\1 {0}/include".format(self.spec["hsa-rocr-dev"].prefix), "CMakeLists.txt", )
[ 1575 ]
def METHOD_NAME(data, dtype="int64"): """Return Segment Id from data """ unique, index = paddle.unique(data, return_inverse=True, dtype=dtype) return unique, index
[ 2768, 4373 ]
def METHOD_NAME(resource_id: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetResolverFirewallConfigResult]: """ `route53.ResolverFirewallConfig` provides details about a specific a Route 53 Resolver DNS Firewall config. This data source allows to find a details about a specific a Route 53 Resolver DNS Firewall config. ## Example Usage The following example shows how to get a firewall config using the VPC ID. ```python import pulumi import pulumi_aws as aws example = aws.route53.get_resolver_firewall_config(resource_id="vpc-exampleid") ``` :param str resource_id: The ID of the VPC from Amazon VPC that the configuration is for. The following attribute is additionally exported: """ ...
[ 19, 1836, 650, 200, 146 ]
def METHOD_NAME(self, source, icon_fn, size): dest = source.resize((size, size)) dest.save(icon_fn, 'png')
[ 197, 24 ]
def METHOD_NAME(): """ Return the data schema for a record in MongoDB. It's a dictionary where: - key is schema attribute name - a value is a tuple of (default value, expected data type) :return: a dictionary """ doc = {'pileupName': ('', str), 'pileupType': ('', str), 'insertTime': (0, int), 'lastUpdateTime': (0, int), 'expectedRSEs': ([], list), 'currentRSEs': ([], list), 'fullReplicas': (0, int), 'campaigns': ([], list), 'containerFraction': (1.0, float), 'replicationGrouping': ('', str), 'activatedOn': (0, int), 'deactivatedOn': (0, int), 'active': (False, bool), 'pileupSize': (0, int), 'ruleIds': ([], list)} return doc
[ 135 ]
def METHOD_NAME(): char1d = (ctypes.c_char * 10)() int1d = (ctypes.c_int * 15)() long1d = (ctypes.c_long * 7)() for carray in (char1d, int1d, long1d): info = m.get_buffer_info(carray) assert info.itemsize == ctypes.sizeof(carray._type_) assert info.size == len(carray) assert info.ndim == 1 assert info.shape == [info.size] assert info.strides == [info.itemsize] assert not info.readonly
[ 9, 5578, 877, 2481 ]
def METHOD_NAME(dev): return dev.edt_node and dev.edt_node.path or dev.sym.name
[ 828, 157, 3 ]
def METHOD_NAME(self) -> float: return self.__read_file(self.export_file)
[ 203, 294 ]
def METHOD_NAME(keyring_type): match keyring_type: case "failed": return FailedKeyring() case _: return TestKeyring()
[ 3341, 384, 44, 47, 2953 ]
async def METHOD_NAME( self, resource_group_name: str, server_name: str, database_name: str, maintenance_window_options_name: str, **kwargs: Any ) -> _models.MaintenanceWindowOptions: """Gets a list of available maintenance windows. :param resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. Required. :type resource_group_name: str :param server_name: The name of the server. Required. :type server_name: str :param database_name: The name of the database to get maintenance windows options for. Required. :type database_name: str :param maintenance_window_options_name: Maintenance window options name. Required. :type maintenance_window_options_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: MaintenanceWindowOptions or the result of cls(response) :rtype: ~azure.mgmt.sql.models.MaintenanceWindowOptions :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2020-11-01-preview")) cls: ClsType[_models.MaintenanceWindowOptions] = kwargs.pop("cls", None) request = build_get_request( resource_group_name=resource_group_name, server_name=server_name, database_name=database_name, subscription_id=self._config.subscription_id, maintenance_window_options_name=maintenance_window_options_name, api_version=api_version, template_url=self.METHOD_NAME.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) _stream = False pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=_stream, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("MaintenanceWindowOptions", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
[ 19 ]
def METHOD_NAME(self, state, isod_value): """ Function triggered when the checkbox of a structure is checked / unchecked. Update the list of selected structures. Update the DICOM view. :param state: True if the checkbox is checked, False otherwise. :param isod_value: Percentage of isodose. """ selected_doses = self.patient_dict_container.get("selected_doses") if state: # Add the dose to the list of selected doses selected_doses.append(isod_value) else: # Remove dose from list of previously selected doses selected_doses.remove(isod_value) self.patient_dict_container.set("selected_doses", selected_doses) # Update the dicom view self.request_update_isodoses.emit()
[ 4440, 7088 ]
def METHOD_NAME( self, **kwargs # type: Any ): # type: (...) -> Iterable["models.SwiftletOperationListResult"] """Gets a list of Swiftlet operations. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SwiftletOperationListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~swiftlet_management_client.models.SwiftletOperationListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SwiftletOperationListResult"] error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = "2020-03-01-preview" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = 'application/json' if not next_link: # Construct URL url = self.METHOD_NAME.metadata['url'] # type: ignore # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('SwiftletOperationListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data )
[ 245 ]
def METHOD_NAME(): opts = get_cli().parse_args() config = Config.load() status_filter = {ExerciseStatus.Active, ExerciseStatus.Beta} if opts.include_deprecated: status_filter.add(ExerciseStatus.Deprecated) if opts.include_wip: status_filter.add(ExerciseStatus.WIP) exercises = config.exercises.all(status_filter) if opts.exercises: # test specific exercises exercises = [ e for e in exercises if e.slug in opts.exercises ] not_found = [ slug for slug in opts.exercises if not any(e.slug == slug for e in exercises) ] if not_found: for slug in not_found: if slug not in exercises: print(f"unknown or disabled exercise '{slug}'") raise SystemExit(1) print(f'TestEnvironment: {sys.executable.capitalize()}') print(f'Runner: {opts.runner}\n\n') failures = [] for exercise in exercises: print('# ', exercise.slug) if not exercise.test_file: print('FAIL: File with test cases not found') failures.append('{} (FileNotFound)'.format(exercise.slug)) else: if check_assignment(exercise, runner=opts.runner, quiet=opts.quiet): failures.append('{} (TestFailed)'.format(exercise.slug)) print('') if failures: print('FAILURES: ', ', '.join(failures)) raise SystemExit(1) else: print('SUCCESS!')
[ 57 ]
def METHOD_NAME(): """ This fixture returns the Boto Session instance for testing. """ from localstack.testing.aws.util import base_aws_session return base_aws_session()
[ 874, 240 ]
def METHOD_NAME(self): with open(self.path, 'rb') as f: fcontents = f.read() self.comments = self._findComments(fcontents)
[ 214 ]
def METHOD_NAME(self: Any, mock_method: Any) -> None: """ Test Extraction from single result from query """ extractor = SQLAlchemyExtractor() extractor.results = [('test_result')] extractor.init(Scoped.get_scoped_conf(conf=self.conf, scope=extractor.get_scope())) results = extractor.extract() self.assertEqual(results, 'test_result')
[ 9, 7252, 41, 97, 539, 1571 ]
def METHOD_NAME(self) -> NamedUser: self._completeIfNotSet(self._user) return self._user.value
[ 21 ]
def METHOD_NAME(self): d = MissingDependencyResolver(elf_files=["/bin/bash", "/bin/sh"]) self.assertThat(d._stage_packages_dependencies, Equals({"bash", "dash"})) self.assertThat(d._unhandled_dependencies, Equals(set())) echoer = mock.Mock() d.print_resolutions( part_name="fake-part", stage_packages_exist=True, echoer=echoer ) echoer.warning.assert_called_once_with( "The 'fake-part' part is missing libraries that are not included " "in the snap or base. They can be satisfied by adding the " "following entries to the existing stage-packages for this part:\n- bash\n- dash" ) echoer.reset_mock() d.print_resolutions( part_name="fake-part", stage_packages_exist=False, echoer=echoer ) echoer.warning.assert_called_once_with( "The 'fake-part' part is missing libraries that are not included " "in the snap or base. They can be satisfied by adding the " "following entry for this part\nstage-packages:\n- bash\n- dash" )
[ 9, 75, 3164, 2975 ]
def METHOD_NAME( generated_docs, service_name, client, waiter_model ): ref_lines = ['=======', 'Waiters', '=======', 'The available waiters are:'] for waiter_name in waiter_model.waiter_names: ref_lines.append(f' {service_name}/waiter/{waiter_name}') for waiter_name in waiter_model.waiter_names: _assert_contains_lines_in_order( [ '.. py:class:: {}.Waiter.{}'.format( client.__class__.__name__, waiter_name ), ' waiter = client.get_waiter(\'%s\')' % xform_name(waiter_name), ' .. py:method:: wait(', ], get_nested_file_contents(service_name, 'waiter', waiter_name), ) _assert_contains_lines_in_order(ref_lines, generated_docs)
[ 638, 220, 8413, 1200 ]
def METHOD_NAME(seq: Iterable[str], sep: str = ", ", conjuction: str = "or") -> str: ''' Join together sequences of strings into English-friendly phrases using the conjunction ``or`` when appropriate. Args: seq (seq[str]) : a sequence of strings to nicely join sep (str, optional) : a sequence delimiter to use (default: ", ") conjunction (str or None, optional) : a conjuction to use for the last two items, or None to reproduce basic join behaviour (default: "or") Returns: a joined string Examples: >>> nice_join(["a", "b", "c"]) 'a, b or c' ''' seq = [str(x) for x in seq] if len(seq) <= 1 or conjuction is None: return sep.join(seq) else: return f"{sep.join(seq[:-1])} {conjuction} {seq[-1]}"
[ 5742, 2831 ]
def METHOD_NAME(self): return "GET"
[ 103 ]
def METHOD_NAME(_db, library): config = Configuration.integration('Axis 360') if not config: print("No Axis 360 configuration, not creating a Collection for it.") return print("Creating Collection object for Axis 360 collection.") username = config.get('username') password = config.get('password') library_id = config.get('library_id') # This is not technically a URL, it's "production" or "staging", # but it's converted into a URL internally. url = config.get('server') collection, ignore = get_one_or_create( _db, Collection, protocol=Collection.AXIS_360, name="Axis 360" ) library.collections.append(collection) collection.external_integration.username = username collection.external_integration.password = password collection.external_account_id = library_id collection.external_integration.url = url
[ 197, 2227 ]
def METHOD_NAME(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]: for pkgpath_abs in _iter_directories(path): pkgpath_rel = os.path.relpath(pkgpath_abs, path) with factory.begin(pkgpath_rel) as pkg: manifests = [filename for filename in os.listdir(pkgpath_abs) if filename.endswith('.yaml')] for manifest in manifests: try: with open(os.path.join(pkgpath_abs, manifest), 'r') as fd: manifest_data = yaml.safe_load(fd) except UnicodeDecodeError: pkg.log(f'failed to decode {manifest}, probably UTF-16 garbage', Logger.ERROR) continue except yaml.MarkedYAMLError as e: if e.problem_mark: pkg.log(f'YAML error in {manifest} at line {e.problem_mark.line}: {e.problem}', Logger.ERROR) else: pkg.log(f'YAML error in {manifest}: {e.problem}', Logger.ERROR) continue _parse_manifest(manifest_data, pkg) # skip manifests/ at the left and version directory at the right relevant_path = '/'.join(pkgpath_rel.split('/')[1:-1]) pkg.add_name(relevant_path, NameType.WINGET_PATH) pkg.set_extra_field('path', pkgpath_rel) if not pkg.version: pkg.log('could not parse required information from all manifests, skipping', Logger.ERROR) else: yield pkg
[ 84, 214 ]
def METHOD_NAME(obj): """ Finds all common dimension keys in the object including subsets of dimensions. If there are is no common subset of dimensions, None is returned. """ from .spaces import HoloMap dim_groups = obj.traverse(lambda x: tuple(x.kdims), (HoloMap,)) if dim_groups: dgroups = [frozenset(d.name for d in dg) for dg in dim_groups] return all(g1 <= g2 or g1 >= g2 for g1 in dgroups for g2 in dgroups) return True
[ 4431 ]
def METHOD_NAME(self): return self.name
[ 19, 156 ]
def METHOD_NAME(self): """test `pbench-agent` section in config file""" expected_error_msg = "'pbench-agent': []" with pytest.raises(BadConfig) as exc: PbenchAgentConfig(self.config) assert expected_error_msg in str(exc)
[ 9, 1068, 1849, 200 ]
def METHOD_NAME(self): data = { 'sn': '', 'barcode': '', } form = OneRequiredTestForm(data) self.assertFalse(form.is_valid())
[ 9, 130, 1205, 1646, 35, -1 ]
def METHOD_NAME(self, obj: models.Model, timestamp: int) -> str: # timestamp is number of days since 2020-1-1. Converted to # base 36, this gives us a 3 digit string until about 2141 ts_b36 = int_to_base36(timestamp) hash_string = salted_hmac( self.key_salt, self._make_hash_value(obj, timestamp), secret=self.secret, ).hexdigest()[ ::2 ] # Limit to 20 characters to shorten the URL. return "%s-%s" % (ts_b36, hash_string)
[ 93, 466, 41, 2722 ]
def METHOD_NAME( pcluster_config_reader, cfn_stacks_factory, test_datadir, clusters_factory, ): cluster_config = pcluster_config_reader() cluster: Cluster = clusters_factory(cluster_config) head_node_role = cluster.cfn_resources.get("RoleHeadNode") iam = boto3.client("iam") policies = iam.get_role_policy(RoleName=head_node_role, PolicyName="parallelcluster") policies = {policy.get("Sid"): policy for policy in policies.get("PolicyDocument").get("Statement")} assert_that(policies).does_not_contain_key("EC2GetComputeConsoleOutput") for statement in (policies.get(sid) for sid in policies): action = statement.get("Action") assert_that( "ec2:GetConsoleOutput" in action if isinstance(action, list) else action == "ec2:GetConsoleOutput" ).is_false() remote_command_executor = RemoteCommandExecutor(cluster) config = _get_clustermgtd_config(remote_command_executor) assert_that( config.getboolean( "clustermgtd", "compute_console_logging_enabled", fallback=False, ) ).is_false()
[ 9, 2516, 146, 41, 2427, 1295 ]
def METHOD_NAME(self, data: str, datatype: object) -> None: if self._input is None: # Should not happen. return self._input.send_text(data)
[ 365, 1732 ]
def METHOD_NAME(matrix, nan_strategy, nan_replace_value): num_variables = matrix.shape[1] theils_u_matrix_value = torch.ones(num_variables, num_variables) for i, j in itertools.combinations(range(num_variables), 2): x, y = matrix[:, i], matrix[:, j] theils_u_matrix_value[i, j] = _dython_theils_u(x, y, nan_strategy, nan_replace_value) theils_u_matrix_value[j, i] = _dython_theils_u(y, x, nan_strategy, nan_replace_value) return theils_u_matrix_value
[ 15298, 8168, 3597, 430 ]
def METHOD_NAME(): try: # Create a job service object that represent a remote pbs cluster. # The keyword 'pbs' in the url scheme triggers the SGE adaptors # and '+ssh' enables SGE remote access via SSH. js = rs.job.Service(js_url) # Next, we describe the job we want to run. A complete set of job # description attributes can be found in the API documentation. jd = rs.job.Description() jd.environment = {'FILENAME': 'testfile'} jd.wall_time_limit = 1 # minutes jd.executable = '/bin/touch' jd.arguments = ['$FILENAME'] jd.name = "examplejob" # jd.queue = "normal" # jd.project = "TG-MCB090174" jd.working_directory = ".saga/test" jd.output = "examplejob.out" jd.error = "examplejob.err" # Create a new job from the job description. The initial state of # the job is 'New'. job = js.create_job(jd) # Check our job's id and state print("Job State : %s" % (job.state)) # Now we can start our job. print("starting job") job.run() print("Job ID : %s" % (job.id)) print("Job State : %s" % job.state) print("Exitcode : %s" % job.exit_code) print("Exec. hosts : %s" % job.execution_hosts) print("Create time : %s" % job.created) print("Start time : %s" % job.started) print("End time : %s" % job.finished) js.close() except rs.SagaException as e: # Catch all saga exceptions print("An exception occured: (%s) %s " % (e.type, (str(e)))) # Get the whole traceback in case of an exception - # this can be helpful for debugging the problem print(" \n*** Backtrace:\n %s" % e.traceback) return -1
[ 447 ]