text: string (lengths 15 to 7.82k)
ids: sequence (lengths 1 to 7)
def METHOD_NAME(): """ Test the link attribute parsing """ # No link should default to an empty list assert Links().get() == [] # Single string (default relation) assert Links(data='/fmf/id').get() == [Link(relation='relates', target='/fmf/id')] # Multiple strings (default relation) assert Links(data=['one', 'two']).get() == [ Link(relation='relates', target='one'), Link(relation='relates', target='two')] # Multiple string mixed relation assert Links(data=['implicit', {'duplicates': 'explicit'}]).get() == [ Link(relation='relates', target='implicit'), Link(relation='duplicates', target='explicit')] # Multiple strings (explicit relation) assert Links(data=[{'parent': 'mom'}, {'child': 'son'}]).get() == [ Link(relation='parent', target='mom'), Link(relation='child', target='son')] # Single dictionary (default relation) assert Links(data={'name': 'foo'}).get() == [ Link(relation='relates', target=FmfId(name='foo'))] # Single dictionary (explicit relation) assert Links(data={'verifies': 'foo'}).get() == [Link(relation='verifies', target='foo')] # Multiple dictionaries family = [{'parent': 'mom', 'note': 'foo'}, {'child': 'son'}] assert Links(data=family).get() == [ Link(relation='parent', target='mom', note='foo'), Link(relation='child', target='son') ] # Selected relations assert Links(data=family).get('parent') == [Link(relation='parent', target='mom', note='foo')] assert Links(data=family).get('child') == [Link(relation='child', target='son')] # Full fmf id fmf_id = tmt.utils.yaml_to_dict(""" blocked-by: url: https://github.com/teemtee/fmf name: /stories/select/filter/regexp note: Need to get the regexp filter working first. """) link = Links(data=fmf_id) assert link.get() == [ Link( relation='blocked-by', target=FmfId( url=fmf_id['blocked-by']['url'], name=fmf_id['blocked-by']['name']), note=fmf_id['note'])] # Invalid links and relations with pytest.raises( SpecificationError, match="Field 'link' must be a string, a fmf id or a list of their combinations," " 'int' found."): Links(data=123) with pytest.raises(SpecificationError, match='Multiple relations'): Links(data={'verifies': 'one', 'blocks': 'another'}) with pytest.raises(SpecificationError, match='Invalid link relation'): Links(data={'depends': 'other'}) # Searching for links links = Links(data=[{'parent': 'mom', 'note': 'foo'}, {'child': 'son', 'note': 'bar'}]) assert links.has_link() assert links.has_link(needle=LinkNeedle()) assert links.has_link(needle=LinkNeedle(relation=r'.*', target=r'.*')) assert links.has_link(needle=LinkNeedle(relation='[a-z]+')) assert links.has_link(needle=LinkNeedle(relation='en')) assert links.has_link(needle=LinkNeedle(target='^mom$')) assert links.has_link(needle=LinkNeedle(target='on')) assert not links.has_link(needle=LinkNeedle(relation='verifies', target='son')) assert not links.has_link(needle=LinkNeedle(relation='parent', target='son')) links = Links(data=[]) assert not links.has_link() assert not links.has_link(needle=LinkNeedle()) assert not links.has_link(needle=LinkNeedle(relation=r'.*', target=r'.*'))
[ 9, 548 ]
async def METHOD_NAME( self, plugin_ctx: PluginCtx, corpora: List[str], subcorpus: str, value: str, value_type: str, value_subformat: str, query_type: str, p_attr: str, struct: str, s_attr: str): corpus_info = await self._corparch.get_corpus_info(plugin_ctx, plugin_ctx.current_corpus.corpname) ans = [] for ident, provider in self._providers.items(): if ident not in corpus_info.query_suggest.providers: continue backend, frontend = provider resp = await backend.find_suggestion(user_id=plugin_ctx.user_id, ui_lang=plugin_ctx.user_lang, maincorp=plugin_ctx.current_corpus, corpora=corpora, subcorpus=subcorpus, value=value, value_type=value_type, value_subformat=value_subformat, query_type=query_type, p_attr=p_attr, struct=struct, s_attr=s_attr) ans.append(frontend.export_data(resp, value, plugin_ctx.user_lang).to_dict()) return ans
[ 416, 3612 ]
def METHOD_NAME(self) -> bool: """Validate the model based on the party type (person/organization).""" if self.party_type == Party.PartyTypes.ORGANIZATION.value: if not self.organization_name or self.first_name or self.middle_initial or self.last_name: return False elif self.party_type == Party.PartyTypes.PERSON.value: if self.organization_name or not (self.first_name or self.middle_initial or self.last_name): return False return True
[ 1205, 346, 44, 365 ]
def METHOD_NAME(obj_or_type: _CData | type[_CData]) -> int: ...
[ 7880 ]
async def METHOD_NAME( discovered_group_template: GoogleWorkspaceGroupTemplate, existing_template_map: dict, group_dir: str, ) -> GoogleWorkspaceGroupTemplate: discovered_group_template.file_path = get_templated_resource_file_path( group_dir, discovered_group_template.properties.email, ) return common_create_or_update_template( discovered_group_template.file_path, existing_template_map, discovered_group_template.resource_id, GoogleWorkspaceGroupTemplate, {}, discovered_group_template.properties, [], )
[ 86, 894, 129, 846, 671 ]
def METHOD_NAME(cls, model_alias: str, object_type: Type[TopLevelOscalModel], args: argparse.Namespace) -> int:
    """Create a top level OSCAL object within the trestle directory, leveraging functionality in add."""
    log.set_log_level_from_args(args)
    trestle_root = args.trestle_root  # trestle root is set via command line in args. Default is cwd.
    if not trestle_root or not file_utils.is_valid_project_root(args.trestle_root):
        raise err.TrestleRootError(f'Given directory {trestle_root} is not a trestle project.')

    plural_path = ModelUtils.model_type_to_model_dir(model_alias)
    desired_model_dir = trestle_root / plural_path / args.output
    desired_model_path = desired_model_dir / (model_alias + '.' + args.extension)
    if desired_model_path.exists():
        raise err.TrestleError(f'OSCAL file to be created here: {desired_model_path} exists.')

    # Create sample model.
    sample_model = generators.generate_sample_model(object_type, include_optional=args.include_optional_fields)
    # Presuming top-level model; not sure how to do the typing for this.
    sample_model.metadata.title = f'Generic {model_alias} created by trestle named {args.output}.'
    sample_model.metadata.last_modified = datetime.now().astimezone()
    sample_model.metadata.oscal_version = trestle.oscal.OSCAL_VERSION
    sample_model.metadata.version = '0.0.0'
    top_element = Element(sample_model, model_alias)

    create_action = CreatePathAction(desired_model_path.resolve(), True)
    write_action = WriteFileAction(
        desired_model_path.resolve(), top_element, FileContentType.to_content_type(desired_model_path.suffix)
    )

    # create a plan to write the directory and file.
    create_plan = Plan()
    create_plan.add_action(create_action)
    create_plan.add_action(write_action)
    create_plan.execute()
    return CmdReturnCodes.SUCCESS.value
[ 129, 279 ]
def METHOD_NAME(self): results1 = ((1496252939, 1496252944, 1), [1.0, None, None, None, 1.0]) results2 = ((1496252939, 1496252949, 1), [1.0, 1.0]) wr1 = WhisperReader(self.worker1, 'hosts.worker1.cpu') node1 = LeafNode('hosts.worker1.cpu', wr1) reader = MultiReader([node1]) (_, values) = reader.merge(results1, results2) self.assertEqual(values, [1.0, 1.0, None, None, 1.0, None, None, None, None, None])
[ 9, 457, 781, 411, 10786, 1038, 3368 ]
def METHOD_NAME(x): return tf.keras.activations.swish(x)
[ 14661 ]
def METHOD_NAME(text): return re.sub(r"\b(a|an|the)\b", " ", text)
[ 188, 4443 ]
def METHOD_NAME(
    network,
    throat_diameter='throat.diameter',
    throat_length='throat.length',
):
    r"""
    Calculate throat volume assuming a square cross-section

    Parameters
    ----------
    %(network)s
    %(Dt)s
    %(Lt)s

    Returns
    -------

    Notes
    -----
    At present this model does NOT account for the volume represented
    by the intersection of the throat with a spherical pore body.
    """
    leng = network[throat_length]
    diam = network[throat_diameter]
    value = leng*diam**2
    return value
[ -1 ]
def METHOD_NAME(self, old_key): new_key = self.__class__._DEPRECATED_TO_NEW_MEMBERS.get(old_key, old_key) is_key_deprecated = old_key != new_key if is_key_deprecated: msg = "'{old_key}' is deprecated; use '{new_key}' instead.".format( old_key=old_key, new_key=new_key ) _warnings.warn(msg, DeprecationWarning) return new_key
[ 19, 80, 59 ]
def METHOD_NAME(layer_input, filter_size, kernel_size=4, strides=2, activation='leakyrelu',
                dropout_rate=g_dropout, norm='inst', dilation=1):
    c = AtrousConvolution2D(filter_size, kernel_size=kernel_size, strides=strides,
                            atrous_rate=(dilation, dilation), padding='same')(layer_input)
    if activation == 'leakyrelu':
        c = LeakyReLU()(c)
    if dropout_rate:
        c = Dropout(dropout_rate)(c)
    if norm == 'inst':
        c = InstanceNormalization()(c)
    return c
[ 4197, 56, 1306 ]
def METHOD_NAME(self) -> Optional[Sequence['outputs.FilterTrackSelectionResponse']]: """ The tracks selection conditions. """ return pulumi.get(self, "tracks")
[ 6520 ]
def METHOD_NAME(self, data, mode):
    """
    Parse search results for items.

    :param data: The raw response from a search
    :param mode: The current mode used to search, e.g. RSS

    :return: A list of items found
    """
    items = []

    with BS4Parser(data, 'html5lib') as html:
        torrent_rows = html('div', class_='release_block')
        if len(torrent_rows) < 2:
            return items

        for row in torrent_rows[1:]:
            try:
                first_cell = row.find('div', class_='release_row_first')
                cells = row('div', class_='release_row')

                title = cells[1].find('div', class_='release_text_contents').get_text().strip()
                download_url = first_cell('a')[-1].get('href')
                if not all([title, download_url]):
                    continue
                download_url = urljoin(self.url, download_url)

                # Provider does not support seeders or leechers.
                seeders = 1
                leechers = 0

                torrent_size = first_cell.find('div', class_='release_size').get_text()
                match_size = ShanaProjectProvider.size_regex.match(torrent_size)
                try:
                    size = convert_size(match_size.group(1) + ' ' + match_size.group(2)) or -1
                except AttributeError:
                    size = -1

                pubdate_raw = cells[0].find('div', class_='release_last').get_text()
                pubdate = self.parse_pubdate(pubdate_raw)

                item = {
                    'title': title,
                    'link': download_url,
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers,
                    'pubdate': pubdate,
                }
                if mode != 'RSS':
                    log.debug('Found result: {0} with {1} seeders and {2} leechers',
                              title, seeders, leechers)

                items.append(item)
            except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                log.exception('Failed parsing provider.')

    return items
[ 214 ]
async def METHOD_NAME(pipeline_response):
    deserialized = self._deserialize("OperationListResult", pipeline_response)
    list_of_elem = deserialized.value
    if cls:
        list_of_elem = cls(list_of_elem)  # type: ignore
    return None, AsyncList(list_of_elem)
[ 297, 365 ]
def METHOD_NAME(): """ True if mandatory environment variables are set otherwise False and print error messages. """ no_missing_env_var = True if os.environ.get('CC_REPO_DIR') is None: LOG.error("When using gerrit output the 'CC_REPO_DIR' environment " "variable needs to be set to the root directory of the " "sources, i.e. the directory where the repository was " "cloned!") no_missing_env_var = False if os.environ.get('CC_CHANGED_FILES') is None: LOG.error("When using gerrit output the 'CC_CHANGED_FILES' " "environment variable needs to be set to the path of " "changed files json from Gerrit!") no_missing_env_var = False return no_missing_env_var
[ 4945, 485, 486, 137, 0 ]
def METHOD_NAME(self): assert VideoChatScheduled(self.start_date).start_date == self.start_date
[ 9, 391, 199 ]
def METHOD_NAME(*args, **kwargs):
    """Kicks off the build process."""
    exit_code = 0  # 0 = success, 1 = failure
    frameworks = [kwargs.get("framework", False)]
    if frameworks == ["all"]:
        frameworks = armory_frameworks
    print(f"EXEC:\tRetrieved version {armory_version}.")
    print("EXEC:\tCleaning up...")
    for key in ["framework", "func"]:
        del kwargs[key]
    for framework in frameworks:
        print(f"EXEC:\tBuilding {framework} container.")
        if status := build_worker(framework, armory_version, **kwargs):
            exit_code = status
    sys.exit(exit_code)
[ 176 ]
def METHOD_NAME() -> PackageMakerWrapper: return PackageMakerWrapper()
[ 1159 ]
def METHOD_NAME(self): return self._errors
[ 1096 ]
def METHOD_NAME(self, name, type, value=None): if type not in self._OveridePcds.keys(): self._OveridePcds[type] = [] self._OveridePcds[type].append((name, value))
[ 238, 16833, 7398 ]
def METHOD_NAME(self): self.client.force_login(self.super_user) response = self.client.post(reverse('apimobile:sync_mobiles_state'), data={}) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'[]')
[ 9, 19, 164, 18, 4085, 5733 ]
def METHOD_NAME(self): print("starting python model optimization cross layer scaling test for non depthwise") # Generating random numbers from a normal distribution for the weights and biases of the current and prev layer np.random.seed(1) total = 2 * 3 * 2 * 2 weight1 = np.array(np.random.randn(total)) bias1 = np.array(np.random.randn(2)) weight2 = np.array(np.random.randn(total)) weight_sz1 = np.array([2, 3, 2, 2]) weight_sz2 = np.array([3, 2, 2, 2]) # Initializing the struct EqualizationParams prev_layer_params = libpymo.EqualizationParams() curr_layer_params = libpymo.EqualizationParams() prev_layer_params.weight = weight1 prev_layer_params.weightShape = weight_sz1 prev_layer_params.bias = bias1 prev_layer_params.isBiasNone = False curr_layer_params.weight = weight2 curr_layer_params.weightShape = weight_sz2 w1, w2, b1, scale_factor = cross_layer_scaling_python_implementation(weight1.reshape(weight_sz1), weight2.reshape(weight_sz2), bias1) rescaling_vector = libpymo.scaleLayerParams(prev_layer_params, curr_layer_params) assert (np.allclose(w1.flatten(), prev_layer_params.weight)) assert (np.allclose(w2.flatten(), curr_layer_params.weight)) assert (np.allclose(b1, prev_layer_params.bias)) assert (np.allclose(scale_factor, rescaling_vector))
[ 9, 436, 94, 500, 501, 434 ]
def METHOD_NAME(): """ "original": "Prerequisite: COMM1140 or ACCT1511 and COMM1180 or (COMM1140 and ECON1102) or FINS1613<br/><br/>", """ return "(COMM1140 || ACCT1511) && (COMM1180 || (COMM1140 && ECON1102) || FINS1613)"
[ 3221, 1305, -1 ]
def METHOD_NAME(self) -> None:
    """Update the action. Defined in child classes if necessary."""
[ 86 ]
def METHOD_NAME(self): """ L{ResolverBase} provides the L{IResolver} interface. """ verifyClass(IResolver, ResolverBase)
[ 9, 1836, 414, 3075, 497, 1836 ]
def METHOD_NAME(train_partitions, val_partitions):
    """Return a function to construct a client.

    The VirtualClientEngine will execute this function whenever a client
    is sampled by the strategy to participate.
    """

    def client_fn(cid: str) -> fl.client.Client:
        """Construct a FlowerClient with its own dataset partition."""
        # Extract partition for client with id = cid
        trainset, valset = train_partitions[int(cid)], val_partitions[int(cid)]
        # Create and return client
        return FlowerClient(trainset, valset)

    return client_fn
[ 19, 340, 667 ]
def METHOD_NAME(self):
    ## Verify login
    data_ret = {'adminUser': 'admin', 'adminPass': '1234567'}
    response = self.MakeRequest('verifyConn', data_ret)
    self.assertEqual(response['verifyConn'], 1)
[ 9, 1162, 4579 ]
def METHOD_NAME(self): return "GET"
[ 103 ]
def METHOD_NAME(dagster_docker_image, job_namespace): return { "execution": { "config": merge_dicts( ( { "job_image": dagster_docker_image, } if dagster_docker_image else {} ), { "job_namespace": job_namespace, "image_pull_policy": image_pull_policy(), "env_config_maps": ["non-existent-config-map"], }, ) }, }
[ 19, 3587, 5595, 202, 1693, 200 ]
def METHOD_NAME(a, is_singular_threshold=1e-16):
    svd_obj = real(a.deep_copy(), True, True, 1e-16)
    u = svd_obj.u
    v = svd_obj.v
    sigma = svd_obj.sigma
    selection = flex.bool(sigma < flex.max(sigma) * is_singular_threshold).iselection()
    inv_sigma = sigma.deep_copy()
    inv_sigma.set_selected(selection, 1.0)
    inv_sigma = 1.0 / inv_sigma
    inv_sigma.set_selected(selection, 0.0)
    ia = scitbx.linalg.reconstruct_svd(v, u, inv_sigma)  # .matrix_transpose(), inv_sigma)
    return (ia, sigma)
[ 3581, 2499, 7505 ]
def METHOD_NAME(endpoint: Optional[pulumi.Input[Optional[str]]] = None, id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRegionResult]: """ `get_region` provides details about a specific AWS region. As well as validating a given region name this resource can be used to discover the name of the region configured within the provider. The latter can be useful in a child module which is inheriting an AWS provider configuration from its parent module. ## Example Usage The following example shows how the resource might be used to obtain the name of the AWS region configured on the provider. ```python import pulumi import pulumi_aws as aws current = aws.get_region() ``` :param str endpoint: EC2 endpoint of the region to select. :param str name: Full name of the region to select. """ ...
[ 19, 1216, 146 ]
def METHOD_NAME(self): "altitude as a floating point value" return float(self.altitude)
[ 1819, 2342 ]
def METHOD_NAME(self, low=0, high=255): """ Compute a threshold mask for the array. """ return (self.image >= low) & (self.image <= high)
[ 853 ]
def METHOD_NAME(smoothed=True):
    mapper = {
        "X_spliced": "M_s" if smoothed else "X_spliced",
        "X_unspliced": "M_u" if smoothed else "X_unspliced",
        "X_new": "M_n" if smoothed else "X_new",
        "X_old": "M_o" if smoothed else "X_old",
        "X_total": "M_t" if smoothed else "X_total",
        # "X_uu": "M_uu" if smoothed else "X_uu",
        # "X_ul": "M_ul" if smoothed else "X_ul",
        # "X_su": "M_su" if smoothed else "X_su",
        # "X_sl": "M_sl" if smoothed else "X_sl",
        # "X_protein": "M_p" if smoothed else "X_protein",
        "X": "X" if smoothed else "X",
    }
    return mapper
[ 19, 3782 ]
def METHOD_NAME(exception): raise exception
[ 1471 ]
def METHOD_NAME(context: ScheduleEvaluationContext): scheduled_date = ( context.scheduled_execution_time.strftime("%Y-%m-%d") if context.scheduled_execution_time else datetime.datetime.now().strftime("%Y-%m-%d") ) return RunRequest( run_key=None, run_config={"ops": {"configurable_op": {"config": {"scheduled_date": scheduled_date}}}}, tags={"date": scheduled_date, "github_test": "test", "okay_t2": "okay"}, )
[ 13243, 202, 507 ]
def METHOD_NAME(filename, headers, rows, tz=None):
    """Send an XLSX file to the client.

    :param filename: The name of the XLSX file
    :param headers: a list of cell captions
    :param rows: a list of dicts mapping captions to values
    :param tz: the timezone for the values that are datetime objects
    :return: a flask response containing the XLSX data
    """
    buf = generate_xlsx(headers, rows, tz=tz)
    return send_file(filename, buf,
                     'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
                     inline=False)
[ 353, 9009 ]
def METHOD_NAME(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(): filename = os.path.join(SAMPLES_DIR, "record_1_test_data.avro") data = [ {"name": "Pierre-Simon Laplace", "age": 77, "Numbers": "TWO"}, ] assert ak.from_avro_file(file=filename).to_list() == data
[ 9, 148, 1170 ]
def METHOD_NAME(self, list_domains): """ setter for self._list_domains """ self._list_domains = list_domains
[ 0, 245, 3902 ]
def METHOD_NAME(self):
    # Issue #10756: Make sure that an unnormalized exception is
    # handled properly
    atexit.register(lambda: 1 / 0)
    self.assertRaises(ZeroDivisionError, atexit._run_exitfuncs)
    self.assertIn("ZeroDivisionError", self.stream.getvalue())
[ 9, 241, 14049 ]
def METHOD_NAME(source1, source2, target): builder = Projection_Builder(source_stores=[source1, source2], target_store=target) items = next(iter(builder.get_items())) assert len(items) == 25
[ 9, 19, 1768 ]
def METHOD_NAME(self): self.name = "Poisson Equation - Forward" self.forward = True self.orderTest()
[ 9, 852, 76 ]
def METHOD_NAME(input_string, *, raise_on_unexpected=False):
    """Returns a tuple of bool (whether mis-encoded utf-8 is present) and str (the converted string)"""
    output = []  # individual characters, join at the end
    is_in_multibyte = False  # True if we're currently inside a utf-8 multibyte character
    multibytes_expected = 0
    multibyte_buffer = []
    mis_encoded_utf8_present = False

    def handle_bad_data(index, character):
        if not raise_on_unexpected:  # not raising, so we dump the buffer into output and append this character
            output.extend(multibyte_buffer)
            multibyte_buffer.clear()
            output.append(character)
            nonlocal is_in_multibyte
            is_in_multibyte = False
            nonlocal multibytes_expected
            multibytes_expected = 0
        else:
            raise ValueError(f"Expected multibyte continuation at index: {index}")

    for idx, c in enumerate(input_string):
        code_point = ord(c)
        if code_point <= 0x7f or code_point > 0xf4:  # ASCII Range data or higher than you get for mis-encoded utf-8:
            if not is_in_multibyte:
                output.append(c)  # not in a multibyte, valid ascii-range data, so we append
            else:
                handle_bad_data(idx, c)
        else:  # potentially utf-8
            if (code_point & 0xc0) == 0x80:  # continuation byte
                if is_in_multibyte:
                    multibyte_buffer.append(c)
                else:
                    handle_bad_data(idx, c)
            else:  # start-byte
                if not is_in_multibyte:
                    assert multibytes_expected == 0
                    assert len(multibyte_buffer) == 0
                    while (code_point & 0x80) != 0:
                        multibytes_expected += 1
                        code_point <<= 1
                    multibyte_buffer.append(c)
                    is_in_multibyte = True
                else:
                    handle_bad_data(idx, c)

        if is_in_multibyte and len(multibyte_buffer) == multibytes_expected:  # output utf-8 character if complete
            utf_8_character = bytes(ord(x) for x in multibyte_buffer).decode("utf-8")
            output.append(utf_8_character)
            multibyte_buffer.clear()
            is_in_multibyte = False
            multibytes_expected = 0
            mis_encoded_utf8_present = True

    if multibyte_buffer:  # if we have left-over data
        handle_bad_data(len(input_string), "")

    return mis_encoded_utf8_present, "".join(output)
[ 3690, 623, 1550, 4428 ]
def METHOD_NAME(self, cmd): if process.system(cmd, sudo=True, shell=True, ignore_status=True): self.fail("Command %s failed" % cmd)
[ 250, 374 ]
def METHOD_NAME(self) -> str: """ The type of the resource. """ return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(self): return self.env["account.payment.term"].create( { "name": "C/O 30", "riba": True, "riba_payment_cost": 5.00, "line_ids": [ ( 0, 0, { "value": "balance", "option": "day_following_month", "days": 1, }, ) ], } )
[ 129, -1 ]
def METHOD_NAME(name, orgname=None, profile="grafana"): """ Ensure that a data source is present. name Name of the data source to remove. orgname Name of the organization from which the data source should be absent. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. """ if isinstance(profile, str): profile = __salt__["config.option"](profile) ret = {"name": name, "result": None, "comment": None, "changes": {}} datasource = __salt__["grafana4.get_datasource"](name, orgname, profile) if not datasource: ret["result"] = True ret["comment"] = "Data source {} already absent".format(name) return ret if __opts__["test"]: ret["comment"] = "Datasource {} will be deleted".format(name) return ret __salt__["grafana4.delete_datasource"](datasource["id"], profile=profile) ret["result"] = True ret["changes"][name] = "Absent" ret["comment"] = "Data source {} was deleted".format(name) return ret
[ 1447 ]
def METHOD_NAME():
    # type: () -> None

    # N.B.: 2.7.18 is EOL as is 3.5.10.
    assert (
        list(
            itertools.chain(
                [(2, 7, patch) for patch in range(19)],
                [(3, 5, patch) for patch in range(1, 11)],
            )
        )
        == iter_compatible_versions("==2.7.*", ">3.5,<3.6")
    )
[ 9, 84, 3892, 295, 894 ]
def METHOD_NAME(standalone_parameters_dataset) -> None:
    parameters = interactive_widget._get_parameters(
        standalone_parameters_dataset
    )
    assert bool(parameters["dependent"])  # not empty
    assert bool(parameters["independent"])  # not empty
[ 9, 19, 386 ]
def METHOD_NAME(name, src, dst):
    """Install a shared library from directory src to directory dst"""
    if sys.platform == "darwin":
        shlib0 = name + ".0.dylib"
        shlib = name + ".dylib"
        install(join_path(src, shlib0), join_path(dst, shlib0))
        os.symlink(shlib0, join_path(dst, shlib))
    else:
        shlib000 = name + ".so.0.0.0"
        shlib0 = name + ".so.0"
        shlib = name + ".so"
        install(join_path(src, shlib000), join_path(dst, shlib000))
        os.symlink(shlib000, join_path(dst, shlib0))
        os.symlink(shlib0, join_path(dst, shlib))
[ 428, 2804 ]
def METHOD_NAME(load): """ Return a dict of all symlinks based on a given path in the repo """ return _gitfs().METHOD_NAME(load)
[ 953, 245 ]
async def METHOD_NAME(
    self,
    resource_group_name: str,
    organization_name: str,
    body: "models.OrganizationResource",
    **kwargs
) -> "models.OrganizationResource":
    """Organization Validate proxy resource.

    Organization Validate proxy resource.

    :param resource_group_name: Resource group name.
    :type resource_group_name: str
    :param organization_name: Organization resource name.
    :type organization_name: str
    :param body: Organization resource model.
    :type body: ~azure.mgmt.confluent.models.OrganizationResource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: OrganizationResource, or the result of cls(response)
    :rtype: ~azure.mgmt.confluent.models.OrganizationResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.OrganizationResource"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-03-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.METHOD_NAME.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'organizationName': self._serialize.url("organization_name", organization_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'OrganizationResource')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ResourceProviderDefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('OrganizationResource', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
[ 187, 1044 ]
def METHOD_NAME(dist: Path, version: str) -> None: items = [item for item in dist.iterdir() if item_ok_for_pypi(item.name)] assert len(items) > 10 assert all(version in item.name for item in items) assert any(item.name.endswith("py3-none-any.whl") for item in items)
[ 7532, 250, 1260 ]
def METHOD_NAME(self): pass
[ 72, 710 ]
def METHOD_NAME(self) -> bool: """ Does this perspective allow pasting?""" return False
[ 1466, 8151 ]
def METHOD_NAME(self, path): """Check that the given path isn't a symlink or outside the doc root.""" path = Path(path) resolved_path = path.resolve() if path.is_symlink(): msg = "Suspicious operation over a symbolic link." log.error(msg, path=str(path), resolved_path=str(resolved_path)) raise SuspiciousFileOperation(msg) docroot = Path(settings.DOCROOT).absolute() if not path.is_relative_to(docroot): msg = "Suspicious operation outside the docroot directory." log.error(msg, path=str(path), resolved_path=str(resolved_path)) raise SuspiciousFileOperation(msg)
[ 250, 4760, 157 ]
def METHOD_NAME(self): """ Returns a multi-line string that contains the configuration of ApMon. This string can be passed to the setDestination method(or to the constructor). It has the same structure as the config file/url contents. """ return {}
[ 19, 200 ]
def METHOD_NAME(alert: Alert) -> Alert:
    wanted_plugins, wanted_config = plugins.routing(alert)

    skip_plugins = False
    for plugin in wanted_plugins:
        if alert.is_suppressed:
            skip_plugins = True
            break
        try:
            alert = plugin.pre_receive(alert, config=wanted_config)
        except TypeError:
            alert = plugin.pre_receive(alert)  # for backward compatibility
        except (RejectException, HeartbeatReceived, BlackoutPeriod, RateLimit, ForwardingLoop, AlertaException):
            raise
        except Exception as e:
            if current_app.config['PLUGINS_RAISE_ON_ERROR']:
                raise RuntimeError(f"Error while running pre-receive plugin '{plugin.name}': {str(e)}")
            else:
                logging.error(f"Error while running pre-receive plugin '{plugin.name}': {str(e)}")
        if not alert:
            raise SyntaxError(f"Plugin '{plugin.name}' pre-receive hook did not return modified alert")

    try:
        is_duplicate = alert.is_duplicate()
        if is_duplicate:
            alert = alert.deduplicate(is_duplicate)
        else:
            is_correlated = alert.is_correlated()
            if is_correlated:
                alert = alert.update(is_correlated)
            else:
                alert = alert.create()
    except Exception as e:
        raise ApiError(str(e))

    wanted_plugins, wanted_config = plugins.routing(alert)

    alert_was_updated: bool = False
    for plugin in wanted_plugins:
        if skip_plugins:
            break
        try:
            updated = plugin.post_receive(alert, config=wanted_config)
        except TypeError:
            updated = plugin.post_receive(alert)  # for backward compatibility
        except AlertaException:
            raise
        except Exception as e:
            if current_app.config['PLUGINS_RAISE_ON_ERROR']:
                raise ApiError(f"Error while running post-receive plugin '{plugin.name}': {str(e)}")
            else:
                logging.error(f"Error while running post-receive plugin '{plugin.name}': {str(e)}")
        if updated:
            alert = updated
            alert_was_updated = True

    if alert_was_updated:
        alert.update_tags(alert.tags)
        alert.attributes = alert.update_attributes(alert.attributes)

    return alert
[ 356, 2941 ]
def METHOD_NAME(self, url: str, download: bool = False) -> Dict: """ Get metadata for a download using yt-dlp. ### Arguments - url: The url to get metadata for. ### Returns - A dictionary containing the metadata. """ url_id = url.split("?v=")[1] piped_data = requests.get( f"https://pipedapi.kavin.rocks/streams/{url_id}", timeout=10 ).json() yt_dlp_json = { "title": piped_data["title"], "id": url_id, "view_count": piped_data["views"], "extractor": "Generic", "formats": [], } for audio_stream in piped_data["audioStreams"]: yt_dlp_json["formats"].append( { "url": audio_stream["url"], "ext": "webm" if audio_stream["codec"] == "opus" else "m4a", "abr": audio_stream["quality"].split(" ")[0], "filesize": audio_stream["contentLength"], } ) return self.audio_handler.process_video_result(yt_dlp_json, download=download)
[ 19, 136, 773 ]
def METHOD_NAME(self, false_update): handler = InlineQueryHandler(self.callback) assert not handler.check_update(false_update)
[ 9, 2395, 86, 119 ]
def METHOD_NAME(self, md): """Escape all.""" config = self.getConfigs() hardbreak = config['hardbreak'] md.inlinePatterns.register( EscapeAllPattern(ESCAPE_NO_NL_RE if hardbreak else ESCAPE_RE, config['nbsp'], md), "escape", 180 ) md.postprocessors.register(EscapeAllPostprocessor(md), "unescape", 10) if config['hardbreak']: md.inlinePatterns.register(SubstituteTagInlineProcessor(HARDBREAK_RE, 'br'), "hardbreak", 5.1)
[ 978, 108 ]
def METHOD_NAME(a: dace.float64[20], b: dace.float64[20]): if a[0] < 0.5: a += 0.5 c = nested(a, b) else: c = nested(a, b) return c
[ 4769 ]
def METHOD_NAME(self): self._change_state(SubscriptionStatus.ACTIVE)
[ 1743, 947, 923 ]
def METHOD_NAME(self): """ SettingsQR parser should read line breaks as acceptable separators """ settingsqr_data = "settings::v1\nname=Foo\nsigs=ss,ms\nscripts=nat,nes,tr\nxpub_export=E\n" config_name, settings_update_dict = Settings.parse_settingsqr(settingsqr_data) assert len(settings_update_dict.keys()) == 3 # Accepts update with no Exceptions self.settings.update(new_settings=settings_update_dict)
[ 9, 6670, 5378, 534, 699, 6671 ]
def METHOD_NAME():
    # for this test alpha is sf probability, i.e. right tail probability
    alpha = np.array([0.2, 0.15, 0.1, 0.05, 0.01, 0.001])[::-1]
    size = np.array([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
                     19, 20, 25, 30, 40, 100, 400, 900], float)

    # critical values, rows are by sample size, columns are by alpha
    crit_lf = np.array([[303, 321, 346, 376, 413, 433],
                        [289, 303, 319, 343, 397, 439],
                        [269, 281, 297, 323, 371, 424],
                        [252, 264, 280, 304, 351, 402],
                        [239, 250, 265, 288, 333, 384],
                        [227, 238, 252, 274, 317, 365],
                        [217, 228, 241, 262, 304, 352],
                        [208, 218, 231, 251, 291, 338],
                        [200, 210, 222, 242, 281, 325],
                        [193, 202, 215, 234, 271, 314],
                        [187, 196, 208, 226, 262, 305],
                        [181, 190, 201, 219, 254, 296],
                        [176, 184, 195, 213, 247, 287],
                        [171, 179, 190, 207, 240, 279],
                        [167, 175, 185, 202, 234, 273],
                        [163, 170, 181, 197, 228, 266],
                        [159, 166, 176, 192, 223, 260],
                        [143, 150, 159, 173, 201, 236],
                        [131, 138, 146, 159, 185, 217],
                        [115, 120, 128, 139, 162, 189],
                        [74, 77, 82, 89, 104, 122],
                        [37, 39, 41, 45, 52, 61],
                        [25, 26, 28, 30, 35, 42]])[:, ::-1] / 1000.

    lf = TableDist(alpha, size, crit_lf)
    assert_almost_equal(lf.prob(0.166, 20), 0.15)
    assert_almost_equal(lf.crit(0.15, 20), 0.166)
    assert_almost_equal(lf.crit3(0.15, 20), 0.166)

    assert .159 <= lf.crit(0.17, 20) <= .166
    assert .159 <= lf.crit3(0.17, 20) <= .166
    assert .159 <= lf.crit(0.19, 20) <= .166
    assert .159 <= lf.crit3(0.19, 20) <= .166
    assert .159 <= lf.crit(0.199, 20) <= .166
    assert .159 <= lf.crit3(0.199, 20) <= .166

    # testing
    vals = [lf.prob(c, size[i]) for i in range(len(size)) for c in crit_lf[i]]
    vals = np.array(vals).reshape(-1, lf.n_alpha)
    delta = np.abs(vals) - lf.alpha
    assert_allclose(delta, np.zeros_like(delta))  # 1.6653345369377348e-16

    vals = [lf.crit(c, size[i]) for i in range(len(size)) for c in lf.alpha]
    vals = np.array(vals).reshape(-1, lf.n_alpha)
    delta = np.abs(vals - crit_lf)
    assert_allclose(delta, np.zeros_like(delta))  # 6.9388939039072284e-18

    print(np.max(np.abs(np.array(
        [lf.crit3(c, size[i]) for i in range(len(size))
         for c in lf.alpha]).reshape(-1, lf.n_alpha) - crit_lf)))  # 4.0615705243496336e-12

    vals = [lf.crit3(c, size[i]) for i in range(len(size)) for c in lf.alpha[:-1] * 1.1]
    vals = np.array(vals).reshape(-1, lf.n_alpha - 1)
    assert (vals < crit_lf[:, :-1]).all()

    vals = [lf.crit3(c, size[i]) for i in range(len(size)) for c in lf.alpha[:-1] * 1.1]
    vals = np.array(vals).reshape(-1, lf.n_alpha - 1)
    assert (vals > crit_lf[:, 1:]).all()

    vals = [lf.prob(c * 0.9, size[i]) for i in range(len(size)) for c in crit_lf[i, :-1]]
    vals = np.array(vals).reshape(-1, lf.n_alpha - 1)
    assert (vals > lf.alpha[:-1]).all()

    vals = [lf.prob(c * 1.1, size[i]) for i in range(len(size)) for c in crit_lf[i, 1:]]
    vals = np.array(vals).reshape(-1, lf.n_alpha - 1)
    assert (vals < lf.alpha[1:]).all()

    # start at size_idx=2 because of non-monotonicity of lf_crit
    vals = [lf.prob(c, size[i] * 0.9) for i in range(2, len(size)) for c in crit_lf[i, :-1]]
    vals = np.array(vals).reshape(-1, lf.n_alpha - 1)
    assert (vals > lf.alpha[:-1]).all()
[ 9, -1 ]
def METHOD_NAME(self): self.dps[PRESET_DPS] = "0" self.assertEqual(self.subject.preset_mode, "comfort") self.dps[PRESET_DPS] = "1" self.assertEqual(self.subject.preset_mode, "boost") self.dps[PRESET_DPS] = "2" self.assertEqual(self.subject.preset_mode, "eco") self.dps[PRESET_DPS] = None self.assertIs(self.subject.preset_mode, None)
[ 9, 2181, 854 ]
def METHOD_NAME(payload: PickupRequest, settings: Settings) -> Serializable: """ Create a pickup request Steps 1 - get availability 2 - create pickup :param payload: PickupRequest :param settings: Settings :return: Serializable """ request: Pipeline = Pipeline( get_availability=lambda *_: _get_availability( payload=payload, settings=settings ), create_pickup=partial(_create_pickup, payload=payload, settings=settings), ) return Serializable(request)
[ 7630, 377 ]
def METHOD_NAME(caplog: LogCaptureFixture) -> None: run_framework_test(caplog=caplog, framework=CheckType.ANSIBLE)
[ 9, 4090, 1486 ]
def METHOD_NAME(self, ac_id, msg):
    """Handle incoming messages

    Callback function for IvyMessagesInterface

    :param ac_id: aircraft id
    :type ac_id: int
    :param msg: message
    :type msg: PprzMessage
    """
    # only show messages of the requested class
    if msg.msg_class != self.msg_class:
        return
    if ac_id in self.aircrafts and msg.name in self.aircrafts[ac_id].messages:
        if time.time() - self.aircrafts[ac_id].messages[msg.name].last_seen < 0.2:
            return
    wx.CallAfter(self.gui_update, ac_id, msg)
[ 277, 1398 ]
def METHOD_NAME(self) -> str: """ A JSON string containing the properties of the provider instance. """ return pulumi.get(self, "properties")
[ 748 ]
def METHOD_NAME( undiscounted: TaxedMoneyRange, discounted: TaxedMoneyRange ) -> Optional[TaxedMoney]: """Calculate the discount amount between two TaxedMoneyRange. Subtract two prices and return their total discount, if any. Otherwise, it returns None. """ return _get_total_discount(undiscounted.start, discounted.start)
[ 19, 395, 1596, 280, 661 ]
def METHOD_NAME(self): model_id = 'damo/cv_nextvit-small_image-classification_Dailylife-labels' kwargs = dict( model=model_id, work_dir=self.tmp_dir, train_dataset=None, eval_dataset=self.eval_dataset) trainer = build_trainer( name=Trainers.image_classification, default_args=kwargs) result = trainer.evaluate() print(result)
[ 9, 14254, 14255, 1171 ]
def METHOD_NAME(self, p): self.sendUpdate("setP", [p])
[ 227, 0, 2054 ]
def METHOD_NAME(certs2, time_now, changed): if "kube-etcd" in changed: for key in certs2: if "kube-etcd" in key: changed.append(key) changed.remove("kube-etcd") for i in changed: assert(certs2[i] > (time_now + datetime.timedelta(days=3650)))
[ 979, 1180 ]
def METHOD_NAME(client):
    response = client.get("/dev/examples/modal")
    assert response.status_code == 200
    assert "jf-modal-dialog" in response.context["modal_html"]
    # We need to make the main content hidden when pre-rendering,
    # or else the content behind the modal will be keyboard-navigable.
    # Perhaps someday when the "inert" attribute is widely supported,
    # we could use that instead.
    assert b'<div id="main" hidden' in response.content
[ 9, 254, 41, 8958, 9094, 3160 ]
def METHOD_NAME(): raise OSError(uos.errno())
[ 241, 168 ]
def METHOD_NAME(gwid=None, key=None):
    """
    this function deletes an option of a gateway definition

    :param gwid: The id of the sms gateway definition
    :param key: The key of the option to delete; may be prefixed with a
        type, e.g. "option.<key>"
    :return: json with success or fail
    """
    type = "option"
    if "." in key:
        type, key = key.split(".", 1)

    res = delete_smsgateway_key_generic(gwid, key, Type=type)

    g.audit_object.log({"success": res,
                        "info": "{0!s}/{1!s}".format(gwid, key)})

    return send_result(res)
[ 34, 14, 1335 ]
def METHOD_NAME(x, mu, sigma): return numpy.exp(-numpy.power(x - mu, 2.) / (2 * numpy.power(sigma, 2.)))
[ 4008 ]
def METHOD_NAME(self, ret_type: _RetType | None = None) -> Iterator[Any]: ...
[ 75, 1491 ]
def METHOD_NAME(window_length):
    """Return a "periodic" Hann window of the given length."""
    return 0.5 - (0.5 * np.cos(2 * np.pi / window_length * np.arange(window_length)))
[ 2728, 13393 ]
def METHOD_NAME(self):
    # TODO: implement correct tracking in case code is used directly on raspi
    self.stop = True
[ 631, 2271 ]
def METHOD_NAME(self) -> int: ...
[ 6359 ]
def METHOD_NAME(self, name, dst_directory):
[ 136, 3283 ]
def METHOD_NAME(self): return [torch.float, torch.bfloat16]
[ 616, 4303 ]
def METHOD_NAME(self): """ Delete the model comparison """ return self.client._perform_empty("DELETE", "/projects/%s/modelcomparisons/%s" % (self.project_key, self.mec_id))
[ 34 ]
def METHOD_NAME(): connector = MSSQLConnector(name='my_mssql_con', host='myhost', user='myuser') assert connector.get_connection_params(None) == { 'driver': '{ODBC Driver 17 for SQL Server}', 'server': 'myhost', 'user': 'myuser', 'as_dict': True, } connector = MSSQLConnector( name='my_mssql_con', host='myhost', user='myuser', password='mypass', port=123, connect_timeout=60, ) assert connector.get_connection_params('mydb') == { 'driver': '{ODBC Driver 17 for SQL Server}', 'server': 'myhost,123', 'user': 'myuser', 'as_dict': True, 'password': 'mypass', 'timeout': 60, 'database': 'mydb', }
[ 9, 550, 434 ]
def METHOD_NAME(self):
    # Start node0. We don't start the other nodes yet since
    # we need to pre-mine a block with an invalid transaction
    # signature so we can pass in the block hash as assumevalid.
    self.nodes = []
    self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
[ 102, 1228 ]
def METHOD_NAME(self) -> Optional[StateVector]: """The sensor velocity on a 3D Cartesian plane, expressed as a 3x1 :class:`StateVector` of Cartesian coordinates in the order :math:`x,y,z`. .. note:: This property delegates the actual calculation of velocity to the Sensor's :attr:`movement_controller` It is settable if, and only if, the sensor holds its own internal movement_controller which is a :class:`~.MovingMovable`.""" return self.movement_controller.METHOD_NAME
[ 5311 ]
def METHOD_NAME(private_key: RSAPrivateKey) -> RSAPublicKey: return private_key.METHOD_NAME()
[ 1609, 59 ]
def METHOD_NAME(self, spec, prefix): pass
[ 334 ]
def METHOD_NAME(self, scope, indices, batch=1000, timestamp=None): if timestamp is None: timestamp = int(time.time()) arguments = [ "SCAN", timestamp, self.namespace, self.bands, self.interval, self.retention, self.candidate_set_limit, scope, ] cursors = {idx: 0 for idx in indices} while cursors: requests = [] for idx, cursor in cursors.items(): requests.append([idx, cursor, batch]) responses = self.__index(scope, arguments + flatten(requests)) for (idx, _, _), (cursor, chunk) in zip(requests, responses): cursor = int(cursor) if cursor == 0: del cursors[idx] else: cursors[idx] = cursor yield idx, chunk
[ 793 ]
def METHOD_NAME(self, request, domain): if 'explosion_id' in request.POST: return self.delete_cases(request, domain) else: return self.explode_cases(request, domain)
[ 72 ]
def METHOD_NAME(ndim):
    shape = (5,) * ndim
    np_arr = mk_seq_array(np, shape)
    num_arr = mk_seq_array(num, shape)
    np_mask = (np_arr % 2).astype(bool)
    num_mask = (num_arr % 2).astype(bool)

    # scalar_val
    np.putmask(np_arr, np_mask, -10)
    num.putmask(num_arr, num_mask, -10)
    assert np.array_equal(np_arr, num_arr)

    # val is the same shape:
    np_val = np_arr * 10
    num_val = num_arr * 10
    np.putmask(np_arr, np_mask, np_val)
    num.putmask(num_arr, num_mask, num_val)
    assert np.array_equal(np_arr, num_arr)

    # val is different shape, but the same size
    shape_val = (np_arr.size,)
    np_values = mk_seq_array(np, shape_val) * 10
    num_values = mk_seq_array(num, shape_val) * 10
    np.putmask(np_arr, np_mask, np_values)
    num.putmask(num_arr, num_mask, num_values)
    assert np.array_equal(np_arr, num_arr)

    # val is different shape and smaller size for vals and array
    shape_val = (2,) * ndim
    np_values = mk_seq_array(np, shape_val) * 10
    num_values = mk_seq_array(num, shape_val) * 10
    np.putmask(np_arr, np_mask, np_values)
    num.putmask(num_arr, num_mask, num_values)
    assert np.array_equal(np_arr, num_arr)

    # val is different shape and bigger size for vals and array
    shape_val = (10,) * ndim
    np_values = mk_seq_array(np, shape_val) * 10
    num_values = mk_seq_array(num, shape_val) * 10
    np.putmask(np_arr, np_mask, np_values)
    num.putmask(num_arr, num_mask, num_values)
    assert np.array_equal(np_arr, num_arr)
[ 9, 4333 ]
def METHOD_NAME(self): opt_name = "{}_application".format(self.options.application) if not getattr(self.options["magnum"], opt_name): raise ConanInvalidConfiguration("Magnum needs option '{opt}=True'".format(opt=opt_name)) if self.settings.os == "Emscripten" and self.options["magnum"].target_gl == "gles2": raise ConanInvalidConfiguration("OpenGL ES 3 required, use option 'magnum:target_gl=gles3'")
[ 187 ]
def METHOD_NAME(): assert_allclose( np.not_equal(0.5, 0.6), not_equal(0.5, 0.6), atol=1e-7, check_dtype=False )
[ 9, 130, 926 ]
def METHOD_NAME(next_link=None):
    if not next_link:
        request = build_list_request(
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
    else:
        # make call to next link with the client's api-version
        _parsed_next_link = urllib.parse.urlparse(next_link)
        _next_request_params = case_insensitive_dict(
            {
                key: [urllib.parse.quote(v) for v in value]
                for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
            }
        )
        _next_request_params["api-version"] = self._config.api_version
        request = HttpRequest(
            "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        request.method = "GET"
    return request
[ 123, 377 ]
def METHOD_NAME(cls, *args, **kwargs):
    if cls._args_schema is not None:
        return cls._args_schema
    cls._args_schema = super().METHOD_NAME(*args, **kwargs)

    # define Arg Group ""
    _args_schema = cls._args_schema
    _args_schema.group_id = AAZStrArg(
        options=["-n", "--name", "--group-id"],
        help="The name of the private link resource",
        required=True,
        id_part="child_name_1",
    )
    _args_schema.resource_group = AAZResourceGroupNameArg(
        required=True,
    )
    _args_schema.workspace_name = AAZStrArg(
        options=["--workspace-name"],
        help="The name of the workspace.",
        required=True,
        id_part="name",
        fmt=AAZStrArgFormat(
            max_length=64,
            min_length=3,
        ),
    )
    return cls._args_schema
[ 56, 134, 135 ]
def METHOD_NAME(): return ""
[ 19, 1553, 77, 14587, 1126 ]