text: string, length 15 to 7.82k
ids: sequence, length 1 to 7
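The two columns are parallel per row: `text` holds a code snippet with the target function name masked as `METHOD_NAME`, and `ids` holds a short sequence of integers whose meaning is not stated in this dump. A minimal sketch of iterating such rows follows; the `datasets` loader and the `rows.jsonl` file name are assumptions for illustration, not part of the dump.

```python
# Minimal sketch, assuming the rows are stored as JSON lines with "text" and "ids" fields.
# The file name "rows.jsonl" is a placeholder; point it at the actual data location.
from datasets import load_dataset

ds = load_dataset("json", data_files="rows.jsonl", split="train")

for row in ds:
    snippet = row["text"]    # code with the function name masked as METHOD_NAME
    token_ids = row["ids"]   # 1 to 7 integers paired with the snippet
    print(len(snippet), token_ids)
```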
def METHOD_NAME(parent: Optional[pulumi.Input[str]] = None, short_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTagKeyResult]: """ Get a tag key by org or project `parent` and `short_name`. ## Example Usage ```python import pulumi import pulumi_gcp as gcp environment_tag_key = gcp.tags.get_tag_key(parent="organizations/12345", short_name="environment") ``` ```python import pulumi import pulumi_gcp as gcp environment_tag_key = gcp.tags.get_tag_key(parent="projects/abc", short_name="environment") ``` :param str parent: The resource name of the parent organization or project. It can be in format `organizations/{org_id}` or `projects/{project_id_or_number}`. :param str short_name: The tag key's short_name. """ ...
[ 19, 82, 59, 146 ]
def METHOD_NAME(n, mod=2): return np.arange(0, n, dtype=np.uint8) % mod
[ 3203 ]
def METHOD_NAME(): @nb.njit def add_a_datetime(builder, datetime): builder.datetime(datetime) return builder builder = add_a_datetime(ak.ArrayBuilder(), np.datetime64("2020-09-04")) out = builder.snapshot() assert out.to_list() == [np.datetime64("2020-09-04")]
[ 9, 877, 348, 47, 9858 ]
def METHOD_NAME(self, resources=None, batch_size=settings.BULK_IMPORT_BATCH_SIZE, quiet=False): """ Indexes a list of resources in bulk to Elastic Search Keyword Arguments: resources -- the list of resource instances to index batch_size -- the number of records to index as a group, the larger the number the more memory required quiet -- Silences the status bar output during certain operations, use in celery operations for example Return: None """ start = datetime.now() q = Query(se=self.se) self.se.refresh(index=self.index_name) count_before = self.se.count(index=self.index_name, body=q.dsl) result_summary = {"database": len(resources), "indexed": 0} if quiet is False: bar = pyprind.ProgBar(len(resources), bar_char="█") if len(resources) > 1 else None with self.se.BulkIndexer(batch_size=batch_size, refresh=True) as indexer: for resource in resources: if quiet is False and bar is not None: bar.update(item_id=resource) tiles = list(models.TileModel.objects.filter(resourceinstance=resource)) document, doc_id = self.get_documents_to_index(resource, tiles) if document is not None and doc_id is not None: indexer.add(index=self.index_name, id=doc_id, data=document) self.se.refresh(index=self.index_name) result_summary["indexed"] = self.se.count(index=self.index_name, body=q.dsl) - count_before status = "Passed" if result_summary["database"] == result_summary["indexed"] else "Failed" print(f"Custom Index - {settings.ELASTICSEARCH_PREFIX}_{self.index_name}") print( f" Status: {status}, In Database: {result_summary['database']}, Indexed: {result_summary['indexed']}, Took: {(datetime.now() - start).seconds} seconds" )
[ 724, 1614 ]
def METHOD_NAME(self) -> str: """ Resource region. """ return pulumi.get(self, "region")
[ 1216 ]
def METHOD_NAME( files_or_modules: Sequence[str], source_roots: Sequence[str], ignore_list: list[str], ignore_list_re: list[Pattern[str]], ignore_list_paths_re: list[Pattern[str]], ) -> tuple[dict[str, ModuleDescriptionDict], list[ErrorDescriptionDict]]: """Take a list of files/modules/packages and return the list of tuple (file, module name) which have to be actually checked. """ result: dict[str, ModuleDescriptionDict] = {} errors: list[ErrorDescriptionDict] = [] path = sys.path.copy() for something in files_or_modules: basename = os.path.basename(something) if _is_ignored_file( something, ignore_list, ignore_list_re, ignore_list_paths_re ): continue module_package_path = discover_package_path(something, source_roots) additional_search_path = [".", module_package_path, *path] if os.path.exists(something): # this is a file or a directory try: modname = ".".join( modutils.modpath_from_file(something, path=additional_search_path) ) except ImportError: modname = os.path.splitext(basename)[0] if os.path.isdir(something): filepath = os.path.join(something, "__init__.py") else: filepath = something else: # suppose it's a module or package modname = something try: filepath = modutils.file_from_modpath( modname.split("."), path=additional_search_path ) if filepath is None: continue except ImportError as ex: errors.append({"key": "fatal", "mod": modname, "ex": ex}) continue filepath = os.path.normpath(filepath) modparts = (modname or something).split(".") try: spec = modutils.file_info_from_modpath( modparts, path=additional_search_path ) except ImportError: # Might not be acceptable, don't crash. is_namespace = False is_directory = os.path.isdir(something) else: is_namespace = modutils.is_namespace(spec) is_directory = modutils.is_directory(spec) if not is_namespace: if filepath in result: # Always set arg flag if module explicitly given. result[filepath]["isarg"] = True else: result[filepath] = { "path": filepath, "name": modname, "isarg": True, "basepath": filepath, "basename": modname, } has_init = ( not (modname.endswith(".__init__") or modname == "__init__") and os.path.basename(filepath) == "__init__.py" ) if has_init or is_namespace or is_directory: for subfilepath in modutils.get_module_files( os.path.dirname(filepath), ignore_list, list_all=is_namespace ): if filepath == subfilepath: continue if _is_in_ignore_list_re( os.path.basename(subfilepath), ignore_list_re ) or _is_in_ignore_list_re(subfilepath, ignore_list_paths_re): continue modpath = _modpath_from_file( subfilepath, is_namespace, path=additional_search_path ) submodname = ".".join(modpath) # Preserve arg flag if module is also explicitly given. isarg = subfilepath in result and result[subfilepath]["isarg"] result[subfilepath] = { "path": subfilepath, "name": submodname, "isarg": isarg, "basepath": filepath, "basename": modname, } return result, errors
[ 2450, 468 ]
def METHOD_NAME(path, file):
[ 203, 2518, 171 ]
def METHOD_NAME(sender, **kwargs): """ When locale is removed from a project, delete TranslatedResources and aggregate project and locale stats. """ project_locale = kwargs.get("instance", None) if project_locale is not None: project = project_locale.project locale = project_locale.locale TranslatedResource.objects.filter( resource__project=project, locale=locale ).delete() project.aggregate_stats() locale.aggregate_stats()
[ 155, 779, 674 ]
def METHOD_NAME(client, mount_point, num_files, num_dirs): for _ in range(1, num_files + num_dirs): try: client.exec_command( sudo=True, cmd=f"ls -laRt {mount_point}/", ) except FileNotFoundError as e: error_message = str(e) if "No such file or directory" not in error_message: raise OperationFailedError("failed to perform lookups") log.warning(f"Ignoring error: {error_message}") except Exception: raise OperationFailedError("failed to perform lookups")
[ 407, 3422 ]
def METHOD_NAME(param_file,cfdict,lev=0): """Recursive function that writes ILAMB configuration file from dictionary in cmec.json. Each nested dictionary is treated as a new sub section (h1, h2, or a data source) Parameters ---------- param_file: str open parameter file to write to cfdict: str dictionary with the configuration file contents lev: int counter that keeps track of the heading level """ head = {0: "h1: ", 1: "h2: ", 2: ""} for key in cfdict: if isinstance(cfdict[key], str): param_file.write("{0} = \"{1}\"\n".format(key, cfdict[key])) elif isinstance(cfdict[key], (int,float,bool)): param_file.write("{0} = {1}\n".format(key, cfdict[key])) elif isinstance(cfdict[key], dict): param_file.write("[{0}{1}]\n".format(head[lev],key)) METHOD_NAME(param_file,cfdict[key],lev=lev+1) return
[ 77, -1, 2610 ]
def METHOD_NAME(self, lines): if len(lines) == 0: return text = [] for line in lines: line = line.strip() if len(line) == 0: break else: text.append(line) text = ' '.join(text) sep_idx = text.find(':') + 1 if text.startswith(('Fixed Bug', 'Bug Fix')): self._fixes.append(text[sep_idx:]) elif text.startswith(('Feature', 'Task')): self._features.append(text[sep_idx:])
[ 356, 1160 ]
def METHOD_NAME(self, name): """ Removes package [name]. """ r_cmd = self.base_command + " " + "erase" + " " + name try: process.system(r_cmd, sudo=True) return True except process.CmdError: return False
[ 188 ]
def METHOD_NAME(self, cfg: DictConfig): tokenizer = AutoTokenizer.from_pretrained( cfg.tokenizer_name, vocab_file=cfg.vocab_file, do_lower_case=cfg.do_lower_case ) self.tokenizer = tokenizer
[ 102, 1345 ]
def METHOD_NAME(name: Optional[str] = None, resource_group_name: Optional[str] = None, workspace_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatastoreResult: """ Azure Resource Manager resource envelope. Azure REST API version: 2023-04-01. :param str name: Datastore name. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str workspace_name: Name of Azure Machine Learning workspace. """ __args__ = dict() __args__['name'] = name __args__['resourceGroupName'] = resource_group_name __args__['workspaceName'] = workspace_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices:getDatastore', __args__, opts=opts, typ=GetDatastoreResult).value return AwaitableGetDatastoreResult( datastore_properties=pulumi.get(__ret__, 'datastore_properties'), id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'), system_data=pulumi.get(__ret__, 'system_data'), type=pulumi.get(__ret__, 'type'))
[ 19, 914 ]
def METHOD_NAME(self, job, grouping_key=None, handler=default_handler) -> None: """Push metrics to the given pushgateway. `job` is the job label to be attached to all pushed metrics `grouping_key` please see the pushgateway documentation for details. Defaults to None `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. If not None, the argument must be a function which accepts the following arguments: url, method, timeout, headers, and content May be used to implement additional functionality not supported by the built-in default handler (such as SSL client certicates, and HTTP authentication mechanisms). 'url' is the URL for the request, the 'gateway' argument described earlier will form the basis of this URL. 'method' is the HTTP method which should be used when carrying out the request. 'timeout' requests not successfully completed after this many seconds should be aborted. If timeout is None, then the handler should not set a timeout. 'headers' is a list of ("header-name","header-value") tuples which must be passed to the pushgateway in the form of HTTP request headers. The function should raise an exception (e.g. IOError) on failure. 'content' is the data which should be used to form the HTTP Message Body. This overwrites all metrics with the same job and grouping_key. This uses the PUT HTTP method. """ prometheus_client.METHOD_NAME( gateway=self.gateway, job=job, registry=self._registry, grouping_key=grouping_key, timeout=self.timeout, handler=handler, )
[ 1013, 24, 14 ]
def METHOD_NAME(minimal_swagger_spec): float_spec = {'type': 'number', 'format': 'float'} result = to_python(minimal_swagger_spec, float_spec, float(3.14)) assert 3.14 == result assert isinstance(result, float)
[ 9, 1819 ]
def METHOD_NAME(self) -> None: """Prints out a human-readable description of the bool artifact.""" input_operator = self._dag.must_get_operator(with_output_artifact_id=self._artifact_id) readable_dict = super()._describe() if get_operator_type(input_operator) is OperatorType.CHECK: general_dict = get_readable_description_for_check(input_operator) # Remove because values already in `readable_dict` general_dict.pop("Label") general_dict.pop("Granularity") readable_dict.update(general_dict) readable_dict["Inputs"] = [ self._dag.must_get_artifact(artf).name for artf in input_operator.inputs ] print(format_header_for_print(f"'{input_operator.name}' Bool Artifact")) print(json.dumps(readable_dict, sort_keys=False, indent=4))
[ 2517 ]
def METHOD_NAME(self): rds = dns.rdataset.from_text("in", "a", 300, "10.0.0.1") self.assertFalse(rds == 123)
[ 9, 654, 926, 24, 2395 ]
def METHOD_NAME(self): date = dt.date(1900, 2, 18) expected = { 'year': 1900, 'month': 1, 'date': 18 } self.assertEqual(self.to_json(date, self.dummy_manager), expected)
[ 9, 183, 153 ]
def METHOD_NAME(self): tracer = trace.get_tracer(__name__) with tracer.start_as_current_span( "requests HTTP GET" ) as span, set_ip_on_next_http_connection(span): with tracer.start_as_current_span( "urllib3 HTTP GET" ) as span2, set_ip_on_next_http_connection(span2): resp, body = self.perform_request() assert resp.status == 200 assert body == b"Hello!" for span in self.assert_span(num_spans=2): self.assertEqual(span.attributes, {"net.peer.ip": "127.0.0.1"})
[ 9, 41, 612, 1244 ]
def METHOD_NAME(dataset): return dataset.batch(100).map(lambda x: x + 1)
[ 3653, 1170 ]
def METHOD_NAME(): n, m = tir.Var("n", "int64"), tir.Var("m", "int64") s0 = rx.TensorStructInfo([1, n + 1, m], "float32") s1 = rx.TensorStructInfo(rx.ShapeExpr([1, n + 1, m]), "float32") _check_equal(s0, s1) assert s0 == s1 assert s0.ndim == 3 assert s1.ndim == 3 assert isinstance(s0, rx.TensorStructInfo) _check_json_roundtrip(s0) _check_json_roundtrip(s1) s2 = rx.TensorStructInfo(ndim=2, dtype="int32") assert s2.ndim == 2 assert s2.dtype == "int32" assert s2.shape is None _check_json_roundtrip(s2) assert s0 != s2 # take in opaque var rshape = rx.Var("shape", rx.ShapeStructInfo(ndim=2)) s3 = rx.TensorStructInfo(rshape, dtype="int32") assert s3.dtype == "int32" assert s3.shape == rshape assert s3.ndim == 2 _check_json_roundtrip(s3) # can turn into str str(s0) # cannot pass both ndim and values with pytest.raises(ValueError): rx.TensorStructInfo([1, 2], ndim=3) # cannot pass both ndim and values even if they are consistent with pytest.raises(ValueError): rx.TensorStructInfo([1, 2], ndim=2)
[ 9, 768, 1755, 100 ]
def METHOD_NAME(test_suite): ret_val = True if "tests" not in test_suite: print('No "tests" key found with list of tests') ret_val = False else: for test in test_suite["tests"]: if not is_valid_test(test): ret_val = False break return ret_val
[ 137, 1205, 9, 482 ]
def METHOD_NAME(prefixes_and_configs: list[tuple[str, dict[str, Any]]]) -> dict[str, Any]: """ Merge two PII configs into one, prefixing all custom rules with a prefix in the name. This is used to apply organization and project configs at once, and still get unique references to rule names. """ merged_config: dict[str, Any] = {} for prefix, partial_config in prefixes_and_configs: if not partial_config: continue rules = partial_config.get("rules") or {} for rule_name, rule in rules.items(): prefixed_rule_name = f"{prefix}{rule_name}" merged_config.setdefault("rules", {})[ prefixed_rule_name ] = _prefix_rule_references_in_rule(rules, rule, prefix) for selector, applications in (partial_config.get("applications") or {}).items(): merged_applications = merged_config.setdefault("applications", {}).setdefault( selector, [] ) for application in applications: if application in rules: prefixed_rule_name = f"{prefix}{application}" merged_applications.append(prefixed_rule_name) else: merged_applications.append(application) return merged_config
[ 411, 12682, 736 ]
def METHOD_NAME(self, log): """ Test that initialize call count does not increase on repeat calls. """ AppConfig.initialize() calls = log.info.call_count AppConfig.initialize() self.assertTrue( calls == log.info.call_count and calls > 0, "AxesConfig.initialize needs to be re-entrant", )
[ 9, 7272, 200, 390, 2647, -1 ]
def METHOD_NAME(self) -> Optional[str]: """Get the run identifier associated with the current engine/runner pair.""" return ( self._runner_engine_pair.run_id if self._runner_engine_pair is not None else None )
[ 1056, 22, 147 ]
def METHOD_NAME(record, genome_version): transcript = { "gene_id": record["gene_id"], "transcript_id": record["transcript_id"], "chrom_grch{}".format(genome_version): record["chrom"], "start_grch{}".format(genome_version): record["start"], "end_grch{}".format(genome_version): record["end"], "strand_grch{}".format(genome_version): record["strand"], } if 'MANE_Select' in record.get('tag', []): transcript['is_mane_select'] = True return transcript
[ 214, 5612, 148 ]
def METHOD_NAME(lines, header):
[ 356, 1571 ]
def METHOD_NAME(self, item): item, k1, k2, k3, k4 = item return self.eri.METHOD_NAME(item, (k1, k2, k3, k4)) / self.nk
[ 10192, 13119 ]
def METHOD_NAME(type_attr: Optional[str]) -> str: return "R" if type_attr is None else type_attr
[ 864, 24, 44, 59 ]
def METHOD_NAME(): p = pickle.dumps(O1) v = pickle.loads(p) assert O1 == v
[ 3446, 1385 ]
def METHOD_NAME(filename): """Unpack a zipfile, using the names in the zip.""" with open(filename, 'rb') as fzip: z = zipfile.ZipFile(fzip) for name in z.namelist(): print(" extracting {}".format(name)) ensure_dirs(name) z.extract(name)
[ 789, 4424 ]
def METHOD_NAME(self):
[ 1013, 223, 660 ]
def METHOD_NAME(repository_ctx): # When these values are updated: # - tools/dynamic_analysis/tsan.supp may also need updating # - LICENSE.third_party may also need updating to match # https://docs.mosek.com/latest/licensing/license-agreement-info.html mosek_major_version = 10 mosek_minor_version = 0 mosek_patch_version = 46 os_result = determine_os(repository_ctx) if os_result.is_macos or os_result.is_macos_wheel: if os_result.macos_arch_result == "arm64": mosek_platform = "osxaarch64" sha256 = "85724bd519d5fe120b4e8d2676b65143b9ce6dce666a07ca4f44ec54727b5ab5" # noqa else: mosek_platform = "osx64x86" sha256 = "16885bbee2c1d86e0a3f9d9a2c60bbab1bb88e6f1b843ac1fb8da0c62292344f" # noqa elif os_result.is_ubuntu or os_result.is_manylinux: mosek_platform = "linux64x86" sha256 = "a6862954137493b74f55c0f2745b7f1672e602cfe9cd8974a95feaf9993f06bf" # noqa else: fail( "Operating system is NOT supported", attr = repository_ctx.os.name, ) # TODO(jwnimmer-tri) Port to use mirrors.bzl. template = "https://download.mosek.com/stable/{}.{}.{}/mosektools{}.tar.bz2" # noqa url = template.format( mosek_major_version, mosek_minor_version, mosek_patch_version, mosek_platform, ) root_path = repository_ctx.path("") strip_prefix = "mosek/{}.{}".format( mosek_major_version, mosek_minor_version, ) repository_ctx.download_and_extract( url, root_path, sha256 = sha256, stripPrefix = strip_prefix, ) platform_prefix = "tools/platform/{}".format(mosek_platform) if repository_ctx.os.name == "mac os x": install_name_tool = which(repository_ctx, "install_name_tool") files = [ "bin/libtbb.12.dylib", "bin/libtbb.12.5.dylib", "bin/libmosek64.{}.{}.dylib".format( mosek_major_version, mosek_minor_version, ), ] for file in files: file_path = repository_ctx.path( "{}/{}".format(platform_prefix, file), ) result = repository_ctx.execute([ install_name_tool, "-id", file_path, file_path, ]) if result.return_code != 0: fail( "Could NOT change shared library identification name", attr = result.stderr, ) srcs = [] bin_path = repository_ctx.path("{}/bin".format(platform_prefix)) linkopts = [ "-L{}".format(bin_path), "-lmosek64", ] else: files = [ # We use the the MOSEK™ copy of libtbb. The version of libtbb # available in Ubuntu is too old. "bin/libtbb.so.12", "bin/libtbb.so.12.6", "bin/libmosek64.so.{}.{}".format( mosek_major_version, mosek_minor_version, ), ] linkopts = ["-pthread"] srcs = ["{}/{}".format(platform_prefix, file) for file in files] hdrs = ["{}/h/mosek.h".format(platform_prefix)] includes = ["{}/h".format(platform_prefix)] files = ["{}/{}".format(platform_prefix, file) for file in files] libraries_strip_prefix = ["{}/bin".format(platform_prefix)] file_content = """# DO NOT EDIT: generated by mosek_repository()
[ 2581 ]
def METHOD_NAME( self, query: PostgresQuery, prep: Prepare, name: bytes ) -> Optional[Key]: """Handle 'query' for possible addition to the cache. If a new entry has been added, return its key. Return None otherwise (meaning the query is already in cache or cache is not enabled). """ # don't do anything if prepared statements are disabled if self.prepare_threshold is None: return None key = self.key(query) if key in self._counts: if prep is Prepare.SHOULD: del self._counts[key] self._names[key] = name else: self._counts[key] += 1 self._counts.move_to_end(key) return None elif key in self._names: self._names.move_to_end(key) return None else: if prep is Prepare.SHOULD: self._names[key] = name else: self._counts[key] = 1 return key
[ 2946, 238, 24, 596 ]
def METHOD_NAME(): path = ObjectPath.root().attr(None) assert isinstance(path, object_path.UnknownAttributeAccessPath) assert str(path) == "<root>.<unknown attribute>" assert len(path) == 2 assert path.parent == ObjectPath.root()
[ 9, 157, 864, 46 ]
def METHOD_NAME(self, temperature: float) -> float: """ Poisson's ratio. Parameters ---------- temperature: The optional temperature [K]. Returns ------- Poisson's ratio for the material at the given temperature. """ return self._calc_homogenised_property("mu", temperature)
[ 2283 ]
async def METHOD_NAME(self): self.template.deleted = True await self.template.apply(IAMBIC_TEST_DETAILS.config.aws)
[ 958, 531, 481 ]
def METHOD_NAME(self): """Get an exception from the pool. :rtype: :class:`~ThreadException` """ try: (request, exc) = self._exc_queue.get_nowait() except queue.Empty: return None else: return ThreadException(request, exc)
[ 19, 442 ]
def METHOD_NAME(self, data): """ Callback that processes the input data and publishes to the corresponding topics. :param data: input message :type data: sensor_msgs.msg.Image """ # Convert sensor_msgs.msg.Image into OpenDR Image image = self.bridge.from_ros_image(data, encoding='bgr8') # Run object detection boxes = self.object_detector.infer(image, threshold=0.1, keep_size=False) if self.object_publisher is not None: # Publish detections in ROS message ros_boxes = self.bridge.to_ros_bounding_box_list(boxes) # Convert to ROS bounding_box_list self.object_publisher.publish(ros_boxes) if self.image_publisher is not None: # Get an OpenCV image back image = image.opencv() # Annotate image with object detection boxes image = draw_bounding_boxes(image, boxes, class_names=self.object_detector.classes) # Convert the annotated OpenDR image to ROS2 image message using bridge and publish it self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
[ 1076 ]
def METHOD_NAME(self, ll=None):
[ 203 ]
def METHOD_NAME(next_link=None): if not next_link: request = build_list_operations_request( api_version=api_version, template_url=self.list_operations.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: # make call to next link with the client's api-version _parsed_next_link = urllib.parse.urlparse(next_link) _next_request_params = case_insensitive_dict( { key: [urllib.parse.quote(v) for v in value] for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) _next_request_params["api-version"] = self._config.api_version request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request
[ 123, 377 ]
async def METHOD_NAME(pipeline_response): deserialized = self._deserialize("OperationListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, AsyncList(list_of_elem)
[ 297, 365 ]
def METHOD_NAME(self, instance): try: present = services.is_user_present( self.context["request"].user, instance, ) if present is None: return False return present except AttributeError: return False
[ 2541 ]
def METHOD_NAME(): from encoded.reports.serializers import map_string_to_boolean_and_int assert map_string_to_boolean_and_int('true') == True assert map_string_to_boolean_and_int('false') == False assert map_string_to_boolean_and_int('GRCh38') == 'GRCh38' assert map_string_to_boolean_and_int('97.32') == '97.32' assert map_string_to_boolean_and_int('2') == 2 assert map_string_to_boolean_and_int('5000000000') == 5000000000
[ 9, 422, 144, 24, 201, 61, 962 ]
def METHOD_NAME(self, message, timeout=0): self.status.set_text(message) if timeout: GLib.timeout_add_seconds(timeout, self.METHOD_NAME, '', 0)
[ 0, 452 ]
def METHOD_NAME(command_tester_factory: CommandTesterFactory) -> CommandTester: return command_tester_factory("run")
[ 4769 ]
def METHOD_NAME(self): ''' Stop the Bot if cannot connect to AMQP Server after the defined connection attempts ''' # self._connection and self.channel can be None if getattr(self._connection, 'is_closed', None) or getattr(self.channel, 'is_closed', None): self.connect_server() event = self.receive_message() body = self.export_event(event, return_type=str) # replace unicode characters when encoding (#1296) body = body.encode(errors='backslashreplace') if self.format_routing_key: routing_key = self.routing_key.format(ev=event) else: routing_key = self.routing_key try: if not self.channel.basic_publish(exchange=self.exchange_name, routing_key=routing_key, body=body, properties=self.properties, mandatory=True): if self.require_confirmation and not self.publish_raises_nack: raise ValueError('Message sent but not confirmed.') elif not self.publish_raises_nack: self.logger.info('Message sent but not confirmed.') except (pika.exceptions.ChannelError, pika.exceptions.AMQPChannelError, pika.exceptions.NackError): self.logger.exception('Error publishing the message.') except pika.exceptions.UnroutableError: self.logger.exception('The destination queue does not exist, declare it first. See also the README.') self.stop() else: self.acknowledge_message()
[ 356 ]
def METHOD_NAME(t): bitgen = t(seed=42) gen = num.random.Generator(bitgen) k = 3.0 lam = 1.414 a = gen.noncentral_chisquare(k, lam, size=(1024 * 1024,), dtype=np.float32) theo_mean = k + lam theo_std = np.sqrt(2.0 * (k + 2.0 * lam)) assert_distribution(a, theo_mean, theo_std)
[ 9, 14495, 11856, 7334 ]
def METHOD_NAME(self):
[ 9, 1188, 2269, 623, 1024, 846 ]
def METHOD_NAME(self, interface): """Test entropies on analytic results""" dev = qml.device("default.qubit", wires=2, shots=100000) @qml.qnode(dev, interface=interface) def circuit(): qml.IsingXX(0.5, wires=[0, 1]) return qml.classical_shadow(wires=range(2)) param = 0.5 bits, recipes = circuit() shadow = qml.ClassicalShadow(bits, recipes) # explicitly not use pytest parametrize to reuse the same measurements for alpha in [1, 2, 3]: for base in [2, np.exp(1)]: for reduced_wires in [[0], [1]]: entropy = shadow.entropy(wires=reduced_wires, base=base, alpha=alpha) expected_entropy = expected_entropy_ising_xx(param, alpha) / np.log(base) assert qml.math.allclose(entropy, expected_entropy, atol=1e-1)
[ 9, 3256, 3227 ]
async def METHOD_NAME(func, *args, **kwargs): def raise_for_status(response, deserialized, headers): response.http_response._internal_response.raise_for_status() try: http_response = await func(*args, cls=raise_for_status, **kwargs) except Exception as err: print(err.response.text()) pytest.fail()
[ 638, 41, 390 ]
def METHOD_NAME(self, session, Car): query = sa.select(*_select_args(Car.price_range)) # the type should be cacheable and not throw exception session.execute(query)
[ 9, 4405 ]
def METHOD_NAME(raw_string): """Function to parse the json""" config_parsed_data = ast.literal_eval(raw_string) config_intermediate_data = json.dumps(config_parsed_data) parsed_json_data = json.loads(config_intermediate_data) return parsed_json_data
[ 214, 763 ]
def METHOD_NAME(self, values): if not values and self.required: error_msg = _( "Field can't be empty. Please put the item OR items separated " "by new line or comma." ) raise forms.ValidationError(error_msg, code='required') non_empty_values = [ item for item in values if str(item).strip() ] if not self.allow_duplicates: has_duplicates = len(set(non_empty_values)) != len(non_empty_values) if has_duplicates: raise forms.ValidationError(_("There are duplicates in field."))
[ 187 ]
def METHOD_NAME(): list_data = list(Data) pod_dir = DirectPOD(solution_snapshots=list_data, reconstruction_percentage=100) actual_result = pod_dir.reconstructed_solution[0][1][1] expected_result = 0.3054 assert expected_result == round(actual_result, 6)
[ 9, 4234, 7107, 227, 245, 365 ]
def METHOD_NAME( rdf_test_uri: URIRef, func, suite_base, cat, num, inputpath, expectedpath, context, options, ): func(suite_base, cat, num, inputpath, expectedpath, context, options)
[ 9, 482 ]
def METHOD_NAME(arg, l_opts, pc_opts, ps_opts, request): nst = request.getfixturevalue(arg) grid = nst.grid parcels = nst._parcels opts = {**l_opts, **pc_opts, **ps_opts} plot_network_and_parcels(grid, parcels, parcel_time_index=0, **opts)
[ 9, 548, 1881 ]
def METHOD_NAME(self, callback): self._certverify = callback
[ 0, 1162, 1076 ]
def METHOD_NAME(expr): source_code = expr byte_code = compile_restricted(source_code, filename='<inline code>', mode='eval') return byte_code
[ 296, 7598 ]
def METHOD_NAME(self): self.client.execute = mock.MagicMock() self.assertEqual( list( self.client.execute_and_paginate( _TEST_QUERY, _TEST_QUERY_PATH, first_page=self.mock_result(["foo"]), ) ), ["foo"], ) self.assertEqual(self.client.execute.call_count, 0)
[ 9, 750, 61, 11465, 865, 1174, 1658 ]
def METHOD_NAME(): with patch( "openbb_terminal.core.session.session_controller.login", return_value=session_controller.LoginStatus.NO_RESPONSE, ) as login_mock, patch( "openbb_terminal.core.session.session_controller.prompt_cli", return_value=True ) as prompt_mock: session_controller.login_and_launch(session={}) assert login_mock.call_count == 1 assert prompt_mock.call_count == 1
[ 9, 273, 61, 1440, 654, 17 ]
def METHOD_NAME(cls): return "sparql_endpoint"
[ 44 ]
def METHOD_NAME(self) -> Optional[bool]: """ Determines whether the value is a secret and should be encrypted or not. Default value is false. """ return pulumi.get(self, "secret")
[ 444 ]
def METHOD_NAME(name, suffix_length=None, total_length=None): """Add a random part to of a specified length to a name (string) >>> append_random("tmp", 8) >>> append_random("tmp", total_length=16) ..note:: This function is copied from grass79. """ if suffix_length and total_length: raise ValueError( "Either suffix_length or total_length can be provided, not both" ) if not suffix_length and not total_length: raise ValueError("suffix_length or total_length has to be provided") if total_length: # remove len of name and one underscore name_length = len(name) suffix_length = total_length - name_length - 1 if suffix_length <= 0: raise ValueError( "No characters left for the suffix:" " total_length <{total_length}> is too small" " or name <{name}> ({name_length}) is too long".format(**locals()) ) # We don't do lower and upper case because that could cause conflicts in # contexts which are case-insensitive. # We use lowercase because that's what is in UUID4 hex string. allowed_chars = string.ascii_lowercase + string.digits # The following can be shorter with random.choices from Python 3.6. suffix = "".join(random.choice(allowed_chars) for _ in range(suffix_length)) return "{name}_{suffix}".format(**locals())
[ 1459, 236 ]
def METHOD_NAME(args): rets = {} fileset = args.get('fileset') filesystem = args.get('filesystem') space = args.get('space') files = args.get('files') if not filesystem: raise ValueError('missing variable: filesystem') if not fileset: raise ValueError('missing variable: fileset') if not space: raise ValueError('missing variable: space') if not files: raise ValueError('missing variable: files') result = ess.fileset_quota(filesystem, fileset, space, files) error = result.get('error') job = result.get('job') if error or result.get('status') != 'COMPLETED': raise RuntimeError('failed to set quota on fileset') return rets
[ 14167, 5081, 2268 ]
def METHOD_NAME(self): return QSize(550, 100)
[ 1318, 3711 ]
def METHOD_NAME(self): self.errpipe_read, errpipe_write = os.pipe() self.pid, self.child_fd = pty.fork() if self.pid == 0: # We're the child # Warning! There are potential race conditions if a signal is # received (or maybe other things happen) before the execvpe call # replaces python state... # prevent server's finally block from running in the event of an # early signal: config.set("pidfile", None) # replace server's signal handling: def handle_signal(signal, f): sys.exit(0) signal.signal(1, handle_signal) # Set window size cols, lines = self.get_terminal_size() s = struct.pack("HHHH", lines, cols, 0, 0) fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, s) os.close(self.errpipe_read) os.dup2(errpipe_write, 2) # Make sure not to retain any files from the parent max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0] for i in range(3, max_fd): try: os.close(i) except OSError: pass # And exec env = dict(os.environ) env.update(self.env_vars) env["COLUMNS"] = str(cols) env["LINES"] = str(lines) env["TERM"] = "linux" if self.game_cwd: os.chdir(self.game_cwd) try: os.execvpe(self.command[0], self.command, env) except OSError: sys.exit(1) # We're the parent os.close(errpipe_write) if not self.ttyrec: self.desc = "TerminalRecorder (fd %d)" % self.child_fd IOLoop.current().add_handler(self.child_fd, self._handle_read, IOLoop.ERROR | IOLoop.READ) IOLoop.current().add_handler(self.errpipe_read, self._handle_err_read, IOLoop.READ)
[ 597 ]
def METHOD_NAME(self): area = Area([(1, 1), (2, 2), (3, 3)]).redim.range(y=(0, 4)).opts(padding=0.1) plot = mpl_renderer.get_plot(area) x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim() self.assertEqual(x_range[0], 0.8) self.assertEqual(x_range[1], 3.2) self.assertEqual(y_range[0], 0) self.assertEqual(y_range[1], 4)
[ 9, 690, 746, 388, 661 ]
def METHOD_NAME( compressed_model_outputs: Any, orig_model_outputs: Any, kd_loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], ) -> Optional[torch.Tensor]: """ Calculates knowledge distillation loss value from compressed_model_outputs and orig_model_outputs. First uses nested_object_paths_generator to unpack input containers and numerate contents inside them. Than checks compressed_model_outputs unpacked container for loss tensors (requires_grad=True) and maps extracted structure of loss tensors to orig_model_outputs. Finally computes knowledge distillation loss with extracted loss tensors. :param compressed_model_outputs: Output tensors of compressed model can be any type of container with deterministic traversal. :param orig_model_outputs: Output tensors of original model (used for distillation) can be any type of container with deterministic traversal. :return: knowledge distillation loss value """ compressed_model_outputs_nested_obj_indexing = NestedObjectIndex([compressed_model_outputs]) orig_model_outputs_nested_obj_indexing = NestedObjectIndex([orig_model_outputs]) compressed_model_loss_outputs_nested_obj_indexing = list( filter( lambda x: KnowledgeDistillationLoss._is_loss(x.getter()), compressed_model_outputs_nested_obj_indexing.get_flat_nested_obj_indexing(), ) ) compressed_model_loss_outputs = list( map(lambda x: x.getter(), compressed_model_loss_outputs_nested_obj_indexing) ) def match_fn(obj): for x in compressed_model_loss_outputs_nested_obj_indexing: if x.path == obj.path: return True return False orig_model_loss_outputs = list( map( lambda x: x.getter(), filter(match_fn, orig_model_outputs_nested_obj_indexing.get_flat_nested_obj_indexing()), ) ) if len(orig_model_loss_outputs) != len(compressed_model_loss_outputs): nncf_logger.warning( f"KD: mismatch in the number of detected loss tensors in return value between original " f"and compressed models;\n" f"original has {len(orig_model_loss_outputs)} loss tensors,\n" f"compressed has {len(compressed_model_loss_outputs)}" ) if not orig_model_loss_outputs: nncf_logger.warning("KD: no loss outputs detected in original model, knowledge distillation not possible") return None if not compressed_model_loss_outputs: nncf_logger.warning("KD: no loss outputs detected in compressed model, knowledge distillation not possible") return None return reduce( lambda kd_loss, loss_tensors: kd_loss + kd_loss_fn(loss_tensors[0], loss_tensors[1]), zip(orig_model_loss_outputs, compressed_model_loss_outputs), torch.zeros([], device=orig_model_loss_outputs[0].device), )
[ 1593 ]
def METHOD_NAME(self): if self.pos.x < -self.size: self.pos.x = width + self.size if self.pos.x > width + self.size: self.pos.x = -self.size if self.pos.y < -self.size: self.pos.y = height + self.size if self.pos.y > height + self.size: self.pos.y = -self.size
[ 250, 5543 ]
def METHOD_NAME(context): """Check that the data is available in the scene.""" assert context.scene["M02"] is not None assert context.scene.get("M01") is None
[ 367, 2581, 365, 1272, 623, 2453 ]
def METHOD_NAME(self): # Positive constants self.assertEqual(0o20000000000, 2147483648) self.assertEqual(0o37777777777, 4294967295) # Ditto with a minus sign and parentheses self.assertEqual(-(0o20000000000), -2147483648) self.assertEqual(-(0o37777777777), -4294967295) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 2.3a1 self.assertEqual(-0o20000000000, -2147483648) self.assertEqual(-0o37777777777, -4294967295) # Positive constants self.assertEqual(0o1000000000000000000000, 9223372036854775808) self.assertEqual(0o1777777777777777777777, 18446744073709551615) # Ditto with a minus sign and parentheses self.assertEqual(-(0o1000000000000000000000), -9223372036854775808) self.assertEqual(-(0o1777777777777777777777), -18446744073709551615) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 2.3a1 self.assertEqual(-0o1000000000000000000000, -9223372036854775808) self.assertEqual(-0o1777777777777777777777, -18446744073709551615)
[ 9, 10371, 1715 ]
def METHOD_NAME(self, x): out = self.m(x) return out
[ 56 ]
def METHOD_NAME(self, request, **kwargs): timeout = kwargs.get("timeout") if timeout is None: kwargs["timeout"] = self.timeout return super().METHOD_NAME(request, **kwargs)
[ 353 ]
def METHOD_NAME(data): cleaned_data = {} cleaned_data["medicine"] = data["medicine"] cleaned_data["route"] = data.get("route", "").upper() or None cleaned_data["days"] = clean_integer(data.get("days", 0)) cleaned_data["indicator"] = data.get("indicator") cleaned_data["max_dosage"] = data.get("max_dosage") cleaned_data["min_hours_between_doses"] = clean_integer(data.get("min_time", 0)) cleaned_data["notes"] = data.get("notes", "") if data.get("dosage", "").upper() in FREQUENCY_OPTIONS: cleaned_data["frequency"] = data.get("dosage") cleaned_data["dosage"] = data.get("dosage_new") elif data.get("dosage_new", "").upper() in FREQUENCY_OPTIONS: cleaned_data["frequency"] = data.get("dosage_new") cleaned_data["dosage"] = data.get("dosage") else: cleaned_data["frequency"] = None cleaned_data["dosage"] = data.get("dosage_new") or data.get("dosage") return cleaned_data
[ 1356, -1 ]
def METHOD_NAME(): class A: def foo(self): return 42 class B(A): pass B.foo = lambda self: 1 del B.foo class C(B): pass c = C() assert c.foo() == 42
[ 9, 654, 99, 61, 5056, 555 ]
def METHOD_NAME( ocm_url: str, httpretty: httpretty_module, ) -> Callable[[str, str], Optional[HTTPrettyRequest]]: def find_request(method: str, path: str) -> Optional[HTTPrettyRequest]: for req in httpretty.latest_requests(): if _request_matches(req, method, ocm_url, path): return req return None return find_request
[ 416, 6029, 721, 377 ]
def METHOD_NAME(s): return split(s)[1]
[ 5926 ]
def METHOD_NAME(request, api): '''fixture to create an user''' METHOD_NAME = api.users.create( '{}@tenable.com'.format(uuid.uuid4()), '{}Tt!'.format(uuid.uuid4()), 64) def teardown(): '''function to clear the user''' try: api.users.delete(METHOD_NAME['id']) except NotFoundError as notfound: log_exception(notfound) request.addfinalizer(teardown) return METHOD_NAME
[ 21 ]
def METHOD_NAME(filepath, custom_objects=None, compile=True): # pylint: disable=redefined-builtin """Loads a model saved via `save_model`. Arguments: filepath: One of the following: - String, path to the saved model - `h5py.File` object from which to load the model custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. compile: Boolean, whether to compile the model after loading. Returns: A Keras model instance. If an optimizer was found as part of the saved model, the model is already compiled. Otherwise, the model is uncompiled and a warning will be displayed. When `compile` is set to False, the compilation is omitted without any warning. Raises: ImportError: if loading from an hdf5 file and h5py is not available. IOError: In case of an invalid savefile. """ if (h5py is not None and ( isinstance(filepath, h5py.File) or h5py.is_hdf5(filepath))): return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile) if isinstance(filepath, six.string_types): loader_impl.parse_saved_model(filepath) return saved_model_load.load(filepath, compile) raise IOError( 'Unable to load model. Filepath is not an hdf5 file (or h5py is not ' 'available) or SavedModel.')
[ 557, 578 ]
def METHOD_NAME(tag_set): encoded = '' for tag in tag_set: if len(encoded) > 0: encoded += '&' encoded += tag['Key'] + '=' + tag['Value'] return encoded
[ 421, 82, 0 ]
def METHOD_NAME(self) -> Optional[str]: """ The role definition of management policy assignment. """ return pulumi.get(self, "role_definition_id")
[ 1018, 1208, 147 ]
def METHOD_NAME(client, sample_service, mocker): mocker.patch.dict("app.v2.inbound_sms.get_inbound_sms.current_app.config", {"API_PAGE_SIZE": 2}) all_inbound_sms = [ create_inbound_sms(service=sample_service, user_number="447700900111", content="Hi"), create_inbound_sms(service=sample_service, user_number="447700900111"), create_inbound_sms(service=sample_service, user_number="447700900111", content="End"), ] reversed_inbound_sms = sorted(all_inbound_sms, key=lambda sms: sms.created_at, reverse=True) auth_header = create_authorization_header(service_id=sample_service.id) response = client.get( url_for("v2_inbound_sms.get_inbound_sms"), headers=[("Content-Type", "application/json"), auth_header], ) assert response.status_code == 200 json_response = json.loads(response.get_data(as_text=True)) expected_inbound_sms_list = [i.serialize() for i in reversed_inbound_sms[:2]] assert json_response["received_text_messages"] == expected_inbound_sms_list assert url_for("v2_inbound_sms.get_inbound_sms", _external=True) == json_response["links"]["current"] assert ( url_for( "v2_inbound_sms.get_inbound_sms", older_than=reversed_inbound_sms[1].id, _external=True, ) == json_response["links"]["next"] )
[ 9, 19, 8837, 3179, 567, 1174, 1127 ]
def METHOD_NAME(save_dir, sleep_time): if not os.path.isfile("url_name.txt"): return with open("url_name.txt", "r") as input_file: for line in input_file: if not line.isspace(): save_dir2 = save_dir line_list = line.split(", ") print(line_list) try: url_2 = line_list[0] file_name = line_list[1].replace("-", "_") year_folder = line_list[2].rstrip() + "/" # year_folder = year_folder print("Year_folder: " + year_folder) except IndexError: print(line_list) pass if not os.path.exists(save_dir + year_folder.strip("\n")): os.makedirs(save_dir + year_folder.strip("\n")) save_dir2 = save_dir + year_folder if os.path.exists(save_dir2 + file_name) == False: pdf = urllib.request.urlopen(url_2) with open(save_dir2 + file_name + ".pdf", "wb") as file: file.write(pdf.read()) file.close() time.sleep(sleep_time) print("Sleep") input_file.close() # os.remove("url_name.txt")
[ 19, 1537 ]
def METHOD_NAME( self ) :
[ 9, 5230, 947, 2635 ]
def METHOD_NAME(self, args_dict): if 'channel_measurement' not in args_dict: self.logger.error("Cannot save measurement without a Channel") return if 'value_measurement' not in args_dict: self.logger.error("Cannot save measurement without a Value") return channel = args_dict['channel_measurement'] value = args_dict['value_measurement'] measurements = { channel: { 'measurement': self.channels_measurement[channel].measurement, 'unit': self.channels_measurement[channel].unit, 'value': value } } # Convert value/unit if conversion_id present and valid if self.channels_conversion[channel]: conversion = db_retrieve_table_daemon( Conversion, unique_id=self.channels_measurement[channel].conversion_id) if conversion: meas = parse_measurement( self.channels_conversion[channel], self.channels_measurement[channel], measurements, channel, measurements[channel]) measurements[channel]['measurement'] = meas[channel]['measurement'] measurements[channel]['unit'] = meas[channel]['unit'] measurements[channel]['value'] = meas[channel]['value'] if measurements: self.logger.debug("Adding measurements to influxdb: {}".format(measurements)) add_measurements_influxdb(self.unique_id, measurements) else: self.logger.debug("No measurements to add to influxdb.")
[ 73, 479 ]
def METHOD_NAME(): data = [ { "title": "title", "paragraphs": [ { "context": "context", "document_id": "document_id", "qas": [ { "question": "question", "id": "id", "answers": [{"text": "answer", "answer_start": 1}], "is_impossible": False, } ], } ], } ] expected_result = pd.DataFrame( [["title", "context", "question", "id", "answer", 1, False, "document_id"]], columns=["title", "context", "question", "id", "answer_text", "answer_start", "is_impossible", "document_id"], ) result = SquadData.to_df(data) assert result.equals(expected_result)
[ 9, 4736, 365, 4323, 365, 24, 2057 ]
def METHOD_NAME(self):
[ 9, 2707, 559 ]
def METHOD_NAME(line, highlight_words): # Join all the words that need to be bolded into one regex words_re = regex.combine_wordsRE(highlight_words) line = words_re.sub(r'<span class="keyword">\1</span>', line) return '<span class="highlight">%s</span>' % line
[ 8186 ]
def METHOD_NAME(self): self.x = array( [ [51.65, -1.90, 50.07], [50.40, -1.23, 50.65], [50.68, -0.04, 51.54], [50.22, -0.02, 52.85], ] ) self.y = array( [ [51.30, -2.99, 46.54], [51.09, -1.88, 47.58], [52.36, -1.20, 48.03], [52.71, -1.18, 49.38], ] ) self.sup = SVDSuperimposer() self.sup.set(self.x, self.y)
[ 0, 1 ]
def METHOD_NAME(self) -> list[dict[str, Any]]: """ Validates a SQL statement :return: A List of SQLValidationAnnotation :raises: DatabaseNotFoundError, NoValidatorConfigFoundError NoValidatorFoundError, ValidatorSQLUnexpectedError, ValidatorSQLError ValidatorSQL400Error """ self.validate() if not self._validator or not self._model: raise ValidatorSQLUnexpectedError() sql = self._properties["sql"] schema = self._properties.get("schema") try: timeout = current_app.config["SQLLAB_VALIDATION_TIMEOUT"] timeout_msg = f"The query exceeded the {timeout} seconds timeout." with utils.timeout(seconds=timeout, error_message=timeout_msg): errors = self._validator.validate(sql, schema, self._model) return [err.to_dict() for err in errors] except Exception as ex: logger.exception(ex) superset_error = SupersetError( message=__( "%(validator)s was unable to check your query.\n" "Please recheck your query.\n" "Exception: %(ex)s", validator=self._validator.name, ex=ex, ), error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR, level=ErrorLevel.ERROR, ) # Return as a 400 if the database error message says we got a 4xx error if re.search(r"([\W]|^)4\d{2}([\W]|$)", str(ex)): raise ValidatorSQL400Error(superset_error) from ex raise ValidatorSQLError(superset_error) from ex
[ 22 ]
def METHOD_NAME(pathname, recursive): dirname, basename = os.path.split(pathname) if not has_magic(pathname): if basename: if os.path.lexists(pathname): yield pathname else: # Patterns ending with a slash should match only directories if os.path.isdir(dirname): yield pathname return if not dirname: if recursive and _isrecursive(basename): yield from glob2(dirname, basename) else: yield from glob1(dirname, basename) return # `os.path.split()` returns the argument itself as a dirname if it is a # drive or UNC path. Prevent an infinite recursion if a drive or UNC path # contains magic characters (i.e. r'\\?\C:'). if dirname != pathname and has_magic(dirname): dirs = METHOD_NAME(dirname, recursive) else: dirs = [dirname] if has_magic(basename): if recursive and _isrecursive(basename): glob_in_dir = glob2 else: glob_in_dir = glob1 else: glob_in_dir = glob0 for dirname in dirs: for name in glob_in_dir(dirname, basename): yield os.path.join(dirname, name)
[ 5764 ]
def METHOD_NAME(object_to_convert): if not isinstance(object_to_convert, list): raise AssertionError("Expected a list or subclass, got {0}".format(type(object_to_convert))) if len(object_to_convert) == 0: return "" all_values = [to_html(element) for element in object_to_convert] all_rows = [get_data_row_string([element]) for element in all_values] return table_from_html_rows(all_rows)
[ 197, 245, 24, 410 ]
def METHOD_NAME(self): IGNORED_ARGS = ['_target_'] result = config_utils.assert_dataclass_signature_match( adapter_mixin_strategies.ResidualAddAdapterStrategy, adapter_mixin_strategies.ResidualAddAdapterStrategyConfig, ignore_args=IGNORED_ARGS, ) signatures_match, cls_subset, dataclass_subset = result assert signatures_match assert cls_subset is None assert dataclass_subset is None
[ 9, 1770, 238, 3675, 1554, 200 ]
def METHOD_NAME(self): """ Retrieves the serial number of the PSU Returns: string: Serial number of PSU """ try: val = open(self.eeprom, "rb").read()[0xc4:0xd9] except Exception: val = None if val != "NA" and len(val) == 23: return val[-3:] else: return "NA"
[ 19, 71 ]
def METHOD_NAME(q2, par): deltaC7_dict = {} for amp in ['perp0', 'para0', 'perp1', 'para1']: deltaC7_dict[amp] = ( par['Lambdab->Lambda deltaC7 a_' + amp + ' Re'] + par['Lambdab->Lambda deltaC7 b_' + amp + ' Re'] *q2 + 1j*par['Lambdab->Lambda deltaC7 a_' + amp + ' Im'] + 1j*par['Lambdab->Lambda deltaC7 b_' + amp + ' Im'] *q2) return transversity_amps_deltaC7(q2, deltaC7_dict, par)
[ -1, 7228, 1364, 15149, 2070 ]
def METHOD_NAME(self) -> str: """ ETag of the outbound endpoint. """ return pulumi.get(self, "etag")
[ 431 ]
def METHOD_NAME(): # Test that these format names don't throw an error Image(format='png') Image(format='jpeg') Image(format='url')
[ 9, 660, 275 ]
def METHOD_NAME(message, line_break=True): """Print the message to absl logging or stdout.""" if is_interactive_logging_enabled(): if line_break: sys.stdout.write(message + "\n") else: sys.stdout.write(message) sys.stdout.flush() else: logging.info(message)
[ 38, 169 ]