text: string (lengths 15 to 7.82k)
ids: sequence (lengths 1 to 7)
def METHOD_NAME(add_default_extended_time_range: Optional[bool] = None, end_time: Optional[str] = None, entity_id: Optional[str] = None, insight_query_ids: Optional[Sequence[str]] = None, resource_group_name: Optional[str] = None, start_time: Optional[str] = None, workspace_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEntityInsightsResult: """ Execute Insights for an entity. :param bool add_default_extended_time_range: Indicates if query time range should be extended with default time range of the query. Default value is false :param str end_time: The end timeline date, so the results returned are before this date. :param str entity_id: entity ID :param Sequence[str] insight_query_ids: List of Insights Query Id. If empty, default value is all insights of this entity :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str start_time: The start timeline date, so the results returned are after this date. :param str workspace_name: The name of the workspace. """ __args__ = dict() __args__['addDefaultExtendedTimeRange'] = add_default_extended_time_range __args__['endTime'] = end_time __args__['entityId'] = entity_id __args__['insightQueryIds'] = insight_query_ids __args__['resourceGroupName'] = resource_group_name __args__['startTime'] = start_time __args__['workspaceName'] = workspace_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230301preview:getEntityInsights', __args__, opts=opts, typ=GetEntityInsightsResult).value return AwaitableGetEntityInsightsResult( meta_data=pulumi.get(__ret__, 'meta_data'), value=pulumi.get(__ret__, 'value'))
[ 19, 2419, 1689 ]
def METHOD_NAME( base_length: U256, modulus_length: U256, exponent_length: U256, exponent_head: Uint, ) -> Uint: """ Calculate the gas cost of performing a modular exponentiation. Parameters ---------- base_length : Length of the array representing the base integer. modulus_length : Length of the array representing the modulus integer. exponent_length : Length of the array representing the exponent integer. exponent_head : First 32 bytes of the exponent (with leading zero padding if it is shorter than 32 bytes), as an unsigned integer. Returns ------- gas_cost : `Uint` Gas required for performing the operation. """ multiplication_complexity = complexity(base_length, modulus_length) iteration_count = iterations(exponent_length, exponent_head) cost = multiplication_complexity * iteration_count cost //= GQUADDIVISOR return max(Uint(200), cost)
[ 1921, 1955 ]
def METHOD_NAME(self): if self.one_ctf or self.reverse_one_ctf: self.onectf_reset_flag(self.blue_team.flag) self.onectf_reset_flag(self.green_team.flag)
[ -1, 656, 1106 ]
def METHOD_NAME(): m1 = LayerMetal( name="M1", gds_layer_number=1, direction="v", min_length=1000, min_end_to_end=400, offset=0, width=[600], space=[400], stop_pitch=1000, stop_point=200, stop_offset=0 ) m2 = LayerMetal( name="M2", gds_layer_number=2, direction="h", min_length=500, min_end_to_end=300, offset=0, width=[400, 500, 500, 600, 600, 500, 500], space=[300, 300, 400, 400, 400, 300, 300], stop_pitch=1000, stop_point=350, stop_offset=0 ) v1 = LayerVia( name="V1", gds_layer_number=21, stack=['M1', 'M2'], width_x=600, width_y=500, space_x=100, space_y=100 ) pdk = PDK(name= "Mock", layers={'M1': m1, 'M2': m2, 'V1': v1}) with open(my_dir/"test_pdk_one-cand.json", "wt") as fp: fp.write(json.dumps(pdk.dict(), indent=2) + '\n')
[ 9, 206 ]
def METHOD_NAME(s1, s2): s1 = normalize_xml(s1) s2 = normalize_xml(s2) TEST_CASE.assertEqual(s1, s2, "XML Output Changed")
[ 250, 399 ]
def METHOD_NAME(self): self.enable_acl() response = self.git_receive() self.assertEqual(401, response.status_code)
[ 9, 1493, 375, 1918, 4496 ]
def METHOD_NAME(): """ Provide a text file containing a single SMILES string and three-letter residue name. Receive (res).pdb and (res).mol2 files containing a single molecule with conformation. Receive (res)-box.pdb containing a box with specified number Dependencies: OpenEye tools (for creating molecule from SMILES) openmoltools (for calling OpenEye to generate conformer) Gromacs 4.6.7 or 5.1.4 (for calling genbox to create solvent box) ForceBalance 1.5.x (for putting information back that was thrown away by genbox) """ parser = argparse.ArgumentParser() parser.add_argument('--density', type=int, default=600, help='Specify target density of the solvent box; should be somewhat smaller than true liquid density due to imperfect packing.') parser.add_argument('--nmol', type=int, default=256, help='Specify desired number of molecules in the solvent box.') parser.add_argument('--tries', type=int, default=10, help='Pass number of tries per molecule to be passed to genbox. Higher = longer runtime but may achieve higher density.') parser.add_argument('input', type=str, help='Input file containing a single SMILES string') parser.add_argument('resname', type=str, help='Specify a custom residue name for the molecule.') print(f"{__file__} called with the following command line:") print(' '.join(sys.argv)) args = parser.parse_args(sys.argv[1:]) # Create the desired files (.mol2 file containing a single conformation and .pdb file containing solvent box). run_create_mol2_pdb(**vars(args))
[ 57 ]
def METHOD_NAME(self): if self.level > 0: return self.lastpos return self.fp.METHOD_NAME() - self.start
[ 6359 ]
def METHOD_NAME(names, pat): """Construct a list from those elements of the iterable NAMES that match PAT.""" result = [] pat = os.path.normcase(pat) match = _compile_pattern(pat) if os.path is posixpath: # normcase on posix is NOP. Optimize it away from the loop. for name in names: if match(name): result.append(name) else: for name in names: if match(os.path.normcase(name)): result.append(name) return result
[ 527 ]
def METHOD_NAME(self, program_config: ProgramConfig, predictor_config: CxxConfig) -> bool: return True
[ 137, 735, 1205 ]
async def METHOD_NAME(cls): """Create the table in the database""" async with db.Pool.acquire() as conn: await conn.execute(f""" CREATE TABLE {cls.__tablename__} ( guild_id bigint PRIMARY KEY NOT NULL, memberlog_channel bigint NOT NULL, name varchar NOT NULL, send_on_verify boolean )""")
[ 2471, 129 ]
def METHOD_NAME(topology_st): """ :id: d5e16aeb-6810-423b-b5e0-f89e0596292e :setup: Data directory with an openldap config directory. :steps: 1. Parse the configuration 2. Execute a migration with skipped elements :expectedresults: 1. Success 2. Success """ inst = topology_st.standalone config_path = os.path.join(DATADIR1, 'slapd.d') config = olConfig(config_path) ldifs = { "dc=example,dc=com": os.path.join(DATADIR1, 'example_com.slapcat.ldif'), } # 1.3.6.1.4.1.5322.13.1.1 is namedObject, so check that isn't there migration = Migration(inst, config.schema, config.databases, ldifs, skip_schema_oids=['1.3.6.1.4.1.5322.13.1.1'], skip_overlays=[olOverlayType.UNIQUE], ) print("==== migration plan ====") print(migration.__unicode__()) print("==== end migration plan ====") migration.execute_plan() # Check that the overlay ISNT there # Check the schema that SHOULDNT be there.
[ 9, 2744, 6933, -1, 2423, 1532 ]
def METHOD_NAME(self, healthy_example_imu_data): """Test that a correct 180flip is applied if the data is upside-down.""" data = healthy_example_imu_data dataset_flipped = rotate_dataset(data, Rotation.from_euler("z", 180, degrees=True)) fwdsa = ForwardDirectionSignAlignment().align(dataset_flipped, sampling_rate_hz=204.8) for sensor in get_multi_sensor_names(data): assert_almost_equal(data[sensor].to_numpy(), fwdsa.aligned_data_[sensor].to_numpy()) assert_almost_equal(np.rad2deg(fwdsa.rotation_[sensor].as_euler("zxy")), np.array([180.0, 0.0, 0.0]))
[ 9, 4864, 2271 ]
def METHOD_NAME(self): # input args -> format name test_io = [(("coucou.h5",), "HDF5"), (("coucou.le monde.hdf5",), "HDF5"), (("coucou.H5",), "HDF5"), (("some/fancy/../path/file.tiff",), "TIFF"), (("some/fancy/../.hdf5/h5.ome.tiff",), "TIFF"), (("catmaids://fafb.catmaid.virtualflybrain.org/?pid=1&sid0=1",), "Catmaid"), (("catmaid://catmaid.neurodata.io/catmaid/",), "Catmaid"), (("CATMAID://catmaid.neurodata.io/catmaid/",), "Catmaid"), (("a/b/d.tiff",), "TIFF"), (("a/b/d.ome.tiff",), "TIFF"), (("a/b/d.OME.tiff",), "TIFF"), (("a/b/d.OME.TIFF",), "TIFF"), (("a/b/d.h5",), "HDF5"), (("a/b/d.b",), "TIFF"), # fallback to tiff (("d.hdf5",), "HDF5"), (("d.HDF5",), "HDF5"), (("a/b/d.0.ome.tiff",), "TIFF"), # Serialised TIFF must be opened by TIFF ((u"a/b/๐”ธ๐”นโ„‚.ome.tiff".encode("utf-8"),), "TIFF"), # non-ascii characters ] for args, fmt_exp in test_io: fmt_mng = find_fittest_converter(*args, mode=os.O_RDONLY) self.assertEqual(fmt_mng.FORMAT, fmt_exp, "For '%s', expected format %s but got %s" % (args[0], fmt_exp, fmt_mng.FORMAT))
[ 9, 416, -1, 1252, 203 ]
def METHOD_NAME(self): """Stops Spectrum and deletes the Spectrum node""" self.spectrum_node.stop_spectrum() spectrum_node = None if self.is_spectrum_node_available: app.specter.node_manager.delete_node(self.spectrum_node, app.specter) logger.info("Spectrum disabled")
[ 193, 1940 ]
def METHOD_NAME(): raw_event = load_event("s3Event.json") parsed_event: S3Model = S3Model(**raw_event) records = list(parsed_event.Records) assert len(records) == 1 record: S3RecordModel = records[0] raw_record = raw_event["Records"][0] assert record.eventVersion == raw_record["eventVersion"] assert record.eventSource == raw_record["eventSource"] assert record.awsRegion == raw_record["awsRegion"] convert_time = int(round(record.eventTime.timestamp() * 1000)) assert convert_time == 1567539447192 assert record.eventName == raw_record["eventName"] assert record.glacierEventData is None user_identity = record.userIdentity assert user_identity.principalId == raw_record["userIdentity"]["principalId"] request_parameters = record.requestParameters assert str(request_parameters.sourceIPAddress) == "205.255.255.255/32" assert record.responseElements.x_amz_request_id == raw_record["responseElements"]["x-amz-request-id"] assert record.responseElements.x_amz_id_2 == raw_record["responseElements"]["x-amz-id-2"] s3 = record.s3 raw_s3 = raw_event["Records"][0]["s3"] assert s3.s3SchemaVersion == raw_record["s3"]["s3SchemaVersion"] assert s3.configurationId == raw_record["s3"]["configurationId"] assert s3.object.key == raw_s3["object"]["key"] assert s3.object.size == raw_s3["object"]["size"] assert s3.object.eTag == raw_s3["object"]["eTag"] assert s3.object.versionId is None assert s3.object.sequencer == raw_s3["object"]["sequencer"] bucket = s3.bucket raw_bucket = raw_record["s3"]["bucket"] assert bucket.name == raw_bucket["name"] assert bucket.ownerIdentity.principalId == raw_bucket["ownerIdentity"]["principalId"] assert bucket.arn == raw_bucket["arn"]
[ 9, 607, 2117, 417 ]
def METHOD_NAME(file_name: str, file: typing.IO, owner: User, mime_type: str) -> None: """Simple validator that denies all files. Separate for HTML since .html and .htm are both common suffixes for text/html files.""" raise FileValidationError( _('File "{file_name}": HTML upload denied by site security policy').format(file_name=file_name) )
[ 7348, 382 ]
def METHOD_NAME(self, offset, size): self.f.seek(offset) return self.f.read(size)
[ 772, 12192 ]
def METHOD_NAME(self, benchmark, widget, qtbot, fake_web_tab, num_tabs): """Benchmark for update_tab_titles.""" for i in range(num_tabs): widget.addTab(fake_web_tab(), 'foobar' + str(i)) with qtbot.wait_exposed(widget): widget.show() benchmark(widget.update_tab_titles)
[ 9, 86, 5678, 4299, 1668 ]
def METHOD_NAME(f: Callable[[float], float], a: float, b: float, c: float) -> float: if f(a) > c: a, b = b, a # TODO(amylase): Justify this constant for _ in range(100): m = (a + b) / 2 if f(m) < c: a = m else: b = m return m
[ 8660 ]
def METHOD_NAME(self) -> str: """ Gets the workflow trigger callback URL relative path. """ return pulumi.get(self, "relative_path")
[ 1821, 157 ]
def METHOD_NAME(cls, values: Dict[str, Any]) -> Dict[str, Any]: if values.get("client_x509_cert_url") is None: values[ "client_x509_cert_url" ] = f'https://www.googleapis.com/robot/v1/metadata/x509/{values["client_email"]}' return values
[ 187, 200 ]
def METHOD_NAME(img) -> int: """Return the number of dimensions of the image (grayscale = 1, RGB = 3).""" return img.shape[2]
[ 19, 1425 ]
def METHOD_NAME(self, pack_time, referencesf, gc=True): def refs(p, oids=None): return referencesf(unhexlify(p[2:]), oids) return self.base.METHOD_NAME(pack_time, refs, gc)
[ 1699 ]
def METHOD_NAME(self): return self._profiling_mode
[ 19, 854 ]
def METHOD_NAME(self, rules, evaluation: Evaluation): dims = None for rule in rules: pos = rule.elements[0] if pos.has_form("List", None): if dims is None: dims = [0] * len(pos.elements) for i, idx in enumerate(pos.elements): if isinstance(idx, Integer): j = idx.get_int_value() if dims[i] < j: dims[i] = j if any(d == 0 for d in dims): return return ListExpression(*[Integer(d) for d in dims])
[ 416, 5164 ]
def METHOD_NAME(self): temp_dir = tempfile.mkdtemp() queue = ScrapyPriorityQueue.from_crawler( self.crawler, FifoMemoryQueue, temp_dir ) self.assertIsNone(queue.pop()) self.assertEqual(len(queue), 0) req1 = Request("https://example.org/1", priority=1) queue.push(req1) self.assertEqual(len(queue), 1) dequeued = queue.pop() self.assertEqual(len(queue), 0) self.assertEqual(dequeued.url, req1.url) self.assertEqual(dequeued.priority, req1.priority) self.assertEqual(queue.close(), [])
[ 9, 651, 1013, 760, 206 ]
def METHOD_NAME(number: bool): # pylint: disable=invalid-name assert isinstance(number, bool) return FakeThrustRTC.Number(number)
[ 6064, 863 ]
def METHOD_NAME(objective_fn, args, kwargs, grad_fn=None): r"""Compute gradient of the objective function at the given point and return it along with the objective function forward pass (if available). Args: objective_fn (function): the objective function for optimization args (tuple): tuple of NumPy arrays containing the current parameters for the objective function kwargs (dict): keyword arguments for the objective function grad_fn (function): optional gradient function of the objective function with respect to the variables ``args``. If ``None``, the gradient function is computed automatically. Must return the same shape of tuple [array] as the autograd derivative. Returns: tuple (array): NumPy array containing the gradient :math:`\nabla f(x^{(t)})` and the objective function output. If ``grad_fn`` is provided, the objective function will not be evaluated and instead ``None`` will be returned. """ g = get_gradient(objective_fn) if grad_fn is None else grad_fn grad = g(*args, **kwargs) forward = getattr(g, "forward", None) num_trainable_args = sum(getattr(arg, "requires_grad", False) for arg in args) grad = (grad,) if num_trainable_args == 1 else grad return grad, forward
[ 226, 140 ]
def METHOD_NAME(request): client, cluster = get_user_client_and_cluster() create_kubeconfig(cluster) project, ns = create_project_and_ns(USER_TOKEN, cluster, random_test_name("websocket")) p_client = get_project_client_for_token(project, USER_TOKEN) con = [{"name": random_test_name(), "image": TEST_IMAGE, "entrypoint": ["/bin/sh"], "command": ["-c", "while true; do echo websocket; sleep 1s; done;" ], }] wl = p_client.create_workload(name=random_test_name(), containers=con, namespaceId=ns.id) validate_workload(p_client, wl, "deployment", ns.name) pod = p_client.list_pod(workloadId=wl.id).data[0] namespace["ns"] = ns.name namespace["pod"] = pod namespace["cluster"] = cluster namespace["shell_url"] = cluster.get("links").get("shell") def fin(): client.delete(project) request.addfinalizer(fin)
[ 129, 155, 340 ]
def METHOD_NAME(client, phase_factory, topic_factory): phase, module, project, topic = setup_phase( phase_factory, topic_factory, phases.PrioritizePhase ) url = topic.get_absolute_url() with freeze_phase(phase): response = client.get(url) assert_template_response(response, "a4_candy_topicprio/topic_detail.html") assert response.status_code == 200
[ 9, 1801, 1179 ]
def METHOD_NAME(path): base_car_info = defaultdict(list) new_car_info = defaultdict(list) for car in load_base_car_info(path): base_car_info[car.car_fingerprint].append(car) for car in get_all_car_info(): new_car_info[car.car_fingerprint].append(car) # Add new platforms to base cars so we can detect additions and removals in one pass base_car_info.update({car: [] for car in new_car_info if car not in base_car_info}) changes = defaultdict(list) for base_car_model, base_cars in base_car_info.items(): # Match car info changes, and get additions and removals new_cars = new_car_info[base_car_model] car_changes, car_additions, car_removals = match_cars(base_cars, new_cars) # Removals for car_info in car_removals: changes["removals"].append(format_row([car_info.get_column(column, STAR_ICON, VIDEO_ICON, FOOTNOTE_TAG) for column in Column])) # Additions for car_info in car_additions: changes["additions"].append(format_row([car_info.get_column(column, STAR_ICON, VIDEO_ICON, FOOTNOTE_TAG) for column in Column])) for new_car, base_car in car_changes: # Column changes row_diff = build_column_diff(base_car, new_car) if ARROW_SYMBOL in row_diff: changes["column"].append(row_diff) # Detail sentence changes if base_car.detail_sentence != new_car.detail_sentence: changes["detail"].append(f"- Sentence for {base_car.name} changed!\n" + " ```diff\n" + f" - {base_car.detail_sentence}\n" + f" + {new_car.detail_sentence}\n" + " ```") # Print diff if any(len(c) for c in changes.values()): markdown_builder = ["### โš ๏ธ This PR makes changes to [CARS.md](../blob/master/docs/CARS.md) โš ๏ธ"] for title, category in (("## ๐Ÿ”€ Column Changes", "column"), ("## โŒ Removed", "removals"), ("## โž• Added", "additions"), ("## ๐Ÿ“– Detail Sentence Changes", "detail")): if len(changes[category]): markdown_builder.append(title) if category not in ("detail",): markdown_builder.append(COLUMNS) markdown_builder.append(COLUMN_HEADER) markdown_builder.extend(changes[category]) print("\n".join(markdown_builder))
[ 38, 3996, 100, 2443 ]
def METHOD_NAME(self) -> None: self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME(self, session): pass
[ 69, 2333 ]
def METHOD_NAME(message_content, config_file, author, all_languages): message_content = message_content.strip() if message_content == "help" or message_content is None or not message_content.startswith('"'): return help_text split_text = message_content.rsplit('" ', 1) if len(split_text) == 1: return help_text split_text += split_text.pop(1).split(" ") if len(split_text) == 2: # There is no source language split_text.append("") if len(split_text) != 3: return help_text (text_to_translate, target_language, source_language) = split_text text_to_translate = text_to_translate[1:] target_language = get_code_for_language(target_language, all_languages) if target_language == "": return language_not_found_text.format("Target") if source_language != "": source_language = get_code_for_language(source_language, all_languages) if source_language == "": return language_not_found_text.format("Source") try: translated_text = translate( text_to_translate, config_file["key"], target_language, source_language ) except requests.exceptions.ConnectionError as conn_err: return f"Could not connect to Google Translate. {conn_err}." except TranslateError as tr_err: return f"Translate Error. {tr_err}." except Exception as err: return f"Error. {err}." return f"{translated_text} (from {author})"
[ 19, 711, 1227, 17 ]
def METHOD_NAME(A: dace.int32[10, 5, 3]): return np.argmin(A, axis=1)
[ 9, 13085, 1170, 5790 ]
def METHOD_NAME(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(self, add_url_rule: Callable): panel_endpoint = f".{self.id}" group_base = "/<string:group>/" add_url_rule(group_base, endpoint="group", view_func=self.group_view) # models base = f"{group_base}<string:Model>/" add_url_rule(base, endpoint="model", view_func=self.model_view) edit_view = Edit.as_view("edit", view_endpoint=panel_endpoint) add_url_rule(f"{base}<int:object_id>", view_func=edit_view) add_url_rule( f"{base}new", view_func=Create.as_view("new", view_endpoint=panel_endpoint) )
[ 428, 2900, 1634 ]
def METHOD_NAME( self, host: str = "", port: int = 0, timeout: float = -999, source_address: tuple[str, int] | None = None ) -> str: ...
[ 707 ]
def METHOD_NAME(): pin_obj = quantiles.PinballLoss( reduction=tf.keras.losses.Reduction.SUM, name="pin_1" ) assert pin_obj.name == "pin_1" assert pin_obj.reduction == tf.keras.losses.Reduction.SUM
[ 9, 200 ]
def METHOD_NAME(self): self.data_dir = tempfile.mkdtemp() database_path = os.path.join(self.data_dir, "mephisto.db") assert self.DB_CLASS is not None, "Did not specify db to use" self.db = self.DB_CLASS(database_path) self.task_run_id = get_test_task_run(self.db) self.task_run = TaskRun.get(self.db, self.task_run_id)
[ 0, 1 ]
def METHOD_NAME(self):
[ 947, 553 ]
def METHOD_NAME(self): """Test that we can generate the metadata without error""" xml, errors = self.backend.generate_metadata_xml() self.assertEqual(len(errors), 0) self.assertEqual(xml.decode()[0], "<")
[ 9, 773, 552 ]
def METHOD_NAME(self): url = "http://localhost:5066/state" return requests.get(url).json()
[ 19, 4640, 551 ]
def METHOD_NAME( self, request: pulumi.language_pb2.GetProgramDependenciesRequest, context: grpc.ServicerContext, ) -> pulumi.language_pb2.GetProgramDependenciesResponse: """GetProgramDependencies returns the set of dependencies required by the program."""
[ 19, 735, 2410 ]
def METHOD_NAME(self): return URIRef('http://test.atramhasis.org/void.ttl#bigdataset')
[ 19, -1 ]
def METHOD_NAME(self, *args, **kwargs): result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) return result
[ 146 ]
def METHOD_NAME(ttFont: TTFont, doc: DesignSpaceDocument, vfName: str) -> None: """Build the STAT table for the variable font identified by its name in the given document. Knowing which variable we're building STAT data for is needed to subset the STAT locations to only include what the variable font actually ships. .. versionadded:: 5.0 .. seealso:: - :func:`getStatAxes()` - :func:`getStatLocations()` - :func:`fontTools.otlLib.builder.buildStatTable()` """ for vf in doc.getVariableFonts(): if vf.name == vfName: break else: raise DesignSpaceDocumentError( f"Cannot find the variable font by name {vfName}" ) region = getVFUserRegion(doc, vf) return fontTools.otlLib.builder.buildStatTable( ttFont, getStatAxes(doc, region), getStatLocations(doc, region), doc.elidedFallbackName if doc.elidedFallbackName is not None else 2, )
[ 56, 16387, 1813, 410 ]
def METHOD_NAME(encoding): """ Given a sparse surface voxelization, fill in between columns. Parameters -------------- encoding: Encoding object or sparse array with shape (?, 3) Returns -------------- A new filled encoding object. """ return enc.SparseBinaryEncoding( ops.METHOD_NAME(_sparse_indices(encoding, rank=3)))
[ 1917, 414 ]
def METHOD_NAME(self): BUF = c_char * 4 buf = BUF()
[ 9, 49, 988 ]
def METHOD_NAME(self): kwargs = super().METHOD_NAME() kwargs["user"] = self.request.user kwargs["organisation"] = self.organisation kwargs["initial"] = { "sender_name": self.organisation.name, "sender": settings.CONTACT_EMAIL, } if not self._check_permission(self.organisation, self.request.user): kwargs["initial"]["receivers"] = models.PROJECT return kwargs
[ 19, 1029, 1475 ]
def METHOD_NAME(self): """ Case from R. Simpson's PhD thesis. His wing tip displacements: 15.627927627927626, 3.3021978021978025 I always get higher deflection when using my gravity implementation instead of his. His results with gravity are not validated.*** :return: """ import sharpy.sharpy_main solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + '/smith_g_4deg/smith_g_4deg.sharpy') sharpy.sharpy_main.main(['', solver_path]) # read output and compare output_path = os.path.dirname(solver_path) + '/output/smith_g_4deg/WriteVariablesTime/' pos_data = np.genfromtxt(output_path + 'struct_pos_node20.dat') self.assertAlmostEqual((pos_data[2] - 15.55)/15.55, 0.00, 2) self.assertAlmostEqual((pos_data[3] - 3.671)/3.671, 0.00, 2) # results: # N = 10 elements # M = 15 elements # full wake: # Nrollup = 100 # Mstar = 80 # pos last node of the wing # 0.05668 15.5422 3.53971 # total forces: # tstep | fx_st | fy_st | fz_st # 0 | 3.229 | -1.059e-3| 3.766e2 # 7500 seconds
[ 9, -1, 4197 ]
def METHOD_NAME(): dvc_version = f"DVC version: {__version__}{package}" info = [ dvc_version, "-" * len(dvc_version), f"Platform: Python {platform.python_version()} on {platform.platform()}", f"Subprojects:{_get_subprojects()}", f"Supports:{_get_supported_remotes()}", f"Config:{_get_config_dirs()}", ] try: with Repo() as repo: # cache_dir might not exist yet (e.g. after `dvc init`), and we # can't auto-create it, as it might cause issues if the user # later decides to enable shared cache mode with # `dvc config cache.shared group`. if os.path.exists(repo.cache.local.path): info.append(f"Cache types: {_get_linktype_support_info(repo)}") fs_type = _get_fs_type(repo.cache.local.path) info.append(f"Cache directory: {fs_type}") else: info.append("Cache types: " + error_link("no-dvc-cache")) info.append(f"Caches: {_get_caches(repo.cache)}") info.append(f"Remotes: {_get_remotes(repo.config)}") root_directory = repo.root_dir fs_root = _get_fs_type(os.path.abspath(root_directory)) info.append(f"Workspace directory: {fs_root}") info.append(f"Repo: {_get_dvc_repo_info(repo)}") info.append(f"Repo.site_cache_dir: {repo.site_cache_dir}") except NotDvcRepoError: pass except SCMError: info.append("Repo: dvc, git (broken)") return "\n".join(info)
[ 19, 14668, 100 ]
def METHOD_NAME(self, event): self.model.options['compression_level'] = int(self.compression_level_element.value)
[ 69, 4483, 33, 194 ]
def METHOD_NAME(qtbot, editor_pid_watcher): if not editor_pid_watcher.has_pidfile: with qtbot.wait_signal(editor_pid_watcher.appeared, raising=False): pass if not editor_pid_watcher.manual_check(): pytest.fail("Editor pidfile failed to appear!")
[ 618, 2977 ]
def METHOD_NAME(task: UserTask, room: Room): assert task.status == TaskStatus.EXECUTED assert task.user.room == room
[ 638, 3163, 132, 623, 2046 ]
def METHOD_NAME(self) -> str: """ The name of the resource """ return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(variables): head = "\n--- Testing bias() " print("%s%s\n" % (head, "-"*(120-len(head)))) for vdict in variables: var = vdict["var"] try: vdict["bias"] = var.bias(var) print(vdict["bias"]) except il.NotSpatialVariable: pass
[ 9, 1173 ]
def METHOD_NAME(): """ see https://nose.readthedocs.io/en/latest/writing_tests.html#test-packages this is called from __init__.py """ cursor = connection.cursor() sql = """ INSERT INTO public.oauth2_provider_application( id,client_id, redirect_uris, client_type, authorization_grant_type, client_secret, name, user_id, skip_authorization, created, updated) VALUES ( 44,'{oauth_client_id}', 'http://localhost:8000/test', 'public', 'client-credentials', '{oauth_client_secret}', 'TEST APP', {user_id}, false, '1-1-2000', '1-1-2000'); """ sql = sql.format(user_id=1, oauth_client_id=OAUTH_CLIENT_ID, oauth_client_secret=OAUTH_CLIENT_SECRET) cursor.execute(sql) app_settings() # adds languages to system prepare_terms_index(create=True) prepare_concepts_index(create=True) prepare_search_index(create=True)
[ 0, 1, 9, 360 ]
def METHOD_NAME(self): """Structure energy evaluation - dot-bracket string - verbose output""" fc = RNA.fold_compound(seq1) filename= "test_RNA-mfe_eval.py.out" try: with open(filename, 'w') as f: print(filename ," is opened for writing") energy = fc.eval_structure_verbose(struct1, f) energy2 = fc.eval_structure_verbose(struct1) self.assertEqual("%6.2f" % energy, "%6.2f" % -5.60) print( struct1, "[ %6.2f ]" % energy) except IOError: print("Could not open ", filename)
[ 9, 1171, 1011, 3832 ]
def METHOD_NAME(ctx): return True
[ 4344, 717 ]
def METHOD_NAME(protected_header): header = b64_decode(protected_header).decode("utf-8") header_object = json.loads(header) clevis = header_object.get("clevis", None) if clevis is None: return None pin = clevis.get("pin", None) if pin == "tang": return clevis elif pin == "sss": subpins = {} jwes = clevis["sss"]["jwe"] for jwe in jwes: subconf = get_clevis_config_from_jwe(jwe) subpin = subconf["pin"] if subpin not in subpins: subpins[subpin] = [subconf[subpin]] else: subpins[subpin].append(subconf[subpin]) return {"pin": "sss", "sss": {"t": clevis["sss"]["t"], "pins": subpins}} else: return {"pin": pin, pin: {}}
[ 19, -1, 200, 280, 814, 572 ]
def METHOD_NAME(cls, queue_name): for task in cls.all(): task.start(queue_name)
[ 447, 75 ]
def METHOD_NAME( datasets: Sequence[xr.DataArray], weights: Sequence[xr.DataArray], combine_times: bool ) -> xr.DataArray: """Stack datasets blending overlap using weights.""" attrs = _combine_stacked_attrs([data_arr.attrs for data_arr in datasets], combine_times) overlays = [] for weight, overlay in zip(weights, datasets): # Any 'overlay' fill values should already be reflected in the weights # as 0. See _fill_weights_for_invalid_dataset_pixels. We fill NA with # 0 here to avoid NaNs affecting valid pixels in other datasets. Note # `.fillna` does not handle the `_FillValue` attribute so this filling # is purely to remove NaNs. overlays.append(overlay.fillna(0) * weight) # NOTE: Currently no way to ignore numpy divide by 0 warnings without # making a custom map_blocks version of the divide base = sum(overlays) / sum(weights) dims = datasets[0].dims blended_array = xr.DataArray(base, dims=dims, attrs=attrs) return blended_array
[ 1501, 9742, 604, 733 ]
def METHOD_NAME(self, batch: Any, batch_idx: int) -> NoReturn: raise_not_supported("validation")
[ 437, 367 ]
def METHOD_NAME(): text = _get_focused_writable_text() if text is not None and hasattr(text, "indent_region"): text.indent_region()
[ 1660, 4, 3115 ]
def METHOD_NAME(name): debug(f"Extracting episode number from \"{name}\"") if any(ex.search(name) is not None for ex in _excludors): return None for regex in _num_extractors: match = regex.match(name) if match is not None: num = int(match.group(1)) debug(f"  Match found, num={num}") return num debug("  No match found") return None
[ 297, 3188, 181 ]
def METHOD_NAME(source_text, module_path, module_name): """ Source texts containing: - str() calls - [...] // To be filled with more aspects will be modified by ast patching, so will not return empty string """ assert visit_ast(source_text, module_path, module_name) is not None
[ 9, 716, 7745, 1180 ]
def METHOD_NAME(self): inp = (u'index.markdown\n' '\tsection.markdown\n' '\t\tsubsection.markdown') sitemap = self.parse(inp) all_sources = sitemap.get_all_sources() self.assertEqual(len(all_sources), 3) self.assertEqual(all_sources['index.markdown'], ['section.markdown']) self.assertEqual(all_sources['section.markdown'], ['subsection.markdown'])
[ 9, 6020 ]
def METHOD_NAME(doc, fig, **kwargs): doc.normalize() locations = [s.location for s in doc.sources] names = [s.name for s in doc.sources] plotLocations(locations, fig, names, **kwargs)
[ 1288, 352 ]
def METHOD_NAME(args: Optional[Sequence[str]] = None) -> Dict[str, Any]: parser: argparse.ArgumentParser = argparse.ArgumentParser() parser.add_argument( f'--{constants.MONGO_GCS_INPUT_URI}', dest=constants.MONGO_GCS_INPUT_URI, required=True, help='MONGO Cloud Storage Input Connection Uri' ) parser.add_argument( f'--{constants.MONGO_GCS_INPUT_DATABASE}', dest=constants.MONGO_GCS_INPUT_DATABASE, required=True, help='MONGO Cloud Storage Input Database Name' ) parser.add_argument( f'--{constants.MONGO_GCS_INPUT_COLLECTION}', dest=constants.MONGO_GCS_INPUT_COLLECTION, required=True, help='MONGO Cloud Storage Input Collection Name' ) parser.add_argument( f'--{constants.MONGO_GCS_OUTPUT_FORMAT}', dest=constants.MONGO_GCS_OUTPUT_FORMAT, required=True, help='Output file format (one of: avro,parquet,csv,json)', choices=[ constants.FORMAT_AVRO, constants.FORMAT_PRQT, constants.FORMAT_CSV, constants.FORMAT_JSON ] ) parser.add_argument( f'--{constants.MONGO_GCS_OUTPUT_LOCATION}', dest=constants.MONGO_GCS_OUTPUT_LOCATION, required=True, help='Cloud Storage location for output files' ) parser.add_argument( f'--{constants.MONGO_GCS_OUTPUT_MODE}', dest=constants.MONGO_GCS_OUTPUT_MODE, required=False, default=constants.OUTPUT_MODE_APPEND, help=( 'Output write mode ' '(one of: append,overwrite,ignore,errorifexists) ' '(Defaults to append)' ), choices=[ constants.OUTPUT_MODE_OVERWRITE, constants.OUTPUT_MODE_APPEND, constants.OUTPUT_MODE_IGNORE, constants.OUTPUT_MODE_ERRORIFEXISTS ] ) add_spark_options(parser, constants.get_csv_output_spark_options("mongo.gcs.output.")) known_args: argparse.Namespace known_args, _ = parser.parse_known_args(args) return vars(known_args)
[ 214, 335 ]
def METHOD_NAME(self) -> None: board = [[0, 1, 0], [0, 0, 0], [0, 0, 2]] response = ":one: :x: :three:\n\n" + ":four: :five: :six:\n\n" + ":seven: :eight: :o:\n\n" self._test_parse_board(board, response)
[ 9, 214, 3261 ]
def METHOD_NAME(self): response: Response = self.client.get( path=reverse( "v2:component-action-list", kwargs={ "cluster_pk": self.cluster_1.pk, "service_pk": self.service_1.pk, "component_pk": self.component_1.pk, }, ), ) self.assertEqual(response.status_code, HTTP_200_OK) self.assertEqual(len(response.json()), 2)
[ 9, 1006, 245, 1434 ]
def METHOD_NAME(exc: Exception) -> str: """Format nicely the exception from a stress command failure.""" if hasattr(exc, "result") and exc.result.failed: return f"Stress command completed with bad status {exc.result.exited}: {exc.result.stderr:.100}" return f"Stress command execution failed with: {exc}"
[ 275, 2903, 1660, 168 ]
def METHOD_NAME(self): return jffi.StructLayout(fields = self.fields, union = self.union)
[ 56 ]
def METHOD_NAME(exception_type): # These attributes can be expected to be present. return False
[ 6894, 241, 442 ]
def METHOD_NAME(self, preprocessors, gradebook, resources): """Is a failing code cell correctly graded?""" cell = create_grade_cell("hello", "code", "foo", 1) cell.metadata.nbgrader['checksum'] = compute_checksum(cell) cell.outputs = [new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])] nb = new_notebook() nb.cells.append(cell) preprocessors[0].preprocess(nb, resources) gradebook.add_submission("ps0", "bar") preprocessors[1].preprocess(nb, resources) preprocessors[2].preprocess(nb, resources) assert cell.metadata.nbgrader['score'] == 0 assert cell.metadata.nbgrader['points'] == 1 assert 'comment' not in cell.metadata.nbgrader
[ 9, 73, 3534, 544 ]
def METHOD_NAME(icase: int): print('case...%i' % icase)
[ 69, 331 ]
def METHOD_NAME(): # WB par = os.path.join(datadir, "B1855+09_NANOGrav_12yv3.wb.gls.par") tim = os.path.join(datadir, "B1855+09_NANOGrav_12yv3.wb.tim") m, t = get_model_and_toas(par, tim) f = fitter.Fitter.auto(t, m, downhill=True) assert isinstance(f, fitter.WidebandDownhillFitter) f = fitter.Fitter.auto(t, m, downhill=False) assert isinstance(f, fitter.WidebandTOAFitter) # correlated errors m, t = get_model_and_toas( os.path.join(datadir, "J0023+0923_NANOGrav_11yv0.gls.par"), os.path.join(datadir, "J0023+0923_NANOGrav_11yv0.tim"), ) f = fitter.Fitter.auto(t, m, downhill=True) assert isinstance(f, fitter.DownhillGLSFitter) f = fitter.Fitter.auto(t, m, downhill=False) assert isinstance(f, fitter.GLSFitter) # uncorrelated errors m, t = get_model_and_toas( os.path.join(datadir, "NGC6440E.par"), os.path.join(datadir, "NGC6440E.tim") ) f = fitter.Fitter.auto(t, m, downhill=True) assert isinstance(f, fitter.DownhillWLSFitter) f = fitter.Fitter.auto(t, m, downhill=False) assert isinstance(f, fitter.WLSFitter)
[ 9, -1 ]
def METHOD_NAME(self, jobs_root): if not os.path.exists(jobs_root): os.makedirs(jobs_root) random_folder_name = str(uuid.uuid4()) full_path = os.path.join(jobs_root, random_folder_name) if not os.path.exists(full_path): os.makedirs(full_path) return full_path
[ 129, 198, 11189, 1190 ]
def METHOD_NAME(name): match = re.fullmatch(r"xnn_(f16|f32)_ibilinear_chw_ukernel__(.+)_p(\d+)", name) assert match is not None pixel_tile = int(match.group(3)) channel_tile = 1 arch, isa, assembly = xnncommon.parse_target_name(target_name=match.group(2)) return channel_tile, pixel_tile, arch, isa
[ 265, 6826, 156 ]
def METHOD_NAME(self): if super().METHOD_NAME(): return True return self.editable is not None and self.editable.can_see_timeline(session.user)
[ 1046, 1179, 10281 ]
def METHOD_NAME(self, name): self._name = str(name)
[ 0, 156 ]
def METHOD_NAME(self, t): fd, fh, bbox, pfix, args = self.domain.params dptol, ttol = self.params point = self.mesh.point N = point.shape[0] A, B = self.get_iterate_matrix() D = spdiags(1.0/A.diagonal(), 0, N, N) C = -(triu(A, 1) + tril(A, -1)) X = D*(C*point[:, 0] - B*point[:, 1]) Y = D*(B*point[:, 0] + C*point[:, 1]) dxdt = np.zeros((N, 2), dtype=np.float) dxdt[:,0] = X - point[:,0] dxdt[:,1] = Y - point[:,1] if pfix is not None: dxdt[0:pfix.shape[0],:] = 0 return dxdt
[ 5710, 3475 ]
def METHOD_NAME( state_test: StateTestFiller, pre: Mapping[str, Account], post: Mapping[str, Account], tx: Transaction, ): """ Perform MCOPY operations that expand the memory, and verify the gas it costs to do so. """ state_test( env=Environment(), pre=pre, post=post, txs=[tx], )
[ 9, 10808, 1645, 4691 ]
def METHOD_NAME(self): """test invalid username""" response = self.client.post( reverse("authentik_providers_oauth2:token"), { "grant_type": GRANT_TYPE_CLIENT_CREDENTIALS, "scope": SCOPE_OPENID, "client_id": self.provider.client_id, "username": "saa", "password": self.token.key, }, ) self.assertEqual(response.status_code, 400) self.assertJSONEqual( response.content.decode(), {"error": "invalid_grant", "error_description": TokenError.errors["invalid_grant"]}, )
[ 9, 909, 21 ]
def METHOD_NAME(self): import glob l = glob.glob('/OVS/Repositories/*/VirtualMachines/'+self.uuid+'/vm.cfg')+glob.glob(os.path.join(self.xen_d, self.uuid)) if len(l) > 1: self.log.warning("%d configuration files found in repositories (%s)"%(len(l), str(l))) elif len(l) == 0: raise ex.Error("no configuration file found in repositories") return l[0]
[ 416, -1 ]
def METHOD_NAME(message, require_encryption, key_encryption_key, resolver): ''' Returns the decrypted message contents from an EncryptedQueueMessage. If no encryption metadata is present, will return the unaltered message. :param str message: The JSON formatted QueueEncryptedMessage contents with all associated metadata. :param bool require_encryption: If set, will enforce that the retrieved messages are encrypted and decrypt them. :param object key_encryption_key: The user-provided key-encryption-key. Must implement the following methods: unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. get_kid()--returns a string key id for this key-encryption-key. :param function resolver(kid): The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above. :return: The plain text message from the queue message. :rtype: str ''' try: message = loads(message) encryption_data = _dict_to_encryption_data(message['EncryptionData']) decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents']) except (KeyError, ValueError): # Message was not json formatted and so was not encrypted # or the user provided a json formatted message. if require_encryption: raise ValueError(_ERROR_MESSAGE_NOT_ENCRYPTED) return message try: return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') except Exception: raise AzureException(_ERROR_DECRYPTION_FAILURE)
[ 443, 651, 277 ]
def METHOD_NAME(self): try: obj = GeoConfig.objects.get(domain=self.domain) except GeoConfig.DoesNotExist: obj = GeoConfig() obj.domain = self.domain return obj
[ 200 ]
def METHOD_NAME(params_file,module_name="params"): """ Helper to load a python file used to provide input parameters. """ pscript = "from visit_utils import *\n" + open(params_file).read() params = define_module(module_name,pscript) # auto setup 'root' PropertyTree if not 'root' in dir(params): root = PropertyTree() for val in dir(params): if val.count("__") != 2: root[val] = params.__dict__[val] params.__dict__['root'] = root return params
[ 557, 434 ]
def METHOD_NAME(self, method): # pylint: disable=unused-argument """setup method.""" chrome_options = Options() chrome_options.add_argument("--headless") chrome_options.add_argument('--disable-dev-shm-usage') chrome_options.add_argument("disable-infobars") chrome_options.add_argument("--disable-extensions") chrome_options.add_argument("--disable-gpu") chrome_options.add_argument("--no-sandbox") self.driver = webdriver.Chrome( options=chrome_options) # pylint: disable=attribute-defined-outside-init self.driver.implicitly_wait(10)
[ 102, 103 ]
def METHOD_NAME(self): pytest.skip("float precision test not supported for SpectralVariance")
[ 9, 712, 1819, 1582, 679, 99 ]
def METHOD_NAME(self): """ This method is used to construct the whole restructure computation graph for call_func node with bias addition inside. A whole restructure computation graph will contain a weight node, a bias node, a non-bias addition computation node, a bias reshape node if needed and a bias addition node. Use torch.addmm as an example: The origin node is: %addmm: call_func[target=torch.addmm](args = (%input_1, m1, m2), kwargs = {beta=1, alpha=1}) Restructured graph is: %transpose : [#users=1] = call_function[target=torch.transpose](args = (%m2, 0, 1), kwargs = {}) %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%m1, %transpose), kwargs = {}) %mul : [#users=1] = call_function[target=operator.mul](args = (%input_1, 3), kwargs = {}) %mul_1 : [#users=1] = call_function[target=operator.mul](args = (2, %linear), kwargs = {}) %add : [#users=1] = call_function[target=operator.add](args = (%mul_1, %mul), kwargs = {}) """ pass
[ 567 ]
def METHOD_NAME(stat): click.echo('\nSystem services and devices monitor list\n') header = ['Name', 'Status', 'Type'] table = [] for elements in stat.values(): for element in sorted(elements.items(), key=lambda x: x[1]['status']): entry = [] entry.append(element[0]) entry.append(element[1]['status']) entry.append(element[1]['type']) table.append(entry) click.echo(tabulate(table, header))
[ 52, 1863, 245 ]
def METHOD_NAME(self): import adafruit_adxl34x from adafruit_extended_bus import ExtendedI2C self.sensor = adafruit_adxl34x.ADXL345( ExtendedI2C(self.input_dev.i2c_bus), address=int(str(self.input_dev.i2c_location), 16)) if self.range == '2': self.sensor.range = adafruit_adxl34x.Range.RANGE_2_G elif self.range == '4': self.sensor.range = adafruit_adxl34x.Range.RANGE_4_G elif self.range == '8': self.sensor.range = adafruit_adxl34x.Range.RANGE_8_G elif self.range == '16': self.sensor.range = adafruit_adxl34x.Range.RANGE_16_G
[ 15 ]
def METHOD_NAME(self): # Set a qty = 2 in one line of a contract self.contract3.contract_line_ids.filtered( lambda line: line.name == "Line" ).quantity = 2 wizard = ( self.env["split.contract"] .with_context(active_id=self.contract3.id) .create({}) ) wizard.partner_id = self.partner_2.id wizard.split_line_ids.quantity_to_split = 0 initial_contracts_length = self.env["contract.contract"].search_count([]) # set quantity to split in the wizard wizard.split_line_ids.filtered(lambda l: l.name == "Line").quantity_to_split = 1 # confirm wizard with setting to_split quantities new_contract = wizard.action_split_contract() # A new contract must have been created. self.assertEqual( initial_contracts_length + 1, self.env["contract.contract"].search_count([]) ) # new contract has partner_2 as partner_id self.assertEqual(self.partner_2.id, new_contract.partner_id.id) # new contract has now the splitted line with a qty of one self.assertEqual(1, len(new_contract.contract_line_ids.ids)) self.assertEqual(1, new_contract.contract_line_ids.quantity) self.assertEqual( self.contract3, new_contract.contract_line_ids.mapped("splitted_from_contract_id"), ) # Original contract still has 3 lines but with a qty=1 in the last line named "Line" self.assertEqual(3, len(self.contract3.contract_line_ids.ids)) self.assertEqual( 1, self.contract3.contract_line_ids.filtered( lambda l: l.name == "Line" ).quantity, )
[ 9, 265, 206, 534, 206, 13417 ]
def METHOD_NAME(self) -> Optional[str]: """ The timestamp of resource creation (UTC). """ return pulumi.get(self, "created_at")
[ 152, 1541 ]
def METHOD_NAME(self): """Return the full path of this file.""" return self.file.name
[ 5944, 171, 157 ]
def METHOD_NAME(bboxes, image_shape: Tuple[int, int]): """ Transforms bboxes from xyxy format to CX-CY-W-H format. This function operates in-place. Note that bboxes dtype is preserved, and it may lead to unwanted rounding errors when computing a center of bbox. :param bboxes: BBoxes of shape (..., 4) in XYXY format :return: BBoxes of shape (..., 4) in CX-CY-W-H format """ if not torch.jit.is_scripting(): if torch.is_tensor(bboxes) and not torch.is_floating_point(bboxes): warnings.warn( f"Detected non floating-point ({bboxes.dtype}) input to xyxy_to_cxcywh_inplace function. This may cause rounding errors and loss of precision. " "You may want to convert your array to floating-point precision first." ) elif isinstance(bboxes, np.ndarray) and not is_floating_point_array(bboxes): warnings.warn( f"Detected non floating-point input ({bboxes.dtype}) to xyxy_to_cxcywh_inplace function. This may cause rounding errors and loss of precision. " "You may want to convert your array to floating-point precision first." ) bboxes[..., 2:4] -= bboxes[..., 0:2] # x2y2 - x1y1 -> wh bboxes[..., 0:2] += bboxes[..., 2:4] * 0.5 # cxcywh return bboxes
[ 9316, 24, 9317, 5920 ]
def METHOD_NAME(self, index): """ Retrieves the output power in micro watts of a power supply unit (PSU) defined by 1-based index <index> :param index: An integer, 1-based index of the PSU of which to query o/p power :return: An integer, value of o/p power in micro Watts if PSU is good, else zero """ if index is None: raise RuntimeError("index shouldn't be None") if not self.get_psu_presence(index) or not self.get_psu_status(index): return 0 power = self._read_file(self.psu_power, index) return power
[ 19, 146, 1928 ]